\documentclass[a4paper,12pt, notitlepage]{article}
\usepackage[top=25mm,bottom=25mm,left=25mm,right=25mm]{geometry}
%\usepackage{amsmath}
\usepackage{graphicx}
%\usepackage{epstopdf}
\usepackage{listings}
\usepackage{color}
\usepackage{url}
\usepackage{setspace}
%\usepackage[square, numbers]{natbib}
\usepackage{titlesec}
\usepackage{fancyhdr}
\usepackage[ddmmyyyy]{datetime}

\setstretch{1.44}
\setlength{\columnsep}{6mm}
\titleformat{\section}{\bfseries\large\scshape\filright}{\thesection}{1em}{}
\titleformat{\subsection}{\bfseries\normalsize\scshape\filright}{\thesubsection}{1em}{}
\renewcommand{\abstractname}{}
\newcommand{\captionfonts}{\footnotesize}
\renewcommand\thesection{\arabic{section}.}
\renewcommand\thesubsection{\arabic{section}.\arabic{subsection}}

\makeatletter
\long\def\@makecaption#1#2{
  \vskip\abovecaptionskip
  \sbox\@tempboxa{{\captionfonts #1: #2}}
  \ifdim \wd\@tempboxa >\hsize
    {\captionfonts #1: #2\par}
  \else
    \hbox to\hsize{\hfil\box\@tempboxa\hfil}
  \fi
  \vskip\belowcaptionskip}
\makeatother

\definecolor{dkgreen}{rgb}{0,0.6,0}
\definecolor{gray}{rgb}{0.5,0.5,0.5}
\definecolor{mauve}{rgb}{0.58,0,0.82}

\lstset{frame=tb,
  language=python,
  aboveskip=3mm,
  belowskip=3mm,
  showstringspaces=false,
  columns=flexible,
  basicstyle={\small\ttfamily},
  numbers=none,
  numberstyle=\tiny\color{gray},
  keywordstyle=\color{blue},
  commentstyle=\color{dkgreen},
  stringstyle=\color{mauve},
  breaklines=true,
  breakatwhitespace=true,
  tabsize=3
}

\pagestyle{fancy}
\fancyhf{}
\rhead{Getting Started: Overview}
\lhead{\includegraphics[height=1cm]{./Images/logo.pdf}}
\rfoot{Page \thepage}
\lfoot{}
\renewcommand{\dateseparator}{.}

\begin{document}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\title{\textbf{\large{Getting Started with the HILTOP: Overview}}}
\author{\normalsize{Devtank Ltd.} \\ \small\textit{ Harry Geyer}}
\date{\today}
\maketitle
\thispagestyle{fancy}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{abstract}
\noindent
This is an overview of getting started with the HILTOP. If more detail is required, refer to the more specialised documentation attached.
\end{abstract}
\vspace{11mm}
\newpage
\tableofcontents
\newpage
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Prerequisites} \label{sec: prereq}

\textbf{Hardware:}
\begin{enumerate}
\item HILTOP
\item Multimeter
\item Linux machine to work with (Suggested)
\end{enumerate}

\noindent
\textbf{Software:}
\begin{enumerate}
\item HILTOP development image
\item libdevtankreborn library
\end{enumerate}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Starting} \label{sec: start}

\subsection{Renaming} \label{renameStart}

First, to rename your files, directories and file contents to your project name, use the bash script in the base directory: \lstinline!./rename.sh <your_project_name>!. Ensure the name cannot be interpreted as a function, i.e. ``print" is a bad name. As long as every project name is unique, renaming can be repeated if a name change is needed. In these documents, it is assumed this step has not been taken, so the nomenclature will be simple: replace ``example" with your project name.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\small{
\begin{thebibliography}{99}
\setlength{\itemsep}{-2mm}
\bibitem{Webpage} Page Title, Author {\url{https://www.url.org/}} {Accessed: dd.mm.yy}.
\bibitem{Book} Author, {\em Book Name}.
Publisher {\bf Edition}, Pages (Publish Year). \end{thebibliography} } \end{document}
{ "alphanum_fraction": 0.6555673948, "avg_line_length": 27.401459854, "ext": "tex", "hexsha": "29855adfbb2dfff95eabd6fdd1aad87b7ca450b5", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "32d354d2cb5c3b2f8838d07fb1e1d4ceb611e885", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "devtank-ltd/devtank-dtlib", "max_forks_repo_path": "docs/GettingStarted/overview.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "32d354d2cb5c3b2f8838d07fb1e1d4ceb611e885", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "devtank-ltd/devtank-dtlib", "max_issues_repo_path": "docs/GettingStarted/overview.tex", "max_line_length": 486, "max_stars_count": null, "max_stars_repo_head_hexsha": "32d354d2cb5c3b2f8838d07fb1e1d4ceb611e885", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "devtank-ltd/devtank-dtlib", "max_stars_repo_path": "docs/GettingStarted/overview.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1067, "size": 3754 }
\documentclass{article}

\usepackage{booktabs}
\usepackage{comment}
\usepackage{hyperref}
\usepackage{tabularx}
\usepackage[dvipsnames]{xcolor}

\title{SE 3XA3: Development Plan\\Legend of Python}

\author{Team \#1, Lava Boys Inc
  \\ Bilal Jaffry, jaffryb
  \\ Giacomo Loparco, loparcog
  \\ Lucas Zacharewicz, zacharel
}

\date{}

\begin{comment}
\input{../Comments}
\end{comment}

\begin{document}

\begin{table}[hp]
\caption{Revision History} \label{TblRevisionHistory}
\begin{tabularx}{\textwidth}{llX}
\toprule
\textbf{Date} & \textbf{Developer(s)} & \textbf{Change}\\
\midrule
September 27 & Lucas & Added basic text for each category\\
September 28 & Lucas & Made the language and word use more professional\\
November 30 & Lucas & Revision 1\\
\bottomrule
\end{tabularx}
\end{table}

\newpage

\maketitle

\section{Team Meeting Plan}

We will meet weekly every Tuesday from 16:30 to 19:00 on campus \textcolor{blue}{on the second floor of Thode Library.} In addition, we will consistently update one another on the progress of the project via online communications. The meetings will mainly be used to recap the progress of the work done in the last week and to distribute the work for the coming week. Any group member not able to make it to the weekly meeting needs to inform the group at least one hour before the meeting.

\section{Team Communication Plan}

We will write plain-text notes during in-person meetings to keep track of the ideas that we have and what we accomplish during the meetings. In addition, we will use the group online messaging platform (Discord) to facilitate project discussion outside of the meetings.

\section{Team Member Roles}

We are mutual team leaders.\\
\\
Bilal Jaffry: Scribe, developer, designer\\
Giacomo Loparco: Developer, tester\\
Lucas Zacharewicz: Developer, tester\\

\section{Git Workflow Plan}

We plan on using a feature branch workflow, which is an extension of the regular centralized workflow that is the basis of Git. Features are developed on separate branches, and those branches are merged into master as they become complete. This allows us to work on features separately and add them in without breaking what is already working. \textcolor{blue}{Lucas has been assigned to resolve merge conflicts and Bilal and Jack have been assigned to handle issues.}

\section{Proof of Concept Demonstration Plan}

\textcolor{blue}{The most difficult part of the coding will be implementing backend functionality such as sprite rendering/loading, enemy AI, and other base gameplay mechanics. Testing of such mechanics will be done rather easily with automated unit testing. The required Pygame library is relatively easy to obtain and install. As we are using Python, portability is not a concern.}

We will aim to display a working test area that encapsulates the player with basic enemies and allows the basic mechanics of the game to be tested. This will include rendering the game scenes, interacting with enemies, walking through multi-room areas, item interaction, and perhaps other features, time permitting.

\section{Technology}

Python 3 will be the base language for the project, as it is high level and will allow for a simpler implementation with the use of the Pygame library. Formal testing will be handled by the pytest library and we will use Doxygen to handle documentation generation. We will facilitate our unit testing, build testing, and documentation generation through make so that these actions are effortless in our development cycle.
There will be no prescribed text editor for this project, as different developers prefer different text editors and there is no particular ill effect in allowing them to use what they are comfortable with.

\section{Coding Style}

\href{https://github.com/google/styleguide}{Google Python Style Guide}

\section{Project Schedule}

\href{https://gitlab.cas.mcmaster.ca/zacharel/pyDroid/tree/master/ProjectSchedule/Group1_Gantt_Rev0.pdf}{Gantt Chart}

\section{Project Review}

\end{document}
{ "alphanum_fraction": 0.7921851667, "avg_line_length": 50.225, "ext": "tex", "hexsha": "505081ff5a567a66c13e45bcfbef3a931656c3aa", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "93c44a8e0a4f160acc8978e8fb4fd981d829088d", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "loparcog/LegendOfPython", "max_forks_repo_path": "Doc/DevelopmentPlan/DevelopmentPlan.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "93c44a8e0a4f160acc8978e8fb4fd981d829088d", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "loparcog/LegendOfPython", "max_issues_repo_path": "Doc/DevelopmentPlan/DevelopmentPlan.tex", "max_line_length": 702, "max_stars_count": null, "max_stars_repo_head_hexsha": "93c44a8e0a4f160acc8978e8fb4fd981d829088d", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "loparcog/LegendOfPython", "max_stars_repo_path": "Doc/DevelopmentPlan/DevelopmentPlan.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 939, "size": 4018 }
\documentclass{beamer} \usepackage[utf8]{inputenc} \usepackage{graphicx} \usepackage{hhline} \usepackage{pdfpages} \usepackage{xcolor} \usepackage{makecell} \usepackage[mode=buildnew]{standalone} \usepackage{amsmath} \usepackage{mathtools} \usepackage{tikz} \usepackage{environ} \usepackage{fontawesome} \usepackage{caption} \usepackage[backend=bibtex,style=authoryear-comp,dashed=false,natbib=true]{biblatex} \addbibresource{bibtex.bib} % \setbeamertemplate{footline}[frame number] \graphicspath{{../img/}} \setbeamertemplate{navigation symbols}{} \setbeamertemplate{page number in head/foot}{} \setbeamertemplate{bibliography item}{} \setbeamertemplate{caption}[numbered] \setbeamercovered{transparent} \renewcommand\refname{Bibliography} \setbeamerfont{institute}{size=\small} \usetheme{Frankfurt} \usecolortheme{whale} \DeclareMathOperator*{\argmax}{argmax} \DeclareCaptionFormat{myformat}{\fontsize{5}{6}\selectfont#1#2#3} \captionsetup{format=myformat} \captionsetup[figure]{labelfont={bf},name={Fig.},labelsep=period} \captionsetup[table]{labelfont={bf},name={Table}} \newcommand{\customframefont}[1]{ \setbeamertemplate{itemize/enumerate body begin}{#1} \setbeamertemplate{itemize/enumerate subbody begin}{#1} } \NewEnviron{framefont}[1]{ \customframefont{#1} % for itemize/enumerate {#1 % For the text outside itemize/enumerate \BODY } \customframefont{\normalsize} } \setbeamertemplate{footline}{% \hfill% \usebeamercolor[fg]{page number in head/foot}% \usebeamerfont{page number in head/foot}% \insertframenumber% %\,/\,\inserttotalframenumber \kern1.2em\vskip4.5pt% } \renewcommand\cellgape{\Gape[3pt]} \definecolor{ao(english)}{rgb}{0.0, 0.5, 0.0} \title{Supervised Spam Classification} \subtitle{``Comparing Effectivity and Robustness of Sequential and Non-Sequential Machine-Learning Models"} \author{Atreya Shankar, Cognitive Systems (M.Sc.)} \institute{BM2: Intelligent Data Analysis (IDA) \\ University of Potsdam, SoSe 2019 \\ Prof. Dr. Tobias Scheffer} \date{August 08. 
2019} \begin{document} \begin{frame} \maketitle \end{frame} \begin{frame} \frametitle{Table of Contents} \setbeamertemplate{enumerate items}[square] \begin{enumerate} \setlength\itemsep{1em} \item Introduction \item Methodologies \item Results \item Evaluation \item Conclusions \item Bibliography \end{enumerate} \end{frame} \section{Introduction} \subsection{} \begin{framefont}{\footnotesize} \begin{frame} \frametitle{Introduction} \vspace{-10pt} \begin{columns} \column{0.003\linewidth} \column{0.40\linewidth} \centering \begin{figure} \captionsetup{justification=centering} \includegraphics[trim={0.5cm 0cm 0.5cm 0.2cm},clip,width=4.7cm]{project_description.png} \caption{Spam project description} \end{figure} \column{0.60\linewidth} \begin{itemize} \setlength\itemsep{1.5em} \item Project description proposes using data in ``emails.mat" file with 10k instances and $\sim$50k features \item Bag-of-words form of data, which would only work for non-sequential learning \item Enron-spam pre-processed text data derived from Enron Corporation scandal; subset of employees' emails became publicly available \parencite{metsis2006spam} \item Consists of 33,716 text-based emails; 16,545 ``ham" and 17,171 spam instances \end{itemize} \end{columns} \end{frame} \end{framefont} \subsection{} \begin{framefont}{\footnotesize} \begin{frame} \frametitle{Objectives} \begin{itemize} \setlength\itemsep{1.5em} \item Utilize enron-spam emails database to implement both sequential and non-sequential supervised classifiers \item Meet project requirement to develop a classifier that attains 99.8\% recall on ``ham" emails \item Provide input into recall values for future spam emails given selected optimal threshold \item Additionally, provide insights into effectivity and robustness of sequential and non-sequential models \end{itemize} \end{frame} \end{framefont} \section{Methodologies} \begin{framefont}{\footnotesize} \begin{frame} \frametitle{Overview} \vspace{-10pt} \begin{columns} \column{0.003\linewidth} \column{0.40\linewidth} \centering \begin{figure} \captionsetup{justification=centering} \includegraphics[width=4.3cm]{train-validate-test.png} \caption{Data splitting schematic \parencite{split}} \end{figure} \column{0.60\linewidth} \begin{itemize} \setlength\itemsep{1.5em} \item Non-sequential model: Support Vector Machine (SVM) \item Sequential model: CNN-LSTM with word/character embeddings \item Due to time limitations, K-fold cross-validation was omitted \item Compromise: train/validate/test on the same subsets of data for fair comparison \item (Train $\cup$ Validation):Test $\Longrightarrow$ 70:30 \item Train:Validation $\Longrightarrow$ 85:15 \end{itemize} \end{columns} \end{frame} \end{framefont} \begin{framefont}{\footnotesize} \begin{frame} \frametitle{Non-Sequential Model: Support Vector Machine (SVM)} \vspace{-10pt} \begin{columns} \column{0.003\linewidth} \column{0.40\linewidth} \centering \begin{figure} \captionsetup{justification=centering} \includegraphics[width=4.3cm]{svm.png} \caption{Support Vector Machine (SVM) schematic \parencite{svmSchematic}} \end{figure} \column{0.60\linewidth} \begin{itemize} \setlength\itemsep{1.5em} \item Pre-processing text to normalized bag-of-words representation with $|V| = 5,000$ words \item Sklearn's \texttt{SGDClassifier} with Mini-Batch SGD and early stopping \item Linear and approximated RBF Kernel \texttt{(RBFSampler)} \item Grid-search over batch-size, regularization term $\alpha$, RBF kernel $\gamma$, and number of sampling components for \texttt{RBFSampler} 
\end{itemize} \end{columns} \end{frame} \end{framefont} \begin{framefont}{\footnotesize} \begin{frame} \frametitle{Sequential Model: CNN-LSTM (Words)} \vspace{-10pt} \begin{columns} \column{0.003\linewidth} \column{0.40\linewidth} \centering \begin{figure} \captionsetup{justification=centering} \includegraphics[width=3cm]{model.png} \caption{Keras schematic for CNN-LSTM (Words)} \end{figure} \column{0.60\linewidth} \begin{itemize} \setlength\itemsep{1.5em} \item Pre-processing text to padded/clipped integer encoded tokens with $|V| = 5,000$ words \item 1-dimensional CNN with varying filters to enrich sequential features; LSTM cell to capture short and long-term sequential relationships; dropout regularization for model robustness \item Grid-search over embedding dimensions, dropout rate, batch-size and learning rate \item Learning both with and without pre-trained GloVe word vectors ($\sim$6 billion tokens) \end{itemize} \end{columns} \end{frame} \end{framefont} \begin{framefont}{\footnotesize} \begin{frame} \frametitle{Sequential Model: CNN-LSTM (Words+Characters)} \vspace{-10pt} \begin{columns} \column{0.003\linewidth} \column{0.40\linewidth} \centering \begin{figure} \captionsetup{justification=centering} \includegraphics[width=4.6cm]{model_combined.png} \caption{Keras schematic for CNN-LSTM (Words+Characters)} \end{figure} \column{0.60\linewidth} \begin{itemize} \setlength\itemsep{1.5em} \item Using character sequences to overcome unknown token issue; same general architecture as before \item Grid-search over embedding dimensions, dropout rate, batch-size and learning rate \item Learning both with and without pre-trained GloVe word vectors ($\sim$6 billion tokens) \item Approximating GloVe character embeddings by averaging over character-containing word vectors \parencite{charEmbed} \end{itemize} \end{columns} \end{frame} \end{framefont} \section{Results} \subsection{} \begin{framefont}{\footnotesize} \begin{frame} \frametitle{Grid-search optimal models} \begin{table} \centering \captionsetup{justification=centering} \bgroup \def\arraystretch{1.5} \begin{tabular}{|c|c|c|} \hline Classifier & Test F$_1$ & ROC-AUC \\ \hhline{|=|=|=|} SVM (Linear Kernel) & 0.9836 & \textbf{0.9965} \\ \hline SVM (Approximated RBF Kernel) & 0.3437 & 0.4063 \\ \hline CNN-LSTM (Words) & 0.9753 & 0.9972 \\ \hline CNN-LSTM (Words+Characters) & 0.9808 & 0.9975 \\ \hline \makecell{CNN-LSTM \\(Words+GloVe)} & 0.9902 & \textbf{0.9989} \\ \hline \makecell{CNN-LSTM \\(Words+Characters+GloVe)} & 0.9902 & \textbf{0.9989} \\ \hline \end{tabular} \egroup \caption{Summary of grid-search optimal models; zero rule classifier baseline is 50.9\%; F$_1$ scores with fixed threshold at 0 and 0.5 for SVM and CNN-LSTM respectively} \end{table} \vspace{-10pt} \begin{itemize} \item Both sequential and non-sequential models achieve high F$_1$ and ROC-AUC test scores \end{itemize} \end{frame} \end{framefont} \subsection{} \begin{framefont}{\footnotesize} \begin{frame} \frametitle{ROC Curve Test Dataset} \begin{figure} \captionsetup{justification=centering} \includegraphics[width=11cm]{roc_test.pdf} \caption{Receiver Operator Characteristic (ROC) curve for test dataset} \end{figure} \end{frame} \end{framefont} \subsection{} \begin{framefont}{\footnotesize} \begin{frame} \frametitle{``Ham" Relative Importance Analysis (SVM)} \centering \begin{figure} \captionsetup{justification=centering} \includegraphics[width=10.5cm]{ham_words.pdf} \caption{Relative importance analysis for SVM (linear kernel)} \end{figure} \end{frame} \end{framefont} 
\subsection{} \begin{framefont}{\footnotesize} \begin{frame} \frametitle{Spam Relative Importance Analysis (SVM)} \centering \begin{figure} \captionsetup{justification=centering} \includegraphics[width=10.5cm]{spam_words.pdf} \caption{Relative importance analysis for SVM (linear kernel)} \end{figure} \end{frame} \end{framefont} \section{Evaluation} \subsection{} \begin{framefont}{\footnotesize} \begin{frame} \frametitle{Optimal Threshold Analysis} \centering \begin{figure} \captionsetup{justification=centering} \includegraphics[trim={0cm 0cm 0.2cm 0cm},clip,width=11.2cm]{combined.pdf} \caption{Precision-Recall curve (ham label) for optimal threshold analysis} \end{figure} \end{frame} \end{framefont} \subsection{} \begin{framefont}{\footnotesize} \begin{frame} \frametitle{Optimal Threshold Performance} \begin{table} \centering \bgroup \def\arraystretch{1.5} \begin{tabular}{|c|c|c|c|} \hline Classifier & Threshold & Recall $\lbrack$Spam$\rbrack$ & Recall $\lbrack$Ham$\rbrack$ \\ \hhline{|=|=|=|=|} SVM (Linear Kernel) & 1.100 & \color{red} 0.6040 & \textbf{0.9982} \\ \hline SVM (Approximated RBF Kernel) & 3.629 & 0.1449 & 0.8659 \\ \hline CNN-LSTM (Words) & 0.9997 & 0.8168 & 0.9976 \\ \hline CNN-LSTM (Words+Characters) & 0.9901 & \color{red} 0.6979 & \textbf{0.9982} \\ \hline \makecell{CNN-LSTM \\(Words+GloVe)} & 0.9997 & 0.9247 & 0.9973 \\ \hline \makecell{CNN-LSTM \\(Words+Characters+GloVe)} & 0.9972 & \color{ao(english)} 0.8908 & \textbf{0.9984} \\ \hline \end{tabular} \egroup \caption{Results of optimal threshold analysis} \end{table} \vspace{-10pt} \begin{itemize} \item Clear trade-off between ham and spam recall exists; most optimal model would be CNN-LSTM with words, characters and GloVe embeddings \end{itemize} \end{frame} \end{framefont} \subsection{} \begin{framefont}{\footnotesize} \begin{frame} \frametitle{Blind Dataset Performance (SMS Spam)} \begin{table} \centering \captionsetup{justification=centering} \bgroup \def\arraystretch{1.5} \begin{tabular}{|c|c|c|} \hline Classifier & Blind F$_1$ & ROC-AUC\\ \hhline{|=|=|=|} SVM (Linear Kernel) & 0.4688 & \textbf{0.7039} \\ \hline SVM (Approximated RBF Kernel) & 0.1785 & 0.4937 \\ \hline CNN-LSTM (Words) & 0.5090 & 0.6158 \\ \hline CNN-LSTM (Words+Characters) & 0.4416 & 0.6522 \\ \hline \makecell{CNN-LSTM \\(Words+GloVe)} & 0.2913 & \textbf{0.7567} \\ \hline \makecell{CNN-LSTM \\(Words+Characters+GloVe)} & 0.3017 & \textbf{0.7578} \\ \hline \end{tabular} \egroup \caption{Results of blind data test; zero rule classifier obtains 87\% due to class imbalance; F$_1$ scores with fixed threshold at 0 and 0.5 for SVM and CNN-LSTM respectively} \end{table} \vspace{-10pt} \begin{itemize} \item Words-based models perform consistently well on blind dataset (albeit worse than zero rule classifier) \item Considering sequential nature of data contributes to some robustness \end{itemize} \end{frame} \end{framefont} \subsection{} \begin{framefont}{\footnotesize} \begin{frame} \frametitle{ROC Curve Blind Dataset} \begin{figure} \captionsetup{justification=centering} \includegraphics[width=11cm]{roc_blind.pdf} \caption{Receiver Operator Characteristic (ROC) curve for blind dataset} \end{figure} \end{frame} \end{framefont} \section{Conclusions} \subsection{} \begin{framefont}{\footnotesize} \begin{frame} \frametitle{Conclusions} \begin{itemize} \setlength\itemsep{1.2em} \item For spam detection, both sequential and non-sequential models are effective \item Trade-off exists between ``ham" and spam recall; an informed decision must be made. 
CNN-LSTM (words+characters+GloVe) performed best in terms of balanced spam and ham recalls \item Both sequential and non-sequential models tend to be robust to new datasets; although sequential models tend to carry richer and more discriminating features \item High cost of training CNN-LSTM; perhaps not economical for a company to deploy GPU on IMAP server \item \textbf{SVM would be a more efficient and scalable option} \end{itemize} \end{frame} \end{framefont} \subsection{} \begin{framefont}{\footnotesize} \begin{frame} \frametitle{Improvements to Embeddings in CNN-LSTM} \vspace{-10pt} \begin{columns} \column{0.003\linewidth} \column{0.40\linewidth} \centering \begin{figure} \captionsetup{justification=centering} \includegraphics[width=4.5cm]{improved_rnn.png} \caption{Improved word-character embedding model \parencite{improvedRNN}} \end{figure} \column{0.60\linewidth} \begin{itemize} \setlength\itemsep{1.5em} \item Separate pipeline for character sequences leads to symbolic overfitting on types of datasets \item Can overcome unknown tokens but contributes uncertainty in terms of dialects and expressions \item \textcite{improvedRNN} proposes a bidirectional LSTM to enrich word vector features \item This could address the unknown token issue without leading to overfitting on entire character sequences \end{itemize} \end{columns} \end{frame} \end{framefont} \begin{frame}[allowframebreaks] \frametitle{Bibliography} \nocite{*} \printbibliography[title = {Bibliography}] \end{frame} \end{document}
{ "alphanum_fraction": 0.7261438343, "avg_line_length": 35.4329411765, "ext": "tex", "hexsha": "87bf4396beecdca05ce8eb0441c755fae1e4ddbb", "lang": "TeX", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2021-02-07T12:30:38.000Z", "max_forks_repo_forks_event_min_datetime": "2021-02-07T12:30:38.000Z", "max_forks_repo_head_hexsha": "e53197f55e0cb86c123fcd66a4cb84413036d024", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "atreyasha/spam_detection", "max_forks_repo_path": "docs/main.tex", "max_issues_count": 2, "max_issues_repo_head_hexsha": "e53197f55e0cb86c123fcd66a4cb84413036d024", "max_issues_repo_issues_event_max_datetime": "2022-02-11T09:55:06.000Z", "max_issues_repo_issues_event_min_datetime": "2021-03-26T10:53:58.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "atreyasha/spam_detection", "max_issues_repo_path": "docs/main.tex", "max_line_length": 190, "max_stars_count": 2, "max_stars_repo_head_hexsha": "e53197f55e0cb86c123fcd66a4cb84413036d024", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "AtreyaSh/spam_detection", "max_stars_repo_path": "docs/main.tex", "max_stars_repo_stars_event_max_datetime": "2020-07-14T12:57:25.000Z", "max_stars_repo_stars_event_min_datetime": "2019-11-28T07:32:13.000Z", "num_tokens": 4939, "size": 15059 }
\section{Training}
\label{sec:training_of_the_cnn:training}

The actual training of the \acrlong{cnn} model is implemented in the Python script \texttt{cnn.py} and uses the high-level Keras \acrshort{api} from the \texttt{tf.keras} module \cite{training_arch_tf_keras}. Four main steps are required to train the model:

\begin{enumerate}
\item Defining the architecture (as shown in section \ref{subsec:training_of_the_cnn:architecture:implementation})
\item Loading the training and validation datasets
\item Compiling the model
\item Fitting the model
\end{enumerate}

The first step is the definition of the \acrshort{cnn} architecture explained in section \ref{subsec:training_of_the_cnn:architecture:implementation}.

The second step is to load the training and validation datasets. Since there are so many frames in the datasets, the frames are loaded in batches of \num{32} (see section \ref{subsec:training_of_the_cnn:dataset:splitting}). The pixel values of the frames are then converted to a 32-bit floating-point format, which is necessary for the training and further increases the required memory by a factor of \num{4}. To avoid unexpected behaviour during training, the pixel values have to be normalized to the range of \numrange{0}{1} \cite{training_train_scaling}. This is done with the help of the \texttt{Dataset\_Generator} class.

The \texttt{Dataset\_Generator} class inherits from the \texttt{tf.keras.utils.Sequence} superclass. The constructor loads the entire labels NumPy array and initializes the attributes of the class. Every \texttt{tf.keras.utils.Sequence} must implement the \texttt{\_\_len\_\_} and \texttt{\_\_getitem\_\_} methods \cite{training_arch_tf_keras_sequence}. The \texttt{\_\_len\_\_} method returns the total number of batches when the built-in Python \texttt{len} function is called. The \texttt{\_\_getitem\_\_} method loads a batch of frames as a NumPy array, converts the data type to \texttt{np.float32} and normalizes the pixel values to the range of \numrange{0}{1}. The normalization is done by dividing all pixel values by the largest possible pixel value of \num{255}. After the normalization, the labels array is split accordingly and the batch of normalized frames and labels is returned as a tuple. The \texttt{\_\_getitem\_\_} method is invoked when the indexing operator \texttt{[]} is used on an object. The definition of the \texttt{Dataset\_Generator} class is shown in listing \ref{lst:dataset_generator_class} and the instantiation of the dataset objects is shown in listing \ref{lst:model_fitting} on line \ref{lst:ln:training_dataset} and \ref{lst:ln:validation_dataset}.

\begin{lstlisting}[style=python, caption={\texttt{Dataset\_Generator} class}, label=lst:dataset_generator_class]
class Dataset_Generator(utils.Sequence):

    def __init__(self, frames_name, labels_name, directory,
                 num_batches, batch_size):
        self.frames_name = frames_name
        self.labels = np.load(directory / f'{labels_name}.npy')
        self.directory = directory
        self.num_batches = num_batches
        self.batch_size = batch_size

    def __len__(self):
        return self.num_batches

    def __getitem__(self, idx):
        start = idx * self.batch_size
        end = start + self.batch_size
        name = f'{self.frames_name}_batch_{idx}_of_{self.num_batches}.npy'
        frames = np.load(self.directory / name)
        batch_x = frames.astype(np.float32) / 255.0
        batch_y = np.asarray(self.labels[start:end])
        return batch_x, batch_y
\end{lstlisting}

\begin{lstlisting}[style=python, caption={Training of the model}, label=lst:model_fitting]
# 2. Loading the datasets
training_dataset = fh.Dataset_Generator((*\label{lst:ln:training_dataset}*)
    fh.training_frames_name, fh.training_labels_name,
    fh.dir_training_dataset, 4903, 32)
validation_dataset = fh.Dataset_Generator((*\label{lst:ln:validation_dataset}*)
    fh.validation_frames_name, fh.validation_labels_name,
    fh.dir_validation_dataset, 1050, 32)

# 3. Compile the model
model.compile((*\label{lst:ln:compile}*)
    optimizer='adam',
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=['accuracy'])

# Save only the weights after each epoch
cp_callback = tf.keras.callbacks.ModelCheckpoint(
    filepath=str(fh.dir_checkpoint / 'cp-{epoch:04d}.ckpt'),
    save_weights_only=True,
    verbose=1,
    save_freq='epoch')

# 4. Fit the model
history = model.fit((*\label{lst:ln:fit}*)
    x=training_dataset,
    epochs=10,
    validation_data=validation_dataset,
    callbacks=[cp_callback])
\end{lstlisting}

In the third step, the \acrshort{cnn} model is compiled, which configures it for training \cite{training_arch_tf_keras_sequential}. To this end, an optimizer, a loss function and the desired metrics to evaluate are specified. The \textit{Adam} optimizer is used, as it is computationally efficient and can handle a large number of parameters \cite{training_arch_adam}. The cross-entropy loss function is used to evaluate how well the data is modeled. Therefore, it calculates the cross-entropy between the probability distribution of the predictions and the true labels \cite{training_train_entropy}. Keras provides the required implementation with the \texttt{SparseCategoricalCrossentropy} class. The \textit{Sparse} indicates that the labels must be provided as integers rather than in a one-hot representation. Additionally, the \textit{Categorical} indicates that it is a multiclass classification problem and not a binary one \cite{training_train_tf_keras_crossentropy}. The method call is shown in listing \ref{lst:model_fitting} on line \ref{lst:ln:compile}.

The last step is the fitting of the model, which is done in so-called epochs. During an epoch, the entire dataset is used to train the model. The training is performed batch-wise, which means that the weights are only updated after considering an entire batch \cite{training_arch_tf_keras_sequential}. The implementation saves the weights after each of the ten epochs (see listing \ref{lst:model_fitting} on line \ref{lst:ln:fit}).

The training of the \acrshort{cnn} model is performed on a decent laptop computer featuring an Intel Core i7-8850H processor with a max. turbo frequency of \SI{4.30}{GHz}, a mobile NVIDIA Quadro P3200 graphics card with \SI{6}{GiB} of dedicated \acrshort{gpu} memory and \SI{48}{GiB} of \acrshort{ram}. The NVIDIA Quadro P3200 \acrshort{gpu} is CUDA-enabled and features a compute capability of \num{6.1} (which currently ranges from \numrange{2.0}{8.0}). As a result, the CUDA toolkit allows the training process to take advantage of \acrshort{gpu} acceleration \cite{training_train_nvidia}. The training of a single epoch takes about \SI{76}{min} on the \acrshort{cpu} and about \SI{6}{min} on the \acrshort{gpu}. This is a speedup of over $12\times$ when using the graphics card compared to the processor.

\subsection{Training Results}
\label{subsec:training_of_the_cnn:training:training_results}

Figure \ref{fig:training_results} shows the classification accuracy of the training and the validation dataset after each of the ten epochs. It is evident that the accuracy converges extremely fast, which is probably due to the ideal conditions (e.g. consistent background, good lighting). For this reason, no hyperparameter changes (e.g. number of epochs, batch size) are required. The final classification accuracy is \num{0.993} for the training split and \num{0.996} for the validation split. This shows that there is no overfitting and that the \acrshort{cnn} model generalizes well.

\begin{figure}
  \centering
  \includegraphics[width=\textwidth]{training_results}
  \caption{Results of the training of the \acrshort{cnn} model}
  \label{fig:training_results}
\end{figure}

\subsection{Saving of the Model}
\label{subsec:training_of_the_cnn:training:saving_of_the_model}

The final model fit is saved in the TensorFlow SavedModel file format (\texttt{assets/}, \texttt{variables/}, \texttt{saved\_model.pb}), which includes the weights and the computation (e.g. architecture, optimizer state). This is very useful for sharing and deploying the \acrshort{cnn} model \cite{training_train_tf_keras_saving_loading}. Inference tasks commonly use the frozen graph file format (a single \texttt{.pb} file), which contains the architecture and the weights. Xilinx requires such a frozen graph for the quantization of the \acrshort{cnn} model. Unfortunately, TensorFlow dropped support for freezing models as of v2.x. It is, however, still possible to freeze a model created with TensorFlow 2 by using low-level TensorFlow \acrshort{api} calls. Listing \ref{lst:frozen_graph} shows how the \texttt{frozen\_graph.pb} file is created \cite{training_train_frozen}. For inference tasks, it is important to know the names of the input and output layers of the frozen graph. How this information can be obtained is shown in listing \ref{lst:frozen_graph} on line \ref{lst:ln:input_info} and \ref{lst:ln:output_info}. The name of the input layer is \texttt{x} and the name of the output layer is \texttt{Identity}.

\clearpage

\begin{lstlisting}[style=python, caption={Saving the model in the frozen graph file format \cite{training_train_frozen}}, label=lst:frozen_graph]
# Saving the frozen graph
# Convert the Keras model to a concrete function
full_model = tf.function(lambda x: model(x))
full_model = full_model.get_concrete_function(
    x=tf.TensorSpec(model.inputs[0].shape, model.inputs[0].dtype))

# Get the frozen concrete function
frozen_func = convert_variables_to_constants_v2(full_model)
frozen_func.graph.as_graph_def()

# Display information about the input and output layers
print(f'Input: {frozen_func.inputs}')(*\label{lst:ln:input_info}*)
print(f'Output: {frozen_func.outputs}')(*\label{lst:ln:output_info}*)

# Save the frozen graph from the frozen concrete function
tf.io.write_graph(
    graph_or_graph_def=frozen_func.graph,
    logdir=str(fh.dir_frozen_model),
    name='frozen_graph.pb',
    as_text=False)
\end{lstlisting}
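For illustration, the following sketch shows how the exported \texttt{frozen\_graph.pb} file and the reported layer names could later be reloaded for inference with low-level TensorFlow \acrshort{api} calls. This listing is not part of the original \texttt{cnn.py} script; the file path and the helper name \texttt{wrap\_frozen\_graph} are chosen here purely for illustration, following the commonly used TensorFlow 2 pattern of wrapping an imported graph in a pruned function.

\begin{lstlisting}[style=python, caption={Illustrative sketch: reloading the frozen graph for inference}, label=lst:frozen_graph_loading]
# Illustrative only -- not part of cnn.py
import tensorflow as tf

# Read the serialized GraphDef from the frozen graph file
with tf.io.gfile.GFile('frozen_graph.pb', 'rb') as f:
    graph_def = tf.compat.v1.GraphDef()
    graph_def.ParseFromString(f.read())

def wrap_frozen_graph(graph_def, inputs, outputs):
    # Import the graph and prune it to the desired input/output tensors
    def _imports_graph_def():
        tf.compat.v1.import_graph_def(graph_def, name='')
    wrapped_import = tf.compat.v1.wrap_function(_imports_graph_def, [])
    graph = wrapped_import.graph
    return wrapped_import.prune(
        tf.nest.map_structure(graph.as_graph_element, inputs),
        tf.nest.map_structure(graph.as_graph_element, outputs))

# 'x' and 'Identity' are the layer names reported by the freezing script
frozen_func = wrap_frozen_graph(graph_def, inputs='x:0', outputs='Identity:0')

# A normalized batch of frames (np.float32, values in [0, 1]) can then be
# passed directly to obtain the output of the last layer:
# logits = frozen_func(tf.constant(batch_x))
\end{lstlisting}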
{ "alphanum_fraction": 0.7858286629, "avg_line_length": 60.5575757576, "ext": "tex", "hexsha": "b59c05c81152721db4b95e405b1140f20f359aed", "lang": "TeX", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2020-09-20T14:17:25.000Z", "max_forks_repo_forks_event_min_datetime": "2020-09-20T14:17:25.000Z", "max_forks_repo_head_hexsha": "f2379782660d4053a5bb60b9f6c6dea17363f96d", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "MuellerDominik/AIonFPGA", "max_forks_repo_path": "doc/thesis/chapters/training_of_the_cnn/training.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "f2379782660d4053a5bb60b9f6c6dea17363f96d", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "MuellerDominik/AIonFPGA", "max_issues_repo_path": "doc/thesis/chapters/training_of_the_cnn/training.tex", "max_line_length": 302, "max_stars_count": 3, "max_stars_repo_head_hexsha": "f2379782660d4053a5bb60b9f6c6dea17363f96d", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "MuellerDominik/AIonFPGA", "max_stars_repo_path": "doc/thesis/chapters/training_of_the_cnn/training.tex", "max_stars_repo_stars_event_max_datetime": "2021-12-22T13:36:12.000Z", "max_stars_repo_stars_event_min_datetime": "2021-01-21T09:42:10.000Z", "num_tokens": 2479, "size": 9992 }
% to reduce the size of the PDF: % use ghostscript: % gs -q -dSAFER -dNOPAUSE -sDEVICE=pdfwrite -sPDFSETTINGS=printer -sOutputFile="resdoc2006sc.pdf" resdoc2006sc.pdf % the key being sPDFSETTINGS: with options: default, screen, ebook, printer, preprint % try also: -sCompressPages=true % -sDownsampleColorImages=true % -sColorImageResolution=300 % -sGrayImageResolution=300 % -sMonoImageResolution=300 % final choice to get it under 8MB : % gs -q -dSAFER -dNOPAUSE -sDEVICE=pdfwrite -dPDFSETTINGS=/printer -dColorImageResolution=150 -dMonoImageResolution=150 -dGrayImageResolution=150 -dCompatibilityLevel=1.4 -sOutputFile="resdoc2006sc.printer.pdf" resdoc2006sc.pdf < /dev/null \documentclass{beamer} \usepackage{default} \mode<presentation> { % \usetheme{Hannover} % \usetheme{Boadilla} % \usecolortheme{dolphin} % \usecolortheme{seagull} \beamertemplatenavigationsymbolsempty % turn off navigation \hypersetup{pdfstartview={Fit}} % fits the presentation to the window when first displayed } \usepackage{fourier} \usepackage[english]{babel} % English language/hyphenation \usepackage[protrusion=true,expansion=true]{microtype} % Better typography \usepackage[toc,page]{appendix} \usepackage[utf8]{inputenc} \usepackage{csquotes} \usepackage{hyperref} % linking options %%% Equation and float numbering \usepackage{amsmath,amsfonts} % Math packages %\numberwithin{equation}{section} % Equationnumbering: section.eq# \numberwithin{figure}{section} % Figurenumbering: section.fig# \numberwithin{table}{section} % Tablenumbering: section.tab# \usepackage{graphicx} % Allows including images \usepackage{graphics} \usepackage{booktabs} % Allows the use of \toprule, \midrule and \bottomrule in tables \newcommand{\D}{.} \newcommand{\bd}{\string~/bio\D data} % \string~ is a representation of the home directory \newcommand{\mpamap}{\bd/mpa/maps} % \string~ is a representation of the home directory \newcommand{\sab}{\bd/mpa/sab} % \string~ is a representation of the home directory %%% Title page \title[SAB/MPA Framework]{Assessment Framework for the St Anns Bank Marine Protected Area} \author[Choi, et al.]{ Jae S. Choi, Angelia S.M. Vanderlaan, Gordana Lazin, \\ Mike McMahon, Ben Zisserson, Brent Cameron, \\ Jenna Munden } \institute[DFO] { Population Ecology Division \\ Fisheries and Oceans Canada \\ Bedford Institute of Oceanography \\ } \date{\textsc{\today}} % Date, can be changed to a custom date \begin{document} % ---------------------------------------------------------------- \begin{frame} \titlepage % Print the title page as the first slide \end{frame} % ---------------------------------------------------------------- \begin{frame} \frametitle{Overview} % Table of contents slide, comment this block out to remove it \tableofcontents % Throughout your presentation, if you choose to use \section{} and \subsection{} commands, these will automatically be printed on this slide as an overview of your presentation \end{frame} % ---------------------------------------------------------------- \section{Purpose} \begin{frame} \frametitle{Terms of reference} \vspace*{-1.0cm} \begin{itemize} \item Health of the Oceans (HOTO, 2007-2012) \item National Conservation Plan (NCP, 2014-2019) \item Develop a monitoring approach for Marine Protected Areas (MPAs) \item Assess their effectiveness in meeting their objectives. 
\end{itemize} \end{frame} % ---------------------------------------------------------------- \begin{frame} \frametitle{MPA: To conserve and protect} \begin{itemize} \item Commercial and non-commercial fishery resources, including marine mammals, and their \textbf{habitats}; \item Endangered or threatened marine species, and their \textbf{habitats}; \item Unique \textbf{habitats}; \item Marine areas of high \textbf{biodiversity} or biological \textbf{productivity}; \item Any other marine resource or habitat as is necessary to fulfill the mandate of the Minister. \end{itemize} \medskip \textbf{Bottom line}: productivity, biodiversity, habitat \& species of interest \end{frame} % ---------------------------------------------------------------- \begin{frame} \frametitle{St Anns Bank (SAB)} \begin{columns}[c] \begin{column}{.6\textwidth} \begin{figure}[h] \centering %\includegraphics[width=0.6\textwidth]{\mpamap/mpa_closeup.pdf} \includegraphics[width=1.0\textwidth]{\sab/SAB_MPA.pdf} %\caption{Bathymetic (100 m resolution) chart of the St. Anns Bank area with the proposed St Anns Bank Marine Protected Area (thick maroon polygon) and limited fishing zones (maroon lined polygons). See Figure\ref{fig:SAB} for geographic location in a larger map.} \label{fig:SABCloseup} \end{figure} \end{column} \begin{column}{.4\textwidth} Previous discussions of SAB: \begin{itemize} \item DFO 2012 \item Kenchington 2013 \item Ford \& Serdynska 2013 \end{itemize} \end{column} \end{columns} \end{frame} % ---------------------------------------------------------------- \begin{frame} \frametitle{Objectives} \begin{block}{} \begin{itemize} \item monitor and assess the status/effectiveness of an MPA; \item identify data gaps and sources of uncertainty. \end{itemize} \end{block} \begin{block}{} \begin{flushright} \emph{... But how?} \end{flushright} \end{block} \end{frame} % ---------------------------------------------------------------- \begin{frame} \frametitle{A few "minor" issues...} \begin{block}{SAB is:} \begin{itemize} \item a large ecosystem and as such complex, operating at various space, time and organizational scales; \item connected in various ways to the surrounding environment and so cannot be treated as an isolated system; \item measures of system components of interest, namely, productivity, biodiversity, habitat and species of interest, are ambiguous and imperfect at best, and usually non-existent or poor in information quality/quantity. \end{itemize} \end{block} \end{frame} % ---------------------------------------------------------------- \begin{frame}[shrink] \frametitle{Design principles} \begin{block}{} \begin{itemize} \item Expansive area of interest (whole shelf sea) \item Leverage data from \textbf{on-going sampling} programs to smoothly transition into a routine monitoring approach into the future. \begin{itemize} \item Data that are clearly and directly related to MPA objectives/priorities \item Sufficient and regular \textbf{spatial} coverage ($>$ 100 sampling locations) inside MPA and throughout the study area. \item Sufficient and regular \textbf{temporal} coverage ($\sim$ annual, $>$~10 years) inside MPA and throughout the study area \item \textbf{Informative} -- high data quality that is in some manner related to productivity, biodiversity, habitat and species of interest. 
\end{itemize} \item Methods that are transparent, collaboratively developed and easily transferable to other regions \end{itemize} \end{block} \end{frame} % ---------------------------------------------------------------- \begin{frame}[shrink] \frametitle{Data sources being considered} \begin{itemize} \item AZMP/chlorophyll-a and nutrients: BioChem bottle data \item AZMP/Zooplankton: BioChem database \item Remote Sensing Data: ocean colour and SST (Remote Sensing Group) \item Temperature records: from various sources, especially, Groundfish, Snow crab and AZMP surveys \item Salinity (Groundfish surveys/AZMP, BioChem) \item Oxygen and pH (once the data have been reloaded; Groundfish surveys/AZMP, BioChem) \item Bathymetry (CHS, Groundfish survey, Snow crab survey ) \item Groundfish: Research Vessel Surveys focus upon demersal fish species, since $\sim$ 2000, upon invertebrates as well \item Snow crab survey, focus upon benthic invertebrates \item Clam survey data in Banquereau and Western Banks (though it does not pass the temporal coverage conditions, it offers very high resolution multispecies data on the banks) \item Logbook records of catch and effort (MARFIS/ZIFF) \item AIS tracks -- Radio-based Automatic Identification System \item VMS potentially -- Satellite-based Vessel Monitoring System \end{itemize} \end{frame} % ---------------------------------------------------------------- \begin{frame} \frametitle{BioChem Discrete Bottle Data: Chlorophyll-a and Nutrients} \begin{itemize} \item Relevance: productivity, biodiversity, habitat and species of interest (in relative order) \item Sampling: AZMP surveys, Groundfish surveys, pelagic net tows and water profiles \item Spatial coverage: variable no. stations, 143,499 records, 829 missions \item Temporal coverage: 1955 to present, annual surveys \item Source code: \url{https://github.com/jae0/aegis/tree/master/inst/scripts/biochem.r} \end{itemize} \end{frame} % ---------------------------------------------------------------- \section{Data} \begin{frame} \frametitle{} \begin{figure} \centering \includegraphics[width=0.7\textwidth]{\sab/2.png} \caption{Number of chlorophyll and nutrient profiles extracted from the BioChem database for each year since 1955.} \label{fig:ChloroMap} \end{figure} \begin{figure} \centering \includegraphics[width=0.5\textwidth]{\sab/3.png} \caption{Number of chlorophyll and nutrient profiles extracted from the BioChem database for the time period 1955-2014, grouped monthly.} \label{fig:ChloroFreq} \end{figure} \end{frame} % ---------------------------------------------------------------- \begin{frame}[shrink] \frametitle{} \begin{figure} \centering \includegraphics[width=0.8\textwidth]{\sab/4.png} \caption{Monthly spatial distribution of discrete bottle data for the time period 1955-2014.} \label{fig:BottleMap} \end{figure} \end{frame} % ---------------------------------------------------------------- \begin{frame} \frametitle{} \begin{figure} \centering \includegraphics[width=1.0\textwidth]{\sab/5.png} \caption{Depth profiles of chlorophyll-a and nutrients; all data for the time period 1955-2014.} \label{fig:ChloroProfiles} \end{figure} \end{frame} % ---------------------------------------------------------------- \begin{frame} \frametitle{BioChem: Zooplankton} \begin{itemize} \item Relevance: productivity, biodiversity, species of interest, habitat (in relative order) \item Sampling: AZMP surveys, Groundfish surveys, pelagic net tows, 400 taxonomic species \item Spatial coverage: 2367 net deployments, 126 
missions \item Temporal coverage:, 1999 to 2014, annual surveys \item Source code: https://github.com/jae0/aegis/tree/master/inst/scripts/biochem.r \end{itemize} \end{frame} % ---------------------------------------------------------------- \begin{frame} \frametitle{} \begin{figure}[h] \centering \includegraphics[width=0.7\textwidth]{\sab/6.png} \caption{Total number of net deployments for each month during the time period 1999-2014.} \label{fig:AZMPdeploymentsMonthly} \end{figure} \end{frame} % ---------------------------------------------------------------- \begin{frame}[shrink] \begin{figure} \centering \begin{tabular}{cc} \includegraphics[width=0.3\textwidth]{\sab/8.png} \includegraphics[width=0.3\textwidth]{\sab/9.png} \end{tabular} \begin{tabular}{cc} \includegraphics[width=0.3\textwidth]{\sab/10.png} \includegraphics[width=0.3\textwidth]{\sab/11.png} \end{tabular} \caption{Monthly averages of all data from 1999 to 2014: total abundance (top left), total biomass computed from wet weight (top right), ratio of total biomass computed from wet weight to total abundance (bottom left) as a potential measure of the average weight of the individual organism, and abundance of \textit{Calanus finmarchicus}, \textit{Calanus hyperboreus}, and \textit{Calanus glacialis} (botton right) } \label{fig:AZMPBiomassMonthly} \end{figure} \end{frame} % ---------------------------------------------------------------- \begin{frame}[shrink] % \frametitle{} \begin{figure} \centering \includegraphics[width=1.0\textwidth]{\sab/7.png} \caption{Spatial distribution of net deployments included in the zooplankton dataset. } \label{fig:AZMPdeploymentsMonthlyMap} \end{figure} \end{frame} % ---------------------------------------------------------------- \begin{frame} \frametitle{Remote Sensing Data: Chlorophyll-a} \begin{itemize} \item Relevance: productivity, habitat, biodiversity and species of interest (in relative order) \item Sampling: Moderate Resolution Imaging Spectroradiometer (MODIS-Aqua; NASA, RSU) \item Spatial coverage: 39 N to 62.5 N and 42 W to 71 W, resolution of 1.5 km \item Temporal coverage: August 2002 to March 2015, 610 quarter-monthly (8-day) composite images \item Source code: \url{https://github.com/jae0/aegis/tree/master/inst/scripts/remoting.sensing.r} \end{itemize} \end{frame} % ---------------------------------------------------------------- \begin{frame}[shrink] \begin{figure}[ht] \centering \begin{tabular}{cc} \includegraphics[width=0.45\textwidth]{\sab/12.jpg} \includegraphics[width=0.45\textwidth]{\sab/13.jpg} \end{tabular} \begin{tabular}{cc} \includegraphics[width=0.45\textwidth]{\sab/14.jpg} \includegraphics[width=0.45\textwidth]{\sab/15.jpg} \end{tabular} \caption{MODIS semi-monthly Chl-a concentration showing spring bloom progression in the NW Atlantic in 2012. Note the intense bloom at St. 
Anns Bank during the last two weeks in March.} \label{fig:MapChlaBloomSpring} \end{figure} \end{frame} % ---------------------------------------------------------------- \begin{frame}[shrink] \begin{figure}[h] \centering \includegraphics[width=1.0\textwidth]{\sab/16.png} \caption{Chlorophyll-a concentration extracted from MODIS 8-day composite images for St.Anns Bank polygon for the time period 2002-2015.} \label{fig:modisChlaTS} \end{figure} \end{frame} % ---------------------------------------------------------------- \begin{frame}[shrink] \begin{figure}[h] \centering \includegraphics[width=0.8\textwidth]{\sab/17.png} \caption {Average Chorophyll-a concentration computed from 8-day composite images for St.Anns bank polygon for the time period 2002-2015. } \label{fig:ChlaSeasonal} \end{figure} \end{frame} % ---------------------------------------------------------------- \begin{frame} \frametitle{Remote Sensing Data: Primary production} \begin{itemize} \item Relevance: productivity, habitat, biodiversity and species of interest (in relative order) \item Sampling: MODIS, ... \item Spatial coverage: 39 N to 62.5 N and 42 W to 71 W, resolution of 1.5 km \item Temporal coverage: July 2002 to December 2014, 150 monthly images \item Source code \url{https://github.com/jae0/aegis/tree/master/inst/scripts/remoting.sensing.r} \end{itemize} \end{frame} % ---------------------------------------------------------------- \begin{frame}[shrink] \begin{figure}[h] \centering \includegraphics[width=1.0\textwidth]{\sab/18.png} \caption{Annual monthly Primary Production (PP) computed from PP composite images for St.Anns bank polygon for the time period 2002-2014.} \label{fig:ppTSmonthly} \end{figure} \end{frame} % ---------------------------------------------------------------- \begin{frame}[shrink] \begin{figure}[h] \centering \includegraphics[width=1.0\textwidth]{\sab/19.png} \caption{Average Primary Production (PP) computed from monthly composite images for St.Anns bank polygon for the time period 2002-2014.} \label{fig:ppTSannual} \end{figure} \end{frame} \begin{frame} \frametitle{Remote Sensing Data: Sea Surface Temperature (SST)} \begin{itemize} \item Relevance: productivity, habitat, biodiversity and species of interest (in relative order) \item Sampling: Advanced Very High Resolution Radiometer (AVHRR; NOAA, RSU) \item Spatial coverage: 39 N to 62.5 N and 42 W to 71 W, resolution of 1.5 km \item Temporal coverage: December 1997 to March 2015, 845 8-day composite images \item Source code: \url{https://github.com/jae0/aegis/tree/master/inst/scripts/remoting.sensing.r} \end{itemize} \end{frame} % ---------------------------------------------------------------- \begin{frame}[shrink] \begin{figure}[h] \centering \begin{tabular}{cc} \includegraphics[width=0.5\textwidth]{\sab/20.jpg} \includegraphics[width=0.5\textwidth]{\sab/21.jpg} \end{tabular} \begin{tabular}{cc} \includegraphics[width=0.5\textwidth]{\sab/22.jpg} \includegraphics[width=0.5\textwidth]{\sab/23.jpg} \end{tabular} \caption{Bi-weekly composites from AVHRR showing SST in the North West Atlantic in the spring of 2012, corresponding to the intense spring bloom at St.Anns bank shown in Figure~\ref{fig:ChlaSeasonal}.} \label{fig:SSTfromAVHRRmap} \end{figure} \end{frame} % ---------------------------------------------------------------- \begin{frame}[shrink] \begin{figure}[h] \centering \includegraphics[width=1.0\textwidth]{\sab/24.png} \caption{Sea Surface Temperature (SST) extracted from 8-day AVHRR composite images for St.Anns bank polygon for 
the time period 1997-2015.} \label{fig:SSTfromAVHRRts} \end{figure} \end{frame} % ---------------------------------------------------------------- \begin{frame} \begin{figure}[h] \centering \includegraphics[width=0.8\textwidth]{\sab/25.png} \caption{Average Sea Surface Temperature (SST) computed from 8-day AVHRR composite images for St.Anns bank polygon for the time period 1997-2015.} \label{fig:SstSeasonal} \end{figure} \end{frame} % ---------------------------------------------------------------- \begin{frame} \frametitle{Bottom temperatures} \begin{itemize} \item Relevance: productivity, habitat, biodiversity and species of interest \item Sampling: Groundfish survey, snow crab survey, AZMP profiles \item Spatial coverage: full extent, varied sampling \item Temporal coverage: 1950 - present (more historical data present but coverage is variable) \item Source code: https://github.com/jae0/aegis/temperature/tree/master/inst/scripts/01.temperature.R \end{itemize} \end{frame} % ---------------------------------------------------------------- \begin{frame} \begin{figure} \centering \includegraphics[width=0.8\textwidth]{\bd/{aegis/temperature}/maps/SSE/bottom.predictions/global/{temperatures.bottom}.png} \caption{Average bottom temperatures computed from all available data 1950-2016.} \label{fig:TemperatureBottomMap} \end{figure} \end{frame} % ---------------------------------------------------------------- \begin{frame} \frametitle{Demersal fish and macro-invertebrates} \begin{itemize} \item Relevance: productivity, habitat, biodiversity and species of interest \item Sampling: Groundfish survey, Snow crab survey \item Spatial coverage \begin{itemize} \item Groundfish: full extent, random stratified, variable number of stations \item Snow crab: Colder water environment, geostatistical grids of $\sim$~10 minutes, $\sim$~400 stations \end{itemize} \item Temporal coverage \begin{itemize} \item Groundfish: 2000 - present (started in 1970, but consistent sampling since 2000) \item Snow crab: 2005 - present (started in 1996, but consistent sampling since 2005) \end{itemize} \item Source code \begin{itemize} \item url{https://github.com/jae0/aegis/tree/master/inst/scripts/groundfish.r} \item url{https://github.com/jae0/bio.snowcrab/tree/master/inst/scripts/1.snowcrab.r} \end{itemize} \end{itemize} \end{frame} % ---------------------------------------------------------------- \begin{frame} \begin{figure}[h] \centering \begin{tabular}{cc} \includegraphics[width=0.5\textwidth]{\analysis maps/{trawl.spatial.density}.pdf} & \includegraphics[width=0.5\textwidth]{\analysis {trawl.time.density}.pdf} \end{tabular} \caption{Left: Survey locations in the Groundfish survey (orange) and snow crab survey (green). Right: Timing of surveys in the Groundfish survey (orange) and snow crab survey (green). 
} \label{fig:trawlLocationsMap} \end{figure} \end{frame} % ---------------------------------------------------------------- \begin{frame} \frametitle{Net mensuration issues} \begin{itemize} \item Historical assumption: fixed width (12.5 m); length 3.24 km \item Observed: wing spread, tow length are variable \item Problem: net mensuration not recorded systematically \item Historical data needs to be adjusted as much as possible \end{itemize} \end{frame} % ---------------------------------------------------------------- \begin{frame} \begin{figure}[h] \centering \begin{tabular}{cc} \includegraphics[width=0.5\textwidth]{\sab/numberOfSets.pdf} \includegraphics[width=0.5\textwidth]{\sab/toweddistance.pdf} \end{tabular} \caption{Left: Number of sets in the Groundfish surveys and the number of sets with usable net configuration data. Right: Towed distance comparisons in the groundfish survey.} \label{fig:trawlLocationsMap} \end{figure} \end{frame} % ---------------------------------------------------------------- \begin{frame}[shrink] \begin{figure} \centering \includegraphics[width=0.8\textwidth]{\sab/{wing.v.door.byyear}.pdf} \caption{Net spread variations by year. Note in 2011, the doorspread sensors seem to have failed completely. Note also that wingspread has been significantly larger from 2013 to 2015.} \label{fig:groundfishWingDoorAnnual} \end{figure} \end{frame} % ---------------------------------------------------------------- \begin{frame}[shrink] \begin{figure} \centering \includegraphics[width=0.5\textwidth]{\sab/{wing.v.door}.pdf} \caption{Net spread variations: doorspread vs wingspread. Note also that wingspread has been significantly larger from 2013 to 2015 but not doorspread.} \label{fig:groundfishWingDoorComparison} \end{figure} \end{frame} % ---------------------------------------------------------------- \begin{frame} \begin{figure} \centering \begin{tabular}{cc} \includegraphics[width=0.5\textwidth]{\sab/{sa.comparison}.pdf} & \includegraphics[width=0.5\textwidth]{\sab/{sa.comparison.all}.pdf} \end{tabular} \caption{Left: Surface area estimates based on GSINF logged start-end positions vs computed surface area estimated from tow track and net configuration. 
Right: Surface area estimates based on GSINF logged start-end positions vs computed surface area estimated from tow track and net configuration \textbf{as well as modeled solutions}.} \label{fig:groundfishSweptArea} \end{figure} \end{frame}
\subsection{Fishery activity}
% ----------------------------------------------------------------
\begin{frame}[shrink] \frametitle{MARFIS: Fishery footprint} \begin{itemize} \item Relevance: productivity, habitat, biodiversity and species of interest \item Sampling: MARFIS and ZIFF \item Spatial coverage: full extent \item Temporal coverage: 1999 - present \item Source code: \url{https://github.com/jae0/aegis/R/marfissci*} \end{itemize} \end{frame}
% ----------------------------------------------------------------
\begin{frame}[shrink] \begin{figure}[h] \centering \includegraphics[width=0.8\textwidth]{\sab/Rplot01.png} \caption{Commercial catch weights of halibut (\textit{Hippoglossus hippoglossus}) on Georges Bank, the Scotian Shelf, and in the Bay of Fundy.} \label{fig:halibut} \end{figure} \end{frame}
% ----------------------------------------------------------------
\begin{frame}[shrink] \begin{figure}[h] \centering \includegraphics[width=0.8\textwidth]{\sab/Rplot.png} \caption{Commercial catch weights of sea scallops (\textit{Placopecten magellanicus}) on Georges Bank, the Scotian Shelf, and in the Bay of Fundy.} \label{fig:Scallop} \end{figure} \end{frame}
% ----------------------------------------------------------------
\begin{frame}[shrink] \frametitle{Vessel activity} \begin{itemize} \item Relevance: habitat, biodiversity and species of interest \item Sampling: AIS \item Spatial coverage: global for satellite AIS, coastal ($\sim$100 km) for the Canadian Coast Guard terrestrial AIS network. \item Temporal coverage: 2013 - present \item Source code: \url{https://github.com/jae0/aegis/tree/master/inst/scripts/ais.r} \end{itemize} \end{frame}
% ----------------------------------------------------------------
\begin{frame}[shrink] \begin{figure}[h] \centering \includegraphics[width=1.0\textwidth]{\sab/CCG_AIS.pdf} \caption{Automatic Identification System (AIS) data collected from the Canadian Coast Guard terrestrial network of AIS receiving stations on 08 Dec 2015. A total of 127 vessels were detected in the area, with each colour representing a unique vessel.} \label{fig:TAIS} \end{figure} \end{frame}
% ----------------------------------------------------------------
\begin{frame}[shrink] \begin{figure}[h] \centering \includegraphics[width=0.8\textwidth]{\sab/Line_of_Sight.pdf} \caption{Bathymetric (100 m resolution) chart of the St. Anns Bank area with line-of-sight detection (red circles) for the terrestrial AIS receiving stations (red dots) around St. Anns Bank Area of Interest.} \label{fig:LOF} \end{figure} \end{frame}
% ----------------------------------------------------------------
\begin{frame}[shrink] \begin{figure}[h] \centering \includegraphics[width=0.8\textwidth]{\sab/Simard_Sight.pdf} \caption{Bathymetric (100 m resolution) chart of the St. Anns Bank area with estimated vessel detection distances (blue circles) for the terrestrial AIS receiving stations (blue dots) around St.
Anns Bank Area of Interest.} \label{fig:DetAIS} \end{figure} \end{frame}
% ----------------------------------------------------------------
\begin{frame}[shrink] \begin{figure}[h] \centering \includegraphics[width=1.0\textwidth]{\sab/Interpolation_Example1.pdf} \caption{Detected vessel positions (large filled circles) and interpolated vessel positions (lines) for three unique vessels transiting through the St. Anns Bank area, where each colour represents a unique vessel.} \label{fig:astar} \end{figure} \end{frame}
% ----------------------------------------------------------------
\begin{frame}[shrink] \begin{figure}[h] \centering \begin{tabular}{cc} \includegraphics[width=0.5\textwidth]{\sab/Counts_2013-2015_Q1.pdf} & \includegraphics[width=0.5\textwidth]{\sab/Counts_2013-2015_Q1_NNF.pdf} \end{tabular} \caption{Vessel density maps for the first quarter of a year based on satellite AIS data from 2013-2015 for all vessels (left panel) and all vessels except for the Newfoundland ferries (right panel).} \label{fig:countmaps} \end{figure} \end{frame}
% ---------------------------------------------------------------
\begin{frame}[shrink] \begin{figure}[h] \centering \begin{tabular}{cc} \includegraphics[width=0.5\textwidth]{\sab/Cost_2013-2015_Q1.pdf} & \includegraphics[width=0.5\textwidth]{\sab/Cost_2013-2015_Q1_NNF.pdf} \end{tabular} \caption{Cost maps developed for the A$^{\star}$ function to interpolate undetected vessel positions as vessels transit in and out of the Gulf of St. Lawrence.} \label{fig:costmvap} \end{figure} \end{frame}
% ---------------------------------------------------------------
\begin{frame} \frametitle{Data gaps} Expensive and/or difficult to monitor, and/or with information that is not readily available at present. \begin{itemize} \item Feeding relationships -- Stomach Database \ldots not usable \item Pelagic fish (small and large bodied) \item Pelagic invertebrates (e.g., squid, jellyfish) \item Substrate characterisation \item Large marine mammals, reptiles, birds \item Genetic diversity \item Seismic activity \item Pollution \item Ballast water \end{itemize} \end{frame}
% ---------------------------------------------------------------
\section{Methods}
% ---------------------------------------------------------------
\begin{frame} \frametitle{Biodiversity} \begin{itemize} \item Complex idea: space, time, organisational/phylogenetic components \item Control space and time (via ``rarefaction'' and interpolation) and aggregate taxonomic richness $R$ \[ \log(R) = \beta_0 + \log(SA) + \log(TS) + \epsilon \] \item Estimate dynamics of $R$ via a modeled solution to a logistic state space model. \end{itemize} \end{frame}
% ---------------------------------------------------------------
\begin{frame}[shrink] \frametitle{Productivity} \begin{columns}[c] \begin{column}{.6\textwidth} \begin{itemize} \item Complex idea: space, time, biocomplexity (= phylogeny, phenotypes, abundance, size structure, functional groups, growth, etc.)
\item Production is not the same as standing biomass (though correlated) \item Control space and time (via interpolation) and aggregate on total and species of interest \item Estimate production via a modeled solution to a logistic state space model \end{itemize} \end{column} \begin{column}{.4\textwidth} \begin{figure}[h] \caption{Predicted biomass density of snow crab in Maritimes Region based upon a combination of a Functional-habitat method and simple spatial interpolation.} \label{fig:snowcrabAbundance} \centering \includegraphics[width=1.0\textwidth]{\bd/{bio.snowcrab}/R/maps/{snowcrab.large.males_presence_absence}/snowcrab/climatology/{snowcrab.large.males_presence_absence.mean.climatology}.png} \end{figure} \end{column} \end{columns} \end{frame}
% ---------------------------------------------------------------
\begin{frame} \frametitle{Habitat} \begin{itemize} \item Hutchinson's multidimensional niche concept, $H$ \item Extra complex idea: space, time, organisation $\bigotimes$~$H$ \item Two approaches: \begin{itemize} \item \textbf{Functional} -- $H_f$: binomial model \item \textbf{Integrative} -- $H_i$: ordination \end{itemize} \item Estimate dynamics of $H_f$ and $H_i$ via a modeled solution to a logistic state space model \end{itemize} \end{frame}
% ---------------------------------------------------------------
\begin{frame} \frametitle{Functional-habitat} \begin{itemize} \item $H_f$ $\sim$ Bernoulli process (environmental factors/gradients) \item Easily modeled \item Make increasingly precise habitat definitions by adding more environmental and biological factors for increasingly precise categories of organisms \item Problem: due to natural complexity, such models will always be \textbf{incomplete} \item Problem: determinism \ldots how do we put the egg back together again?
\item Suitability: species of interest, as they are very specific \end{itemize} \end{frame}
% ---------------------------------------------------------------
\begin{frame}[shrink] \frametitle{Functional-habitat} \begin{columns}[c] \begin{column}{.5\textwidth} \begin{figure}[h] \caption{Predicted probability of observing snow crab in Maritimes Region based upon a Functional-habitat method.} \label{fig:snowcrabHabitat} \centering \includegraphics[width=1.0\textwidth]{\bd/{bio.snowcrab}/R/maps/{snowcrab.large.males_presence_absence}/snowcrab/climatology/{snowcrab.large.males_presence_absence.mean.climatology}.png} \end{figure} \end{column} \begin{column}{.5\textwidth} \begin{figure}[h] \centering \includegraphics[width=0.8\textwidth]{\bd/{bio.snowcrab}/assessments/2012/timeseries/interpolated/{snowcrab.habitat.sa}.png} \caption{Surface area of potential habitat of snow crab in Maritimes Region based upon a Functional-habitat method.} \label{fig:snowcrabHabitatTS} \end{figure} \end{column} \end{columns} \end{frame}
% ---------------------------------------------------------------
\begin{frame} \frametitle{Integral habitat} \begin{itemize} \item \textbf{Assumption}: the organism community found living in a location indicates/reflects the local habitat \item That is, the biotic community fully integrates all relevant biological and environmental interactions, over short and long time and space scales, instantaneous and cumulative \item Characterising taxonomic composition will estimate ``habitat space'' \item $H_i$ $\sim$ eigenanalysis of: \begin{itemize} \item Correlational structure (Principal Components Analysis) \item Chi-squared differences (Correspondence Analysis) \end{itemize} \end{itemize} \end{frame}
% ---------------------------------------------------------------
\begin{frame} \frametitle{Integral habitat} \begin{figure}[h] \centering
% \includegraphics[width=0.75\textwidth]{\bd/{aegis}/speciescomposition/analysis/SSE/allseasons/complex/maps/{maps.pca1.2010}.png}
\includegraphics[width=1.0\textwidth]{\mpa oneoffs/ca.png} \caption{Integral habitat based upon species composition variations in Maritimes Region. Note that the first (left) is primarily a temperature gradient expressed through species composition and the second (right) is a depth-related gradient in species assemblages.} \label{fig:speciesCompostionMap} \end{figure} \end{frame}
% ---------------------------------------------------------------
\begin{frame}[shrink] \frametitle{Connectivity: spatial scale} \begin{itemize} \item MPAs exist in a spatial context \item Spatial scale ($S_s$) will determine which processes will be relevant \item If $S_s$ is small relative to the size of an MPA, the chances of the MPA having an influence are enhanced: i.e., short-range processes dominate (e.g., less mobile species, weak dispersal, low currents, habitat heterogeneity at small scales). \item If $S_s$ is large relative to the size of an MPA, it would mean that broader/larger processes were influencing the productivity of the species (e.g., higher mobility or dispersal processes/currents, stronger spatial connectivity, habitat heterogeneity at larger scales) -- resulting in a lower likelihood of the MPA having an influence upon the species or components of interest.
\item Monitoring and assessment must respect the spatial scales implicated if one is to resolve such patterns, with a focus upon scales $>$ 1 km \item Measure via semi-variogram \end{itemize} \end{frame}
% ---------------------------------------------------------------
\begin{frame} \frametitle{Semi-variogram} \begin{itemize} \item Spatial stochastic process $y$ defined at some location $x$ \item Then the variability of $y$ changes as a function of distance $\Delta x$: \end{itemize} \begin{eqnarray*} \label{eq:semivariogram} \gamma(\Delta x) &=& \frac{1}{2} \ \textrm{Var} [ y(x) - y(x + \Delta x)] \end{eqnarray*} DEFINE: $S_s$ = distance at which the variance increases asymptotically to $\sim$0.75 of the total variance. \end{frame}
% ---------------------------------------------------------------
\begin{frame} \frametitle{Semi-variogram} \begin{columns}[c] \begin{column}{.5\textwidth} \begin{figure} \centering \includegraphics[width=1.0\textwidth]{\sab/matern.jpg} \caption{Mat\'{e}rn semivariance as a function of distance for different values of $\nu$.} \label{fig:matern} \end{figure} \end{column} \begin{column}{.5\textwidth} \begin{figure}[h] \centering \includegraphics[width=1.0\textwidth]{\mpa oneoffs/{range.bathy}.jpg} \caption{First estimate of log(spatial range; km) based upon depth variations.} \label{fig:spatialrangeBathy} \end{figure} \end{column} \end{columns} \end{frame}
% ---------------------------------------------------------------
\begin{frame} \frametitle{Connectivity: temporal scale} \begin{itemize} \item MPAs exist in a temporal context \item Temporal scale will determine which processes will be relevant and the intensity of monitoring effort \item Short-range variations require higher sampling effort to resolve/understand the issues, and vice versa. \item Measure: cumulative periodogram -- the cumulative variance found at different wavelengths ($\omega$). It is a discrete sample estimate of the continuous concept of spectral density $f(\omega)$ \end{itemize} \end{frame}
% ---------------------------------------------------------------
\begin{frame} \frametitle{Cumulative periodogram} \begin{itemize} \item Temporal process $y$ defined at some time $t$ \item Then the variability of $y$ changes as a function of wavelength $\omega$: \end{itemize} \begin{equation} \label{eq:spectraldDensity} \gamma(t) = \int_{-1/2}^{1/2} e^{2\pi i \omega t} f(\omega) \, d\omega \end{equation} DEFINE: Temporal scale = time at which the cumulative variance increases asymptotically to $\sim$0.75 of the total variance. \end{frame}
% ---------------------------------------------------------------
\begin{frame} \frametitle{Connectivity: Space-time models} \begin{itemize} \item Spatial and temporal patterns coexist and are correlated \item Recent methods using a Markov Random Field representation of $y$ permit some interesting classes of space-time models to be parameterized that can model $\gamma(t)$ and $\gamma(x)$ simultaneously. \item Another class of models, known as stochastic spatio-temporal simulation models or ``birth-death'' models, also shows much promise. \item Problem: both are still computationally demanding. \end{itemize} \end{frame}
% ---------------------------------------------------------------
\begin{frame} \frametitle{Connectivity: Tagging, mark-recapture} \begin{itemize} \item Movement: acoustic tags (OTN, EMERA, snow crab industry, Oceans) \item Mark-recapture studies: sea turtles, seals, sharks \ldots Data gaps \item Genetic connectivity (no work) \ldots
Data gap \end{itemize} \end{frame}
% ---------------------------------------------------------------
\begin{frame} \frametitle{Risk modeling} \begin{itemize} \item Risk = probability inference \item Methods of error propagation \begin{itemize} \item Deterministic: mechanistic model $\rightarrow$ approximations or simulations $\rightarrow$ error distribution of $y$ \item Phenomenological: aggregate statistical model $\rightarrow$ statistical models $\rightarrow$ error distribution of $y$ \end{itemize} \item Focus upon the phenomenological approach \end{itemize} \end{frame}
% ---------------------------------------------------------------
\begin{frame} \frametitle{Logistic model} Describe the system state $y$ (e.g., aggregate biomass, biodiversity, habitat, etc.) with a logistic model. The discrete form of the basic normalised logistic equation is: \begin{equation} \label{eqLogisticDiscrete} y_t \approx r y_{t-1} (1 - y_{t-1} ) \end{equation} with parameters $\theta=\{r,K\}$. A state space representation adds an observation model: \begin{equation} \label{eqLogisticDiscreteStateSpace} O_t = q y_t \end{equation} with parameters $\theta=\{r,K,q\}$. \end{frame}
% ---------------------------------------------------------------
\begin{frame} \frametitle{Estimation method} \begin{itemize} \item Bayesian approach to solve the above nonlinear state space problem \begin{itemize} \item Greater numerical stability \item Incorporate prior scientific knowledge in a formal manner \item Realistically propagate credible errors \item Estimate unobserved (``true'') states \item Simultaneously estimate model ``process'' errors ($\sigma^2_p$) and data ``observation'' errors ($\sigma^2_o$). \end{itemize} \item MCMC (Gibbs) sampling using the JAGS platform (Plummer 2003, 2010) \end{itemize} \end{frame}
% ---------------------------------------------------------------
\begin{frame} \frametitle{Anthropogenic threats and pressures} \begin{itemize} \item Trawling and dredging disturbances \item Exploitation of marine resources by fisheries \item Fishing-gear entanglement threats to marine mammals and sea turtles \item Vessel collision threats due to marine traffic \item Vessel-noise disturbances \end{itemize} Question: How to express the cumulative impacts on productivity, habitat, biodiversity, and endangered or threatened species? One possible way forward: normalise each threat on a zero-one scale to compare the intensity of threats across the region, then weight and combine them to examine cumulative anthropogenic threats. Others? \end{frame}
% ---------------------------------------------------------------
\begin{frame} More to come... \end{frame}
\end{document}
% --- [ Criteria for Selection of Test Programs ] ------------------------------

\subsection{Criteria for Selection of Test Programs}
\label{sec:criteria_for_selection_of_test_programs}

As part of a broader Open Science movement, the test data\footnote{Test data: \url{https://github.com/decomp/testdata}} and source code\footnote{Source code: \url{https://github.com/decomp/decomp}} of this research project are released open source. To facilitate reproducible evaluation results, the test programs must be open source and explicitly tracked by a specific version number or version control revision, and they must be provided free of charge to the wider public. While unfortunate, this excludes test programs from the Standard Performance Evaluation Corporation (\url{http://spec.org}), as those test programs are not made available free of charge.

% TODO: Update the version numbers of SQLite and Coreutils.

For coverage of real-world applications, the shell from the SQLite project (version 3.25.0) and 107 tools from the GNU Core Utilities project (version 8.30) are included in the test data. The scientific community has in recent years converged on using the GNU Core Utilities to evaluate control flow recovery methods \cite{no_more_gotos,homan_centric_decompilation,interprocedural_control_flow_recovery,semantics_preserving_structural_analysis,revng}.

% TODO: remove paragraph about pathological test programs?
%The control flow recovery methods will also be evaluated on pathological test programs which have been automatically generated to contain hundreds or thousands of nested control flow primitives.

% TODO: Add specific revision for testdata repo.
\title{AI High-Performance Solution on FPGA} \team{Nico Canzani, Dominik M\"uller} \client{Inst. for Sensors and Electronics} \coaches{% Prof. Michael Pichler,\\ Prof. Dr. Hanspeter Schmid } \repo{https://git.io/p5-aionfpga} \fssummary{ In a world of self-driving cars and automated quality control in manufacturing, real-time image classification is becoming increasingly important. Artificial intelligence (AI), and deep learning in particular, are achieving excellent classification accuracies. However, the training of a convolutional neural network (CNN) requires a sufficiently large labeled dataset. } \fsgraphics{ \begin{minipage}[t]{0.5\textwidth} \begin{minipage}[t]{0.98\textwidth} \includegraphics[width=\textwidth]{graphics/1574952009_278_10_stuffed-bunny.png} \graphicscaption{Image of the \textit{Stuffed Bunny}} \end{minipage} \end{minipage}% \begin{minipage}[t]{0.5\textwidth} \begin{flushright} \begin{minipage}[t]{0.98\textwidth} \includegraphics[width=\textwidth]{graphics/1574943825_125_8_hand-featherball.png} \graphicscaption{Image of the \textit{Hand Featherball}} \end{minipage} \end{flushright} \end{minipage} } \fscontent{ \section{Difficulty} Collecting a sufficiently large dataset of pictures is not trivial. When done manually, the camera has to be started and stopped for each throw. This likely creates many empty frames at the beginning and at the end of the capture. Those empty and therefore invalid frames must be removed to avoid errors during the training of the CNN later on. Furthermore, each valid frame must be labeled. This procedure is very error-prone if it is performed manually for each individual frame. \newcol \section{Software} To simplify the image collection, a camera throw detection mechanism is implemented and various Python scripts are used. The goal of the throw detection is to extract valid frames of a throw --- with objects on them --- from the continuous data stream of the camera. Therefore, the throw detection needs to work in real time. At a frame rate of \SI{200}{fps}, there is less than \SI{5}{ms} to process a single frame. Due to this time constraint, a simple image change detection algorithm is implemented. \newcol \section{Dataset} The dataset contains images of 22 different throwing objects, ranging from sports equipment to toys. It is fully labeled and consists of more than \num{15000} usable images with at least 480 images of each object. The employed software made it possible to collect all images over the course of only two days. The above images are two examples from the labeled dataset. The first one shows the \textit{Stuffed Bunny} and the second one shows the \textit{Hand Featherball}. } \infobox{Throwing Booth}{ \begin{minipage}{0.65\textwidth} The throwing booth is constructed from general-purpose aluminium profiles. Due to its robust impact strength, the rear panel is made of a white ABS plastic sheet and has a target hole. The white side panel serves as a consistent background for the images. It is made of a foamed PVC sheet with a fine-textured surface to reduce light reflections. The image acquisition system consists of a Baumer industrial camera combined with a suitable lens. Strong diffuse lighting is used to minimize the required exposure time and to illuminate the side panel as evenly as possible. Finally, a monitor is used to display the detected object. \end{minipage}\hfill% \begin{minipage}{0.32\textwidth} \includegraphics[width=\textwidth]{graphics/top_assembly.png} \end{minipage} }
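For illustration, a minimal sketch of such a frame-based change detection step is shown below. It is not the project's actual implementation; the grayscale frame format and both thresholds are assumptions chosen only to demonstrate the idea.

\begin{verbatim}
import numpy as np

def frame_changed(prev, curr, pixel_thresh=25, count_thresh=500):
    """Flag a frame as part of a throw if enough pixels changed.

    prev, curr: consecutive grayscale frames as 2-D uint8 numpy arrays.
    """
    # Per-pixel absolute difference (int16 avoids uint8 wrap-around)
    diff = np.abs(curr.astype(np.int16) - prev.astype(np.int16))
    # Count pixels whose brightness changed noticeably
    changed = np.count_nonzero(diff > pixel_thresh)
    return changed > count_thresh
\end{verbatim}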
\section{Tax Reform}
\label{sec:tax-reform}

Taxes in America should be progressive and efficient. Today they are neither~---~or at least, not enough. We have flattened the traditionally progressive income tax substantially. We have capped Social Security contributions at a regressively low amount. We tax corporations unequally and unfairly~---~and as economists have shown, regressively. And we have one of the most complex tax codes in the world.

But many of these problems tie directly to the corrupted system that we have in Con\-gress right now. The rich are the most important contributors to political campaigns. That produces an arms race among politicians to keep their taxes low. Corporations in America face a high nominal tax rate, but when all the exceptions and deductions are accounted for, it is one of the lowest rates in the world. Those exceptions and deductions are opportunities for congressmen to raise money. And the same with the complexity of taxes generally: every loophole is a fundraising opportunity. Our tax code is quickly becoming a device to raise money --- not for the United States Treasury, but for political campaigns.

I would work to radically simplify the tax code, while enhancing its progressivity. I would support abolishing the carried interest exemption that permits some of the richest Americans (hedge fund managers) to pay among the lowest tax rates. I would consider adding a higher marginal rate for individuals making more than \$1M a year. I would support abolishing the contribution cap for Social Security, applying the same Social Security rate to all levels of income. I would consider a proposal to offset any reduction in corporate tax with increased progressivity in income tax.

All of these changes follow directly from the principle of an efficient, progressive tax system. But they all will be vigorously opposed by the interests that dominate Washington right now. After we enact The First Reform, these changes will be easier to achieve.
%!TEX root = ../dissertation_vkslm.tex \chapter{Offline Signature Generation From Dynamic Data}\label{ch:method}
% --- [ Clang Example ] -------------------------------------------------------- \subsection{Clang Example} \label{app:clang_example} The Clang compiler supports emitting LLVM IR from C source code. Using the Clang compiler and the LLVM IR optimiser of the LLVM compiler framework, the source code of the simple C program presented in listing~\ref{lst:example1_c} was translated into the LLVM IR assembly presented in listing~\ref{lst:example1_ll}. \lstinputlisting[language=C, style=go, caption={The source code of a simple C program which iterates over a pre-test loop to conditionally increment an accumulator. The final value of the accumulator \texttt{x} determines the status code of the program. \label{lst:example1_c}}]{inc/appendices/clang_example/example1.c} \lstinputlisting[language=llvm, style=nasm, caption={An optimised version of the LLVM IR assembly, which was emitted by Clang when compiling the C source code of listing~\ref{lst:example1_c}. \label{lst:example1_ll}}]{inc/appendices/clang_example/example1.ll}
\subsection{Rock Paper Scissors (RPS)}
The first game is a simple rock paper scissors solver that, given two images, decides the winner. The possible outcomes are: tie, player 1 wins, player 2 wins. Example images, taken from \cite{RPSLS-database}, can be found in figure \ref{fig:rps_input}. There are 1440 training examples. A set of 360 examples is used to calculate the model's accuracy.
\begin{figure}[htp]
\centering
\includegraphics[width=.3\textwidth]{figures/input/paper.jpg}\hfill
\includegraphics[width=.3\textwidth]{figures/input/rock.jpg}\hfill
\includegraphics[width=.3\textwidth]{figures/input/scissors.jpg}
\caption{Examples of input pictures of the hand gestures: paper, rock and scissors~\cite{RPSLS-database}.} %%TODO import ref of pictures source
\label{fig:rps_input}
\end{figure}
\paragraph{Implementation details:} For the DeepProbLog implementation, an approach similar to \cite{DBLP} has been taken:
\begin{itemize}
\item Cross-entropy loss between the predicted and the desired outcomes
\item The network architecture: two convolutional layers with kernel size 5 and, respectively, 6 and 16 filters, each followed by a max-pooling layer of size 2 with stride 2 and a ReLU activation. These are followed by three linear layers of sizes 120, 84 and 3; the first two are followed by ReLU activations and the last one by a softmax layer.
\item The learning rate has been set to 0.0001.
\item Adam \cite{kingma2014adam} optimization is used for the neural networks, SGD for the logic parameters.
\end{itemize}
Listing \ref{lst:rps-logic} shows the logical model of the DeepProbLog implementation.
\begin{lstlisting}[label={lst:rps-logic},language=Prolog,frame=single,caption={Rock paper scissors DeepProbLog model},captionpos=b]
nn(rps_net,[X],Y,[paper,scissors,rock]) :: sign(X,Y).
rps(X,Y,0) :- sign(X,Z), sign(Y,Z).
rps(X,Y,1) :- sign(X,paper), sign(Y,rock).
rps(X,Y,2) :- sign(X,paper), sign(Y,scissors).
rps(X,Y,2) :- sign(X,rock), sign(Y,paper).
rps(X,Y,1) :- sign(X,rock), sign(Y,scissors).
rps(X,Y,1) :- sign(X,scissors), sign(Y,paper).
rps(X,Y,2) :- sign(X,scissors), sign(Y,rock).
\end{lstlisting}
The implementation and architecture of the CNN model, which is used as a baseline for comparison with the DeepProbLog network, are similar to those of the DeepProbLog model. The outputs of the last layer, however, represent the winner of the game (3 options) instead of the hand gesture (also 3 options).

\subsubsection{Results}
The loss and accuracy of both models over the iterations are plotted in figure \ref{fig:rps_output}. We can clearly see that the DeepProbLog model has an advantage over the CNN model. The CNN model reaches 100\% accuracy after 1350 iterations in 24.2 seconds, while the DeepProbLog model reaches it after 350 iterations in 18.7 seconds.
\begin{figure}[h] \centering \begin{subfigure}[b]{0.45\textwidth} \begin{tikzpicture} \begin{axis}[xlabel=Iterations,ylabel=Loss] \addplot[thin,red] table [x=i, y=loss, col sep=comma] {results/RPS/RPS_BaseLine_loss.log}; \addplot[thin,blue] table [x=i, y=loss, col sep=comma] {results/RPS/RPS_Problog_loss.log}; \end{axis} \end{tikzpicture} \caption{Loss of both networks over the number of iterations} \end{subfigure} \hfill \begin{subfigure}[b]{0.45\textwidth} \begin{tikzpicture} \begin{axis}[xlabel=Iterations,ylabel=Accuracy] \addplot[thin,red] table [x=i, y=Accuracy, col sep=comma] {results/RPS/RPS_BaseLine_accuracy.log}; \addplot[thin,blue] table [x=i, y=Accuracy, col sep=comma] {results/RPS/RPS_Problog_accuracy.log}; \end{axis} \end{tikzpicture} \caption{Accuracy of both networks over the number of iterations} \end{subfigure} \caption{Performance (loss and accuracy) of both networks over the number of iterations: blue for DeepProbLog, red for CNN} \label{fig:rps_output} \end{figure}
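For concreteness, a minimal PyTorch sketch of the CNN architecture described in the implementation details above is given below. It is an illustrative reconstruction rather than the exact code used in the experiments; in particular, the single-channel $28\times28$ input resolution (and hence the flattened feature size of $16 \cdot 4 \cdot 4 = 256$) is an assumption.

\begin{lstlisting}[language=Python,frame=single,caption={Illustrative PyTorch sketch of the described CNN (input size assumed)},captionpos=b]
import torch.nn as nn

class RPSNet(nn.Module):
    """Two conv blocks (6 and 16 filters, kernel 5) + three linear layers."""
    def __init__(self):
        super().__init__()
        self.features = nn.Sequential(
            nn.Conv2d(1, 6, kernel_size=5), nn.MaxPool2d(2, 2), nn.ReLU(),
            nn.Conv2d(6, 16, kernel_size=5), nn.MaxPool2d(2, 2), nn.ReLU(),
        )
        self.classifier = nn.Sequential(
            nn.Linear(16 * 4 * 4, 120), nn.ReLU(),  # 4x4 assumes 28x28 inputs
            nn.Linear(120, 84), nn.ReLU(),
            nn.Linear(84, 3), nn.Softmax(dim=1),
        )

    def forward(self, x):
        x = self.features(x)
        return self.classifier(x.view(x.size(0), -1))
\end{lstlisting}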
\section{Introduction}
\label{sec:introduction}

Programs often fail. To sufficiently understand and prevent failures, developers first need to reproduce these bugs, i.e., re-execute the program in a way that yields the same output and the same failure. However, direct re-execution is not suitable for non-deterministic failures, as they may not reappear during re-execution.

Non-deterministic failures are the consequence of non-deterministic instructions. The instructions executed by a program can be divided into two categories. One is deterministic, meaning that the behavior of the instruction is identical in every execution. The other is non-deterministic, meaning that its results differ between executions. Although most of the CPU execution is deterministic (e.g., \texttt{ADD}), non-deterministic instructions (e.g., reading user input) are also pervasive. Typical sources of nondeterminism include system calls, interrupts, signals, and data races in concurrent programs \cite{ronsse_recplay_1999}. All these non-deterministic events can be further classified into two types: nondeterminism in the data flow (for example, certain system calls such as \texttt{getrandom} and \texttt{getpid}) and nondeterminism in the control flow (for example, concurrency bugs due to memory accesses in inconsistent order) \cite{getrandom2}.

Record-and-replay is a class of approaches that addresses this challenge. Most record-and-replay systems work by first recording non-deterministic events during the original run of a program and then substituting these recorded results during subsequent re-executions. A record-and-replay system can thereby guarantee that each replay is identical to the initial run. The fact that a number of replay systems have been built and put into use in recent years illustrates the value of record-and-replay systems in practice \cite{203227,replay_survey,altekar_odr_2009,bhansali_framework_2006}.

% There are several ways to capture calls online at runtime: \textbf{PinPlay}, \textbf{REPT}, \textbf{rr}
There is a rich body of research on record-and-replay systems, and their treatment of non-deterministic events varies. Early record-and-replay systems tended to use virtualization techniques so as to observe and record all nondeterminism of the entire program at the hypervisor level, but virtual machines are very heavyweight \cite{dunlap_revirt_2003, dunlap_smp-revirt_2008}. Some systems use dynamic binary instrumentation to obtain the result of each executed instruction, but this is very inefficient \cite{bhansali_framework_2006}. Other systems choose not to record at runtime in order to avoid the high cost of recording; instead, they infer the non-deterministic events from the control flow and other collected information \cite{altekar_odr_2009,cui_rept_2018}. However, inference often does not reproduce program execution as faithfully as recording, and the time required for inference, which in the worst case amounts to a search of the entire space, is a problem \cite{replay_survey}. There are also systems that rely on custom hardware, which inevitably limits their usefulness in practice \cite{montesinos_capo_2009}. Recently, some practical systems have adopted the tracing facilities provided by Linux and thus achieve better efficiency. Nevertheless, they still introduce a considerable overhead (50\%) and are therefore only enabled when the developer explicitly needs to record and replay \cite{203227}.
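As a concrete (and deliberately trivial) illustration of why recorded results must be substituted at replay time, consider the following C program; it is not taken from any of the systems cited above. Its output and even its exit status depend on non-deterministic system calls, so a faithful replay has to supply the recorded return values instead of re-executing the calls.

\begin{verbatim}
#include <stdio.h>
#include <time.h>
#include <unistd.h>

int main(void) {
    /* Both values are obtained via system calls and differ between runs. */
    pid_t pid = getpid();
    time_t now = time(NULL);
    printf("pid=%d time=%ld\n", (int)pid, (long)now);
    /* Even the exit status is non-deterministic. */
    return (int)(now % 2);
}
\end{verbatim}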
This thesis focuses on the data recording part of record-and-replay systems, more precisely, the recording of non-deterministic events caused by system calls. We argue that a \textit{practical} recording system should (1) run online, meaning that the recording has little performance impact on the execution of the target program, (2) log all data without any omission, (3) work on commercial off-the-shelf hardware, (4) not require any modification to the target program, and (5) not require any modification to the kernel.

In this thesis, we propose \TheName{}, a practical solution for syscall capturing. It works with unmodified Linux programs on commercial off-the-shelf (OTS) hardware. Our original design targeted the ARM platform, but the system can be applied to other platforms as well (e.g., x86, RISC-V); we demonstrate its usefulness on both x86 and ARM platforms.

\TheName{} consists of three components: the \CoreHook{}, the \Filter{} and the \RecordBuffer{}. The \CoreHook{} is a probe on system calls. It inspects each system call and collects its effects on memory and registers by considering the semantics of the system call. The \Filter{} stores relevant information about the process that issues the system call and compares this information with the characteristics specified by the developer. The \RecordBuffer{} temporarily stores the recorded system calls and dumps them to a file.

We implement a prototype of \TheName{} and evaluate it with the aforementioned requirements in mind. The evaluation results show that \TheName{} completely records system calls.
% We also leverage \TheName{} to diagnose 16 failed programs (7 code segments
% reconstructed from application and 9 real-world applications including Python, Memcached,
% and SQLite). The diagnosis indicates that \TheName{} effectively identifies the root cause
% of the failures caused by concurrency and sequential bugs
We also leverage \TheName{} to record 16 failed programs (7 code segments reconstructed from applications and 9 real-world applications including Python, Memcached, and SQLite). The recording shows that \TheName{} effectively records system calls with an average performance overhead of up to 5.3\%. Meanwhile, \TheName{} directly works on the unmodified binary of the target program and does not rely on any hardware modification.

% The main challenge confornt to \TheName is how to
% we implement \TheName in three components. The hook component is a couple of callback functions that hooks the entrance and exit of each system call. The filter component are functions
In summary, we make the following contributions:
\begin{itemize}
\item We present \TheName{}, a system call recording tool for ARM platforms, which works with unmodified binaries and requires no hardware modification.
%, thus
%suitable for in-production deployment.
\item We achieve performance high enough to allow always-on tracing in production environments, which gives \TheName{} the ability to reconstruct complete records.
\item We implement a prototype of \TheName{} and evaluate it with real-world applications. The evaluation results demonstrate that \TheName{} successfully records various types of applications with up to 5.3\% runtime performance overhead on average.
\end{itemize}
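To make the notion of a system call record more tangible, the following C sketch shows what a single entry captured by the \CoreHook{} might contain. The layout and field names are purely illustrative assumptions made for exposition; they do not correspond to the actual data structures of \TheName{}.

\begin{verbatim}
/* Hypothetical layout of one record emitted by the CoreHook probe. */
struct syscall_record {
    unsigned long nr;        /* system call number                      */
    unsigned long args[6];   /* argument registers captured on entry    */
    long ret;                /* return value captured on exit           */
    unsigned long buf_addr;  /* user buffer the kernel wrote to, if any */
    unsigned long buf_len;   /* bytes copied so replay can restore them */
};
\end{verbatim}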
% 9.5.07
% This is a sample documentation for Compass in the tex format.
% We restrict the use of tex to the following subset of commands:
%
% \section, \subsection, \subsubsection, \paragraph
% \begin{enumerate} (no-nesting), \begin{quote}, \item
% {\tt ... }, {\bf ...}, {\it ... }
% \htmladdnormallink{}{}
% \begin{verbatim}...\end{verbatim} is reserved for code segments
% ...''
%
\section{Explicit Copy}
\label{ExplicitCopy::overview}

This checker detects classes that are missing a copy constructor or a copy assignment operator. If the user wants to rely on the compiler-generated (default) ones, the class has to be annotated with a special comment. These comments should contain ``\texttt{use default copy constructor}'' or ``\texttt{use default copy operator}''. This checker enforces rule 53 from H. Sutter and A. Alexandrescu, \emph{C++ Coding Standards}: ``Explicitly enable or disable copying''.

\subsection{Parameter Requirements}

No parameter is required.

\subsection{Non-Compliant Code Example}

\begin{verbatim}
class A
{
};
\end{verbatim}

\subsection{Compliant Solution}

\begin{verbatim}
class A
{
  public:
    A(const A& other)
    {
    }

    A& operator=(const A& other)
    {
      return *this;
    }
};
\end{verbatim}

\subsection{Mitigation Strategies}
\subsubsection{Static Analysis}

Compliance with this rule can be checked using structural static analysis checkers with the following algorithm:

\begin{enumerate}
\item For all class definitions, try to find a copy constructor and a copy assignment operator, or user comments stating that the class should use the default ones.
\end{enumerate}

\subsection{References}

Alexandrescu A. and Sutter H. {\it C++ Coding Standards: 101 Rules, Guidelines, and Best Practices}. Addison-Wesley, 2005.
\documentclass[pdflatex,compress,9pt, xcolor={dvipsnames,dvipsnames,svgnames,x11names,table}, hyperref={colorlinks = true,breaklinks = true, urlcolor = NavyBlue, breaklinks = true}]{beamer} \usetheme{Goettingen} \usepackage[utf8]{inputenc} \usepackage[T2A,T1]{fontenc} \usepackage{gensymb} % degree symbol \usepackage[super]{nth} % ---------------------------------------------------------------------------- % *** START BIBLIOGRAPHY <<< % ---------------------------------------------------------------------------- \usepackage[ backend=biber, style = numeric, % style=nature, % style=science, % style=apa, % style=mla, % style=phys, maxbibnames=99, % citestyle=authoryear, citestyle=numeric, giveninits=true, isbn=true, url=true, natbib=true, sorting=ndymdt, bibencoding=utf8, useprefix=false, language=auto, autolang=other, backref=true, backrefstyle=none, indexing=cite, ]{biblatex} \DeclareSortingTemplate{ndymdt}{ \sort{ \field{presort} } \sort[final]{ \field{sortkey} } \sort{ \field{sortname} \field{author} \field{editor} \field{translator} \field{sorttitle} \field{title} } \sort[direction=descending]{ \field{sortyear} \field{year} \literal{9999} } \sort[direction=descending]{ \field[padside=left,padwidth=2,padchar=0]{month} \literal{99} } \sort[direction=descending]{ \field[padside=left,padwidth=2,padchar=0]{day} \literal{99} } \sort{ \field{sorttitle} } \sort[direction=descending]{ \field[padside=left,padwidth=4,padchar=0]{volume} \literal{9999} } } \addbibresource{YO.bib} \renewcommand*{\bibfont}{\tiny} %\scriptsize \footnotesize \setbeamertemplate{bibliography item}{\insertbiblabel} % ---------------------------------------------------------------------------- % *** END BIBLIOGRAPHY <<< % ---------------------------------------------------------------------------- \title{Satellite Image Based Mapping of Wetland Tundra Landscapes Using ILWIS GIS} \author{Polina Lemenkova} \date{March 19, 2015} \begin{document} \begin{frame} \titlepage \end{frame} \section*{Outline} \begin{frame} \tableofcontents \end{frame} \section{Introduction} \subsection{Research Goals} \begin{frame}{Research Goals} \begin{itemize} \item Distribution of different types of landscapes in the wetland tundra of the Yamal Peninsula \item Monitoring changes in the landscapes of tundra \item Analysis of the landscape dynamics for 2 decades (1988-2011). \item Data: Landsat TM satellite images for 1988 and 2011 \item Application of ILWIS GIS for spatial analysis and data processing on the region of Bovanenkovo, Yamal. \item Technical approach: Remote sensing data processing by ILWIS GIS. \item Methods: Supervised classification of Landsat TM images \item Study area: tundra landscapes in the wetlands of the Yamal Peninsula in the Far North of Russia \end{itemize} \end{frame} \section{Geographic Settings} \subsection{Geomorphology of the Yamal Peninsula} \begin{frame}{Geomorphology of the Yamal Peninsula} \begin{minipage}[0.4\textheight]{\textwidth} \begin{columns}[T] \begin{column}{0.5\textwidth} \begin{figure}[H] \centering \includegraphics[width=4.0cm]{F3.png} \end{figure} \begin{figure}[H] \centering \includegraphics[width=4.0cm]{F4.png} \end{figure} \end{column} \begin{column}{0.5\textwidth} \vspace{2em} Key points on the Yamal geomorphology: \begin{itemize} \item[-] Elevations almost flat, terrain less than 90 m. 
\item[-] Seasonal flooding
\item[-] Active processes of erosion
\item[-] Permafrost distribution
\item[-] Local formation of ground cryogenic landslides
\item[-] Specific ecological and climatic conditions (Arctic)
\end{itemize}
\end{column}
\end{columns}
\end{minipage}
\end{frame}
\subsection{Landscapes of the Yamal Peninsula}
\begin{frame}\frametitle{Landscapes of the Yamal Peninsula}
\begin{minipage}[0.4\textheight]{\textwidth}
\begin{columns}[T]
\begin{column}{0.5\textwidth}
\begin{itemize}
\item[*] Cryogenic landslides, formed as a result of soil erosion, are typical processes in the Yamal Peninsula
\item[*] Soil erosion develops as a result of soil subsidence and soil thawing
\end{itemize}
\begin{figure}[H]
\centering
\includegraphics[width=5.0cm]{F1.png}
\end{figure}
\end{column}
\begin{column}{0.5\textwidth}
\begin{itemize}
\item[*] Cryogenic landslides have a negative impact on the local ecosystems
\item[*] Cryogenic landslides disrupt the strata of the soil and slow down the restoration of vegetation after the landslide
\end{itemize}
\begin{figure}[H]
\centering
\includegraphics[width=5.0cm]{F2.png}
\end{figure}
\end{column}
\end{columns}
\end{minipage}
\end{frame}
\subsection{Cryogenic Landslides on the Yamal Peninsula}
\begin{frame}{Cryogenic Landslides on the Yamal Peninsula}
\begin{itemize}
\item[--] The negative effect of cryogenic landslides is a change in the types of vegetation cover at the place of their formation.
\item[--] For 10 years after active cryogenic landslide processes, the area of their occurrence remains uncovered.
\item[--] Then, over the next few years, a process of slow restoration of the soil and vegetation cover takes place
\item[--] Vegetation succession: plant communities with dominant herbs, mosses, lichens and sedge, willow and meadows with short shrubs.
\item[--] Vegetation in the early stages of restoration (mosses, lichens) indirectly indicates recent formation of the cryogenic landslides
\item[--] Meadows and willow shrub, on the contrary, indicate a relatively developed and restored plant community.
\item[--] Areas subjected to the formation of cryogenic landslides in the past 2-3 decades are usually characterized by the spread of willow and shrubbery, an indirect indicator of these processes in the past.
\end{itemize}
\end{frame}
\section{Methods}
\subsection{Data Processing Algorithm}
\begin{frame}\frametitle{Data Processing Algorithm}
\begin{minipage}[0.4\textheight]{\textwidth}
\begin{columns}[T]
\begin{column}{0.5\textwidth}
\vspace{2em}
\begin{figure}[H]
\centering
\includegraphics[width=3.5cm]{F5.png}
\end{figure}
\begin{figure}[H]
\centering
\includegraphics[width=3.5cm]{F6.png}
\end{figure}
Examples of various types of vegetation typical for the Yamal tundra, Arctic.
\end{column}
\begin{column}{0.5\textwidth}
Algorithm of data processing in ILWIS GIS:
\begin{enumerate}[i]
\item Data collection, import and conversion
\item Data: 2 Landsat TM images, 1988 \& 2011
\item Data pre-processing
\item Georeferencing: WGS 1984 ellipsoid to UTM, E42, NW
\item 3 spectral channels for image processing: color composite \& multi-band layers
\item Clustering, segmentation and classification
\item GIS mapping, spatial analysis
\item Google Earth imagery verification
\item Results interpretation
\end{enumerate}
\end{column}
\end{columns}
\end{minipage}
\end{frame}
\subsection{Research Questions and Aims}
\begin{frame}\frametitle{Research Questions and Aims}
\begin{minipage}[0.4\textheight]{\textwidth}
\begin{columns}[T]
\begin{column}{0.5\textwidth}
\vspace{2em}
\begin{figure}[H]
\centering
\includegraphics[width=5.0cm]{F7.png}
\end{figure}
\end{column}
\begin{column}{0.5\textwidth}
\vspace{2em}
Research questions and aims:
\begin{enumerate}[(I)]
\item The aim of the work is the use of GIS and RS data (Landsat TM) for monitoring tundra land cover types
\item Approaches: image classification, visualization and mapping
\item Have landscapes within the test territory of the study region changed over the past 23 years (1988-2011)?
\item What land cover types were dominating previously, and which ones dominate now?
\item Methodologically, how can ILWIS GIS be used to process RS data?
\end{enumerate}
\end{column}
\end{columns}
\end{minipage}
\end{frame}
\subsection{Landsat TM images}
\begin{frame}\frametitle{Landsat TM images}
\begin{minipage}[0.4\textheight]{\textwidth}
\begin{columns}[T]
\begin{column}{0.5\textwidth}
\begin{itemize}
\item AOI mask: 67\degree00'--72\degree00'E, 70\degree00'--71\degree00'N
\end{itemize}
\begin{figure}[H]
\centering
\includegraphics[width=5.0cm]{F8.png}
\end{figure}
\end{column}
\begin{column}{0.5\textwidth}
\begin{itemize}
\item Time span: 23 years (1988-2011)
\item Images taken during June to assess vegetation
\item Original Landsat TM images (.tiff) were converted to the Erdas Imagine .img format.
\end{itemize}
\begin{figure}[H]
\centering
\includegraphics[width=5.0cm]{F9.png}
\end{figure}
\end{column}
\end{columns}
\end{minipage}
\end{frame}
\subsection{Image Georeferencing}
\begin{frame}\frametitle{Image Georeferencing}
\begin{figure}[H]
\centering
\includegraphics[width=10.0cm]{F14.png}
\end{figure}
Georeference Corner Editor of ILWIS GIS
\end{frame}
\subsection{Spectral Reflectance}
\begin{frame}\frametitle{Spectral Reflectance (SR)}
\begin{enumerate}[SR i.]
\item Image classification is grouping pixels into classes (merging pixels) \item Clusters correspond to the types of vegetation cover according to the AOI settings \item Classification is based on using spectral brightness of the image pixels \item Spectral and texture characteristics of various land cover types are displayed on the image as different spectral brightnesses of the pixels \item Spectral reflectances show spectral reflectivity of the land cover types (through pixels' spectral brightness) and individual properties of the vegetation objects detected on a raster image \end{enumerate} \end{frame} \subsection{Image Clustering} \begin{frame}\frametitle{Image Clustering} \begin{enumerate}[(a)] \item Cluster analysis is a statistical procedure for grouping objects (pixels on a raster image) \item Pixels are ordered into homogeneous thematic groups (clusters) \item Each digital pixel in the image is assigned to the corresponding land cover type group \item Grouping is based on the proximity of the spectral brightness value (Digital Number, DN) of the pixel to the centroid. \item The logical segmentation algorithm consists of grouping the pixels in the image (merging pixels) into clusters. \item Grouping pixels occurs in semi-automatic mode based on the distinctness from neighboring (neighbor pixels). \item The process is repeated interactively until optimal values of the classes (and pixels attached to these classes) are reached. \end{enumerate} \end{frame} \subsection{Image Classification} \begin{frame}\frametitle{Image Classification (IC)} \begin{minipage}[0.4\textheight]{\textwidth} \begin{columns}[T] \begin{column}{0.5\textwidth} \vspace{2em} \begin{figure}[H] \centering \includegraphics[width=4.0cm]{F10.png} \end{figure} \begin{figure}[H] \centering \includegraphics[width=4.0cm]{F11.png} \end{figure} \end{column} \begin{column}{0.5\textwidth} \vspace{2em} \begin{enumerate}[{IC}-1] \item Thematic mapping is based on the results of the classification of images \item Visualization of the landscapes' structure and vegetation types within the AOI. \item To classify land cover types, image pixels were identified for each category and grouped into different land categories. \item Land cover types were evaluated and identified with each land cover class \item Number of cluster groups is 13 representing vegetation land cover types of the Yamal tundra \end{enumerate} \end{column} \end{columns} \end{minipage} \end{frame} \section{Results} \subsection{Mapping Results} \begin{frame}\frametitle{Mapping Results} \begin{minipage}[0.4\textheight]{\textwidth} \begin{columns}[T] \begin{column}{0.5\textwidth} 1988 \begin{figure}[H] \centering \includegraphics[width=5.0cm]{F12.png} \end{figure} \end{column} \begin{column}{0.5\textwidth} 2011 \begin{figure}[H] \centering \includegraphics[width=5.0cm]{F13.png} \end{figure} \end{column} \end{columns} \end{minipage} \end{frame} \subsection{Results Interpretation} \begin{frame}\frametitle{Results Interpretation} \begin{itemize} \item Statistical results of calculations of types of vegetation cover were obtained in a semi-automatic mode in ILWIS GIS \item 1988 'willow shrubs' type covered 412,292 pixels from the total part of the AOI, and 'high willow' class is 823,430 pixels \item 2011: willow increased to 651427 pixels, ('willow shrubs'), and 893092 pixels ('high willows') \item Both combined classes of willows, typical for AOI with a high water content, cover total 1544519 pixels, which is 40.27 \%. 
\item Area of grasses decreased compared to shrub and willow \item Max area covered by class 'heather and dry grass' is 933798 pixels \end{itemize} \end{frame} \subsection{Google Earth Verification} \begin{frame}\frametitle{Google Earth Verification} \begin{minipage}[0.4\textheight]{\textwidth} \begin{columns}[T] \begin{column}{0.5\textwidth} \vspace{2em} \begin{figure}[H] \centering \includegraphics[width=5.0cm]{F15.png} \end{figure} The selected area represents one of the most diversified part of the tundra landscapes of Yamal \end{column} \begin{column}{0.5\textwidth} \vspace{2em} \begin{itemize} \item AOI has a complex structure of boggy landscapes and unique types of vegetation \item Therefore, in order to control the most difficult areas, the images were verified by Google Earth \item Visualization of the same area in the satellite image and Google Earth image at the same time. \item This made it possible to visually check heterogeneous areas with mixed land cover types and landscapes \end{itemize} \end{column} \end{columns} \end{minipage} \end{frame} \section{Conclusions} \begin{frame}{Conclusions} \begin{enumerate}[I.] \item Monitoring landscape changes is an important tool for assessing the ecological stability of a region \item Spatial analysis of the multi-temporal satellite images by ILWIS GIS algorithms is an effective tool \item Research demonstrated how Yamal wetland tundra landscapes changed over a 23-year period of time \item Data included LandsatTM satellite imagery covering the Yamal Peninsula, Far North of Russia \item Image processing was done by classification methods. \item Results shown changes in the landscapes from 1988 to 2011 \item Results confirm presence of the destructive processes caused changes in tundra boggy landscapes. \item Research demonstrated successful ILWIS GIS based of the RS data analysis, effective for tundra monitoring \end{enumerate} \end{frame} \section{Thanks} \begin{frame}{Thanks} \centering \LARGE \emph{Thank you for attention !}\\ \vspace{5em} \normalsize Acknowledgement: \\ Current research has been funded by the \\ Finnish Centre for International Mobility (CIMO) \\ Grant No. TM-10-7124, for author's research stay at \\ Arctic Center, University of Lapland (2012). \end{frame} %%%%%%%%%%% Bibliography %%%%%%% \section{Bibliography} \Large{Bibliography} \nocite{*} \printbibliography[heading=none] %%%%%%%%%%% Bibliography %%%%%%% \end{document} %Changing the font size locally (from biggest to smallest): %\Huge %\huge %\LARGE %\Large %\large %\normalsize (default) %\small %\footnotesize %\scriptsize %\tiny \end{document}
{ "alphanum_fraction": 0.7119777862, "avg_line_length": 35.2917594655, "ext": "tex", "hexsha": "104d7f14e0d224431b2ef2a38ce3bca4d4d290ad", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "c371098f7b30f413d1ae64c1d27c983928afb2ef", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "paulinelemenkova/LaTeX-Presentations", "max_forks_repo_path": "Lemenkova-Yoshkar-Ola.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "c371098f7b30f413d1ae64c1d27c983928afb2ef", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "paulinelemenkova/LaTeX-Presentations", "max_issues_repo_path": "Lemenkova-Yoshkar-Ola.tex", "max_line_length": 212, "max_stars_count": null, "max_stars_repo_head_hexsha": "c371098f7b30f413d1ae64c1d27c983928afb2ef", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "paulinelemenkova/LaTeX-Presentations", "max_stars_repo_path": "Lemenkova-Yoshkar-Ola.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 4315, "size": 15846 }
\documentclass[]{article} \usepackage{paralist} % Used for the compactitem environment which makes bullet points with less space between them \usepackage{amsmath} \usepackage{graphicx} \graphicspath{ {pics/} } \usepackage{algorithm}% http://ctan.org/pkg/algorithm \usepackage{algpseudocode}% http://ctan.org/pkg/algorithmicx \usepackage{multirow} \usepackage{wrapfig} \usepackage[a4paper, total={6in, 10in}]{geometry} \usepackage{tabularx} \newcolumntype{Y}{>{\centering\arraybackslash}X} % ADD THE FOLLOWING COUPLE LINES INTO YOUR PREAMBLE \let\OLDthebibliography\thebibliography \renewcommand\thebibliography[1]{ \OLDthebibliography{#1} \setlength{\parskip}{0pt} \setlength{\itemsep}{0pt plus 0.3ex} } \usepackage[affil-it]{authblk} \usepackage{etoolbox} \usepackage{lmodern} \makeatletter \patchcmd{\@maketitle}{\LARGE \@title}{\fontsize{16}{8}\selectfont\@title}{}{} \makeatother \renewcommand\Authfont{\fontsize{10}{8}\selectfont} \renewcommand\Affilfont{\fontsize{7}{8}\itshape} %---------------------------------------------------------------------------------------- % TITLE SECTION %---------------------------------------------------------------------------------------- \title{\vspace{-2.0cm}Towards Bayesian lifelong learning in mapping} \author{Panagiotis Chatzichristodoulou, Rico M\"{o}ockel, Kurt Driessens, Anne Van Rossum} \affil{University of Maastricht, DoBots B.V.} \date{} %---------------------------------------------------------------------------------------- \begin{document} \maketitle % Insert title %---------------------------------------------------------------------------------------- % ARTICLE CONTENTS %---------------------------------------------------------------------------------------- \section{Introduction} Simultaneous localization and mapping is one of the fundamental problems of autonomous systems\cite{probRobs}. In order for robots to be truly autonomous they have to be able to enter an environment and map its structure. To that direction, a lot of effort has been put in algorithms that are able to map static environments; with solutions like EKF-slam\cite{ekf} and FastSlam\cite{slam} we can now efficiently map such environments. The logical extension to methods that can map static environments is methods that remove this restriction. The idea of lifelong robot learning was introduced as a general concept to the literature by Sebastian Thrun\cite{liflonglearning}. Konolige et al.\cite{lifelongmaps} specifically focus on lifelong learning in mapping. In the PhD thesis of Walcott\cite{aishalong} long term mapping methods are decomposed to 4 basic subproblems: the problem of being able to continuously incorporate new information, tracking the growing DPG, detecting changes and update the map online, being able to handle changes to the map as changes occur with the passage of time. The first two problems can be though of as compression problems as the map increases over time whereas the latter ones can be though of as dynamic environment problems. In this project the focus will be directed towards slam methods that use RGBD devices like Microsoft's Kinect to perform slam. The goal of this thesis is to introduce a novel approach to tackle the compression problem of long term mapping methods that use the Kinect devices while using Bayesian non parametric methods as the base of the solution. Dirichlet processes and Dirichlet process mixture models\cite{nonParam} are the cornerstone of Bayesian non parametric statistics. 
The strength of those models lies in the fact that they allow the model's mixture components to grow as much as needed so as to best fit the data. The main motivation of this thesis is to integrate such methods into mapping algorithms as a means of creating compressed representations of the environment that also retain enough expressiveness so that they can be used as reference points when navigating through the environment. That would tackle the compression subproblem of long term mapping and would be a solid first step towards a general Bayesian solution to the long term mapping problem. The rest of the paper is structured as follows. In Section \ref{sec:literature} the relevant literature is reviewed, Section~\ref{sec:theory} introduces the basic background theories of the model, Section~\ref{sec:model} defines the method presented in this paper, and Section~\ref{sec:results} shows experimental results of the method. Finally, conclusions and future directions are presented in Section~\ref{sec:discussion}.
%------------------------------------------------
\section{Literature review} \label{sec:literature}
The literature review is focused on four related sub-fields: object based slam (semantic slam), point cloud object segmentation, non-parametric clustering methods and the correspondence problem in slam.
\subsection{Object based slam}
Salas-Moreno et al.\cite{slam++} define a method of performing object based slam for specific classes of objects. The objects are identified by a camera mounted on top of the robot. By having a model of pre-trained objects, slam can be performed in environments where the robot knows what objects to expect. Castle et al. use object recognition to perform object based slam with the use of a hand-held camera. Selvatici et al.\cite{objslam} use a similar approach while exploiting structural information such as object height and position within the room. That way a couch, which is a large object situated at floor level, is easier to recognize. Choudhary et al.\cite{objectpointslam} use point clouds and an object database to match objects currently seen with known objects within their database. They use omnimaper\cite{omnimaper} as their mapping method and, as a representation, a combination of downsampled voxel grids with additional normal and curvature information. Finally, all their operations are done on the non-planar components of the point cloud. Seongyong Koo et al.\cite{objectDisc} introduce a method of unsupervised object individuation from RGB-D image sequences. They cluster their initial cloud into candidate objects using Euclidean clustering and proceed to extract features like the Euclidean (L2) distance and the Kullback-Leibler distance between point cloud objects. They use IMFT to solve their tracking problem.
\subsection{Point Cloud Object clustering}
Trevor et al.\cite{pointSeg} take positional information, Euclidean distances and the normals of points as input to their functions and output segments that are part of the same object. The PCL library\cite{pcl} provides methods like Euclidean clustering and conditional Euclidean clustering that use a number of heuristics based on normal as well as curvature information to extract segments in the point cloud that represent objects. Furthermore, although there is a lot of research on segmentation of point clouds in scenes, the emphasis is usually on extracting geometric primitives~\cite{planarSeg},\cite{planarSeg2} using cues like normals and curvature.
Rabbani et al.\cite{segOverview} introduce a new method of object segmentation using KNN as their base algorithm. They also present a very informative literature review along with the strengths and weaknesses of existing methods. Finally, Triebel et al.\cite{smartSeg} introduce a general clustering framework that does not rely on plane segmentation. Instead of segmenting the plane using classical approaches like RANSAC or MLESAC, they introduce a framework in which they make no assumptions regarding plane data.
\subsection{Non Parametric Bayesian methods}
Dirichlet processes and Dirichlet process mixture models are the cornerstone of Bayesian non-parametric statistics. Radford M. Neal\cite{bayes:neal}, with his paper on MCMC methods for Dirichlet process mixture models (DPMMs), made the definitive step towards DPMMs receiving a lot of attention. Variational inference for DPMMs, introduced by Jordan et al.\cite{bayes:jordan}, provides deterministic tools to perform inference and approximate the posterior distribution and marginals of a dataset. Both methods have strengths and weaknesses, and many tools have been established using the two approaches as their base. Particle filter approaches to inference have also been established, with Doucet et al.\cite{bayes:smc} introducing sequential Monte Carlo as a fast way to approximate inference. For the purpose of this paper, an SMC sampler will be defined in detail in the Model definition section.
\subsection{Correspondence}
In the semantic slam context, correspondence refers to the problem of identifying objects as ones that have been encountered before during the mapping process. In that direction, Cree et al.\cite{corresp:first} create a histogram of line segments for each landmark and compute the root mean square error between them. They then calculate the RGB signature to compute the distance between different landmarks. Lowe et al.\cite{corres:sec} match Scale Invariant Feature Transform (SIFT) features, an approach which transforms image data into scale-invariant coordinates relative to local features. Lamon et al.\cite{corres:three} store a database of fingerprints which indicate the location in the robot's environment. The features are ordered and stored in a database as they appear in the robot's immediate surroundings. A new fingerprint is computed for each new view and matched against existing ones. Finally, in Sehgal et al.\cite{corres:four} an extension of SIFT descriptors to 3D data and point clouds is given.
\section{Theory background} \label{sec:theory}
The basic theory background regarding the sampler is presented in this section. The Generalized Polya Urn is an extension of basic urn models\cite{caron} for Dirichlet processes and serves as the base of the sampler presented in the theory section.
\subsection{Generalized Polya's Urn}
Dirichlet process priors have been widely used in the literature as non-parametric Bayesian tools to estimate the number of clusters in the data\cite{antoniak}. Dependent Dirichlet processes extend those tools by allowing the clusters in the data to vary with some variance over time by introducing dependencies on the data, which can be temporal, positional, etc. DDPs are a natural extension of DPs in domains where data cannot be considered exchangeable. The main motivation behind using such tools is that they can naturally be extended to dynamic environments to tackle the dynamic part of the long term slam problem.
A DDP, also known as a Generalized Polya Urn (GPU)\cite{caron}, has the property of randomly deleting partitions of clusters on every iteration. That way, it can cope with the variance of the data. The current notation defines the $n$-th datapoint at time $t$, $x_{t,n}$, as having an assignment $c_{t,n}$ to cluster $k \in \{1,2,\ldots, K\}$. The size of cluster $k$ at time $t$ is defined as $s_t^k$. The GPU of this model at time $t$ can now be defined as:
\begin{algorithm}
\caption{GPU}\label{GPU}
\begin{algorithmic}[1]
\Procedure{GPU}{$pointCloud, t$}
\For{\texttt{$k = 1,\ldots,K_{t-1,N_{t-1}}$}}
\State Draw $\Delta s_{t-1}^k \sim Binom(s_{t-1,N_{t-1}}^k, \rho) $
\State Set $s_{t,0}^{k} = s_{t-1,N_{t-1}}^{k} -\Delta s_{t-1}^k$
\EndFor
\For{\texttt{$n = 1,\ldots,N_t$}}
\State Draw $c_{t,n} \sim Cat( \frac{ s_{t,n-1}^{1} }{\alpha + \sum_k s_{t,n-1}^{k} }, \ldots, \frac{ s_{t,n-1}^{K_{t,n-1}} }{\alpha + \sum_k s_{t,n-1}^{k} } , \frac{ \alpha}{\alpha + \sum_k s_{t,n-1}^{k} }) $
\State If $c_{t,n} \leq K_{t,n-1}\ set:\ s_{t,n}^{c_{t,n}} = s_{t,n-1}^{c_{t,n}} + 1 , K_{t,n} = K_{t,n-1}$
\State If $c_{t,n} > K_{t,n-1}\ set:\ s_{t,n}^{c_{t,n}} = 1 , K_{t,n} = K_{t,n-1} + 1$
\EndFor
\EndProcedure
\end{algorithmic}
\end{algorithm}
Where Cat is a categorical distribution, Binom is the binomial distribution, $\alpha$ is the DP concentration parameter and $\rho$ is the deletion parameter of the GPU. This Generalized Polya Urn distribution also has the shorthand notation GPU($\alpha,\rho$). The process can be thought of in terms of a generalized Chinese restaurant process\cite{antoniak}, as shown in Fig.~\ref{generalPolya}. At time $t$, suppose there are $n$ customers seated at several tables in the restaurant. Each customer has to decide whether he/she will remain at the table with probability $p$ or leave the restaurant with probability $1-p$. Once all the customers make their decisions, they leave the restaurant or remain seated (b). Each occupied table is moved according to the number of customers still seated at that table (c). A new customer then enters the restaurant and either chooses to sit at one of the existing tables (e) or chooses a new one with probability proportional to the strength parameter $\alpha$ of the model (f).
\begin{wrapfigure}{l}{0.4\textwidth}
\includegraphics[width=0.9\linewidth]{generalPolya}
\caption{General Polya}
\label{generalPolya}
\end{wrapfigure}
\section{Model definition} \label{sec:model}
\subsection{General pipeline}
The general flow of operations that occur in the EKF module is presented in Fig.~\ref{pipeline}. The slam module requests new observation readings given the cloud currently read by the sensors and the position of the robot. The pipeline takes that cloud, extracts clusters and returns the landmarks currently being observed while taking into account landmarks that were observed in the past. Landmarks and clusters are identical concepts representing different layers of the pipeline. More specifically, clusters are output from the sampler and are given as input landmarks to the EKF module.
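To make the preceding definition concrete, the following short Python sketch implements one time step of Algorithm~\ref{GPU} (Binomial deletion followed by sequential assignment). It is illustrative only and not the implementation used in this work; the parameter values in the example call are placeholders.

\begin{verbatim}
import numpy as np

def gpu_step(sizes, n_points, alpha, rho, rng=None):
    """One GPU time step: Binomial deletion, then sequential assignment."""
    rng = rng or np.random.default_rng()
    # Deletion step: each surviving cluster loses Binom(s, rho) of its points.
    sizes = [int(s - rng.binomial(s, rho)) for s in sizes]
    assignments = []
    for _ in range(n_points):
        # Weights: existing cluster sizes plus alpha for a brand-new cluster.
        weights = np.array(sizes + [alpha], dtype=float)
        c = rng.choice(len(weights), p=weights / weights.sum())
        if c == len(sizes):
            sizes.append(1)      # open a new cluster
        else:
            sizes[c] += 1        # join an existing cluster
        assignments.append(c)
    return sizes, assignments

# Example: three clusters carried over from t-1 and 100 new points.
sizes, assignments = gpu_step([40, 25, 10], n_points=100, alpha=1.0, rho=0.2)
\end{verbatim}

In the full sampler the assignment weights are additionally multiplied by the data likelihoods, as described in the Gibbs updates section below.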
\begin{wrapfigure}{l}{0.4\textwidth}
\centering
\includegraphics[width=.4\textwidth]{workflowSpec}
\caption{Pipeline flow}
\label{pipeline}
\end{wrapfigure}
\begin{algorithm}
\caption{Landmark Layer}\label{euclid}
\begin{algorithmic}[1]
\Procedure{getLandmarkIds}{$pointCloud, timepoint, existingLandmarks$}
\State $initialize(landMarkIds)$
\State $pointCloudReduced \gets downsample(pointCloud)$
\State $features \gets extractMetaFeatures(pointCloudReduced)$
\State $landmarks \gets cluster(features)$
\For{\texttt{$landmarks$ as $landmark$}}
\State $ (similarity, landId) \gets calcBestSim(landmark, existingLandmarks) $
\If{$similarity >threshold$}
\State $ addLandmarks(landMarkIds, landId)$
\Else
\State $ newLandID \gets addLandmarkDB(landmarkDB, landmark)$
\State $addLandmarks(newLandID)$
\EndIf
\EndFor
\State \textbf{return} $ landMarkIds$\Comment{Return landmarks}
\EndProcedure
\end{algorithmic}
\end{algorithm}
\textbf{Method input:} The method takes as input a point cloud as it is currently being read by the Kinect sensor.
\textbf{Lines 3-4:} The preprocessing of the cloud is done in these steps. Feature extraction is done through the pcl\cite{pcl} library. A voxel grid is used to reduce the dataset size. A leaf size of approximately 3 cm produces a good trade-off between precision and speed. The object representation approach used is similar to~\cite{objectpointslam}. Instead of the CSHOT descriptor, an FPFH\cite{fpfh} histogram is used. A fast point feature histogram (FPFH) represents an angular signature between a point and its neighbors. The color signature of the cloud is encoded with an approach similar to~\cite{smcddp}. The color spectrum is discretized and what is extracted is the count of the different color signatures between a point and its k nearest neighbors. Finally, the position of every point is also given as input to the algorithm. The pipeline is presented in Fig.~\ref{pipeline}. The preprocessing outputs a vector $\textbf{x} =(x_s, x_c, x_a)$ where $s$ represents the position information of the point, $c$ the color information of the point's neighbors and $a$ the angle information of the point's neighbors.
\textbf{Line 5:} The clustering takes place in this line. The input of the method is the feature vector for every data point, which is calculated in the previous steps. The clustering is done using the SMC sampler presented in the model definition section.
\textbf{Lines 6-12:} The correspondence of previously seen landmarks to current observations is computed in lines 6-12. The calcBestSim function returns the landmark with the highest similarity match among the landmarks already stored in the database.
\textbf{Line 15:} The algorithm returns the list of the landmarks the robot sees at the current time. Now that the basic theories are defined, the pipeline's components and their operating mechanisms can be presented in more detail.
\subsection{The sampler}
\subsubsection{The data distribution} \label{data:dist}
Each point $x$ in the cloud is represented as a tuple $x =(x^s, x^a, x^c) $ where superscript $s$ represents spatial information, $a$ angle information, and $c$ color information. The way those features are extracted is explained in lines 3 and 4 of the general pipeline section. For the purpose of this project, each point in the cloud is represented by a vector of length 33, with the first three elements representing the space information, elements 4-6 the angle information, and the rest the color information.
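As a rough illustration of this representation (not the feature extraction actually used, which relies on the PCL voxel grid and FPFH implementations), the sketch below assembles a 33-dimensional vector per point from assumed inputs: point positions, RGB values discretized into 27 colour signatures, and surface normals used as a stand-in for the angular part.

\begin{verbatim}
import numpy as np
from scipy.spatial import cKDTree

def point_features(xyz, rgb, normals, k=10):
    """Build (n, 33) feature vectors: 3 spatial + 3 angle + 27 colour counts."""
    n = xyz.shape[0]
    tree = cKDTree(xyz)
    _, idx = tree.query(xyz, k=k + 1)   # nearest neighbour is the point itself
    idx = idx[:, 1:]

    # Colour: 3 levels per RGB channel -> 27 possible signatures; count the
    # signatures found among the k nearest neighbours of each point.
    levels = np.minimum((rgb * 3).astype(int), 2)        # rgb assumed in [0, 1]
    codes = levels[:, 0] * 9 + levels[:, 1] * 3 + levels[:, 2]
    colour = np.zeros((n, 27))
    for i in range(n):
        colour[i] = np.bincount(codes[idx[i]], minlength=27)

    # Angle: per-axis mean absolute normal difference to the neighbours
    # (an assumed surrogate for the FPFH-style angular signature).
    angle = np.abs(normals[:, None, :] - normals[idx]).mean(axis=1)

    return np.hstack([xyz, angle, colour])
\end{verbatim}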
The object model is a mixture of distributions over the data, with each object being modeled as D($\theta_t^k$), where $\theta$ represents the parameters of object $k$ at time $t$. More specifically, each set \textbf{x} with $n$ datapoints at time $t$ is distributed as:
$$ x_{t,n} \sim D(\theta_t^k) = Normal(x_{t,n}^s| \mu_t, \Sigma_t) Mult(x_{t,n}^c | \delta_t) Exp(x_{t,n}^a | \lambda_t) $$
\begin{wrapfigure}{l}{0.25\textwidth}
\centering
\includegraphics[width=.25\textwidth]{Kullback-Leibler}
\caption{Exponential trend of distances}
\label{pcl:kl}
\end{wrapfigure}
Where Normal is a three-dimensional Gaussian distribution with mean $\mu$ and covariance $\Sigma$ representing the positional distribution of the data; Mult is a categorical (multinomial) distribution with parameter vector $\delta$ representing the weights of the color distribution; and Exp is an exponential distribution with rate parameter $\lambda$ representing the angle distribution of the data within the cluster. The exponential distribution was chosen to model the angular information after empirical evaluation showed that it is a good fit for the angle signature distribution of the data, as shown in Fig.~\ref{pcl:kl}. Now that the distribution of the objects is defined, the progression of the sufficient statistics at time $t$ given $t-1$ is given by: $\theta_t^k | \theta_{t-1}^k \sim \begin{cases} T (\theta_{t-1}^k) &\mbox{if } k \leq K_{t-1} \\ G_0 & \mbox{if } k > K_{t-1}. \end{cases}$ Where $T$ represents the transition kernel of the data given the previous state in the model. The case $ k > K_{t-1} $ represents the creation of a new cluster and $G_0$ is the base distribution of the DDP. In our case, the conjugate priors of the distributions of the data were chosen to model the base distribution. Therefore, $G_0$ is defined as:
$$ G_0(\theta_t^k) = NiW( \mu_t^k, \Sigma_t^k | \kappa_0, \mu_0, \nu_0, \Lambda_0 ) Dir(\delta_t^k | q_0) Gam( \lambda_t^k | \alpha_0, \beta_0) $$
Where NiW is a normal-inverse-Wishart distribution, Dir denotes a Dirichlet distribution, and Gam the Gamma distribution. $ \kappa_0, \mu_0, \nu_0, \Lambda_0, q_0,\alpha_0$ and $\beta_0$ are predefined parameters of the model. The generative process for the dependent Dirichlet process mixture model can be written for each timestep $t$ as:
\begin{compactitem}
\item Draw $c_t$ $\sim$ $GPU(\alpha, \rho) $
\item $\forall$ k draw: $ \theta_t^k | \theta_{t-1}^k \sim \begin{cases} T (\theta_{t-1}^k) &\mbox{if } k \leq K_{t-1} \\ G_0 & \mbox{if } k > K_{t-1}. \end{cases}$
\item $\forall$ point $n$ draw $ x_{t,n} \sim F(\theta_t^{c_{t,n}})$
\end{compactitem}
Given the theory in \cite{caron}, the transition kernel must satisfy:
$$ \int G_0(\theta_k) T(\theta_t^k | \theta_{t-1}^k) d\theta_{t-1}^k = G_0(\theta_k) $$
This means that the invariant distribution of the transition kernel must equal the base distribution.
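Before turning to how this restriction is met in the sampler, the generative model can be made concrete with a short scipy-based sketch that draws one cluster from the base distribution $G_0$ and then observations from $D(\theta)$. All hyper-parameter values below are placeholders, not the ones used in the experiments.

\begin{verbatim}
import numpy as np
from scipy import stats

# Placeholder hyper-parameters of the base distribution G0.
mu0, kappa0 = np.zeros(3), 0.1
nu0, Lambda0 = 5.0, np.eye(3)
q0 = np.ones(27)             # Dirichlet prior over the 27 colour signatures
alpha0, beta0 = 2.0, 1.0     # Gamma prior over the exponential rate

def sample_cluster_from_G0():
    """Draw one set of cluster parameters (mu, Sigma, delta, lam) from G0."""
    Sigma = stats.invwishart(df=nu0, scale=Lambda0).rvs()
    mu = stats.multivariate_normal(mean=mu0, cov=Sigma / kappa0).rvs()
    delta = stats.dirichlet(q0).rvs()[0]
    lam = stats.gamma(a=alpha0, scale=1.0 / beta0).rvs()
    return mu, Sigma, delta, lam

def sample_point(mu, Sigma, delta, lam, k_neighbours=10):
    """Draw one observation x = (x_s, x_c, x_a) from D(theta)."""
    x_s = stats.multivariate_normal(mean=mu, cov=Sigma).rvs()
    x_c = np.random.multinomial(k_neighbours, delta)    # colour counts
    x_a = stats.expon(scale=1.0 / lam).rvs()            # angle signature
    return x_s, x_c, x_a

params = sample_cluster_from_G0()
x = sample_point(*params)
\end{verbatim}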
A typical way of meeting this restriction and forcing the sampler to converge to the original target density\cite{smc:theory} is to introduce a set of M auxiliary variables \textbf{z} such that: $$ P(\theta_t^k | \theta_{t-1}^k) = \int P(\theta_t^k | z_{t}^k) P(z_t^k| \theta_{t-1}^k) dz_t^k $$ The transition kernel of the model can now be sampled by using the following formula $\theta_t^k \sim T(\theta_{t-1}^k) = T_2 \circ T_1(\theta_{t-1}^k)$ with: \begin{equation} \label{eq1} \begin{split} z_t^k & \sim T_1(\theta_{t-1}^k) = Normal(\mu_{t-1}, \Sigma_{t-1}) Mult( \delta_{t-1}) Exp( \lambda_{t-1})\\ \mu_t, \Sigma_t, \delta_t, \lambda_t & \sim T_2(z_t^k) = NiW( \kappa_0, \mu_0, \nu_0, \Lambda_0 ) Dir(q_0) Gam(\alpha_0, \beta_0) \end{split} \end{equation} where $\mu_t, \Sigma_t, \delta_t, \lambda_t$ are posterior parameters given the auxiliary variables $z$. \subsubsection{Sequential monte carlo sampler} Sequential monte carlo samplers for Dirichlet process mixture models where introduced by Doucet et al~\cite{doucet} and serve as fast alternative to MCMC and VI methods of performing posterior inference. Given the previous definitions, the sampler is now defined as: \begin{algorithm}[h] \caption{SMC for DDPM}\label{SMC} \begin{algorithmic}[1] \State \textbf{Input:} Points \{$x_{1,1:N_t}, ..x_{T,1:N_t}$\}with extracted features \State \textbf{Output:} Clusters representing of the data \For{$t = 1,...T$} \For{$ l = 1,...L$} \For{$ iter = 1,...S$} \State Sample $(c_t)^{(l)} \sim Q_1$ \State Sample $(\theta^k ) \sim Q_2$ \EndFor \EndFor \For{$ k = 1,...K$} \State Sample $\Delta s_{t-1}^k \sim Binom( (s_{t-1,N_{t-1}}^k)^{(l)}, \rho) $ \State Set $s_{t,0}^{k} = s_{t-1,N_{t-1}}^{k} -\Delta s_{t-1}^k$ \State Sample $( (z_{t+1}^k)^{(l)} ) \sim T_1((\theta_t^k))^{(l)} $ \EndFor \State compute particle weights $w_t^l$ \EndFor \State Normalize and resample weights \end{algorithmic} \end{algorithm} \subsubsection{Gibbs updates} The proposal distribution $Q_1$ is the probability of an assignment $c_{t,n}$ given cluster sizes, parameters and concentration $\alpha$. Formally $Q_1$ can be written as: \begin{equation} \label{Gibbs} Q_1(c_{t,n} | s_{t,n}^k, \theta_t^k, \alpha) \propto Cat( s_{t,n}^1,...s_{t,n}^K, \alpha ) \times \begin{cases} F(x_{t,n} | \theta_t^{c_t} ) &\mbox{if } k \leq K_{t-1} \\ \int P(x_{t,n} | \theta_t )G_0(\theta) d\theta & \mbox{if } k > K_{t-1}. \end{cases} \end{equation} Where $c_{t,n}$ represents cluster $c$ of point $n$ at time $t$, $s$ represents cluster sizes. The integral represents the posterior predictive distribution of the cluster times the base distribution with the parameters integrated out. A review of the literature helps understand how the posterior predictive formula is derived. More specifically, the analytic expression of the integral is: \begin{equation} \label{Q1} \begin{split} \int P(x_{t,n} | \theta_t )G_0(\theta) d\theta = t_{\nu_0-1}( x_{t,n}^s | \mu_0, \frac{\Lambda_0(\kappa_0+1)}{\kappa_0(\nu_0-1)}) \times \prod_{j=1}^V \frac{\Gamma(x_{t,n}^c)}{\Gamma(q_0)} \times \\ \frac{\Gamma(\sum_{j=1}^V q_0)}{\Gamma(\sum_{j=1}^V x_{t,n}^c)} \times Lomax(\alpha_0 + s_{t,n}^c, \beta_0 \sum_{j=1}^V x_{t,n}^c) \end{split} \end{equation} Where $t$ represents student's t-distribution with $\nu$ degrees of freedom, Lomax represents Lomax distribution with shape and scale, $\alpha$ and $\beta$ repsectively and the rest represent a Dirichlet-Multinomial(aka DirMul) distribution. 
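As a minimal sketch of the assignment step in Equation~\ref{Gibbs} (again illustrative rather than the actual implementation), the proposal can be evaluated as below; here \texttt{likelihood} and \texttt{marginal} are placeholder callables for the density $F(x \mid \theta)$ and the posterior predictive integral above.

\begin{verbatim}
import numpy as np

def sample_assignment(x, clusters, sizes, alpha, likelihood, marginal, rng=None):
    """Draw a cluster index for x from Q1; the last index means 'new cluster'.

    clusters   : list of parameter tuples theta_k
    sizes      : list of current cluster sizes s_k
    likelihood : callable (x, theta) -> F(x | theta)
    marginal   : callable x -> posterior predictive of x under G0
    """
    rng = rng or np.random.default_rng()
    weights = [s * likelihood(x, theta) for s, theta in zip(sizes, clusters)]
    weights.append(alpha * marginal(x))
    weights = np.asarray(weights, dtype=float)
    return rng.choice(len(weights), p=weights / weights.sum())
\end{verbatim}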
The formulas for the posterior predictive distributions can be found in the literature, with \cite{compendium} being a good example. The conjugacy of the base and prior distributions allows for an easy sampling formula for the proposal distribution $Q_2$, which is of the form:
\begin{equation} \label{Q_2}
\begin{split}
Q_2(\theta_t^k | \theta_{t-1}^k , x_t^k, z_t^k) \propto F( x_t^k | \theta_k) \times T_2(\theta_t^k | z_t^k) \\ = NiW( \mu_t^k, \Sigma_t^k | \kappa_n, \mu_n, \nu_n, \Lambda_n ) Dir(\delta_t^k | q_n) Gam(\lambda_t^k | \alpha_n, \beta_n)
\end{split}
\end{equation}
With:
\begin{equation} \label{udpates}
\begin{split}
\kappa_n = \kappa_0 + N, \quad \nu_n = \nu_0 + N, \quad \mu_n = \frac{\kappa_0}{\kappa_0 + N} \mu_0 + \frac{N}{\kappa_0 + N} \overline{x}^s\\
\Lambda_n = \Lambda_0 + s_{x}^s, \quad q_n = q_0 + \sum_{i=1}^N x_i^c, \quad \alpha_n = \alpha_0 + N, \quad \beta_n = \beta_0 + \sum_{i=1}^N x_i^a\\
\end{split}
\end{equation}
Where $\overline{x}$ denotes the sample mean of the elements assigned to cluster $c$, $s_{x}$ the sample variance and $N$ the number of observations. The formulas for the updates can be found in the literature on conjugate priors, e.g.~\cite{compendium}.
\subsubsection{Weight updates}
The only thing left is to define the weight update step. More specifically, on every time step $t$ the weight of particle $l$ is calculated as:
\begin{equation}
w_t^{(l)} = \frac {P(c_t^{(l)} , \theta_t^{(l)}, x_t| \theta_{t-1} )}{P(c_t^{(l)} , \theta_t^{(l)}| \theta_{t-1} )}
\end{equation}
Using Bayes' rule, the numerator can be written as:
\begin{equation}
P(x_t | c_t^{(l)} , \theta_t^{(l)}, \theta_{t-1} ) \times P(c_t^{(l)} , \theta_t^{(l)}| \theta_{t-1} )
\end{equation}
which can be calculated using the expressions for $Q_2$ and $Q_1$ for the first and second part, respectively. After the particle weights are normalized, particles are drawn with probability proportional to their weights.
\subsection{Decision Layer}
The decision layer calculates how similar a cluster is to the ones encountered before. To achieve that, distance measures must be defined between the stored clusters and the ones inferred at the current iteration of the algorithm. Distances between distributions are called divergences, and a large amount of literature on divergences exists. Every cluster consists of a three-part distribution, as defined in Section \ref{data:dist}. Now let $l$ be the distribution of a stored cluster and $o$ the distribution of a currently observed cluster. $l$ and $o$ can be decomposed into three parts: $l_G$, $l_C$, $l_E$, where G, C and E stand for Gaussian, Categorical and Exponential, respectively. With that notation the distances between those distributions can be defined. For each individual landmark distribution $l$ and observation distribution $o$ the distances computed were the following: Wasserstein($l_G,o_G$), Kullback-Leibler($l_G,o_G$), squared Hellinger($l_E,o_E$), Kullback-Leibler($l_E,o_E$) and Kullback-Leibler($l_C,o_C$). These distances can be collected into a vector of length 5. That way, deciding if a cluster is part of a landmark that has been encountered before becomes a problem of finding the optimal decision boundary given the distances at hand. For this project, the decision boundary was set by empirical evaluation of the landmarks.
\subsection{Complexity}
The complexity can be decomposed into three parts: the cloud downsampling, the clustering and the decision making process.
$$ O(total) = O(filter) + O(cluster) + O(decision) $$ \textbf{Downsampling}: The complexity of the cloud downsampling pipeline can be decomposed to the one of its components. This means that the decomposed complexity is defined as follows: $$O(filter) = O(Downsampling + Stat\ Removal + RANSAC+ FPFH + Color\ estimation) $$ Voxel downsampling searches for neighbors within a distance defined by the user and keeps an average value that equally represents the cloud. Since the operation involves searching for neighbors of a point, and since search operations take $O(log\ n)$ time where N is the number of points within the cloud, the complexity of voxelGrid downsampling is $O(k log n)$ where $k$ is the number of neighbors and $n$ the number of points in the cloud. Statistial outlier removal searches for k nearest neigbhors and removes those whose deviation is passed a certain threshold. Searching for k neighbors in cloud has a complexity of $O(k\ log\ n)$.A high amount of research has been done regarding the optimal complexity of RANSAC~\cite{RANSAC}. RANSAC has a complexity of $ O(k+ m_s*N)$ where k is the maximum amount of iterations defined by the user, $m_s$ the average number of models per sample $N$ the number of data points. FPFH operations have a complexity of $O(nk)$ as given in~\cite{fpfh}. Finally, for the operation of color estimation, the k nearest neighbors are chosen and some constant operation is performed on them. The complexity for color estimation then becomes $O(k\ log\ n)$ where $k$ is the number of neighbors, $n$ the number of points. The downsampling pipeline has a total complexity of: \begin{equation} \label{Q_filt} O(filter) = O(k_{0}\ log\ n_{init} + k_{1}\ log\ n_{1} + k_{2}+ m_s*n_{2} + n_{3}k_{3} + k_{4}\ log\ n_{3} ) \end{equation} Different $k$ indexes represent the number of neighbors defined for every operation. The $n$ represents the number of points used as input. Using the notation of equation \ref{Q_filt}, $n_{init}$ defines the whole cloud, $n_1$ the cloud after operation 1, $n_2$ the cloud after operations 2 etc. \textbf{Clustering}: The complexity of the SMC sampler is defined in \cite{smcddp} as $O(TLKSN)$ where $T$ defines the time frames, $L$ the number of particles, $K$ the number of clusters, $S$ the number of samples, and $N$ the size of the dataset. \textbf{Decision making}: The decision making takes $ O(\kappa * l^2) $ computational time where $\kappa$ defines the number of clusters output by the sampler and $l$ the number of landmarks currently stored in the database. The final complexity of the method can then be defined as: \begin{equation} \label{Complexity} O= O(k_{0}logn_{0} + k_{1}logn_{1} + k_{2}(t_M)+ m_s*n_{2} + n_{3}k_{3} + k_{4}logn_{3} + LKSn_3 + \kappa * l^2) \end{equation} \subsection{Landmark size} The basic block of this algorithm is a cluster containing an environment signature. In order to be able to compute how scalable the method is, the size of a single landmark will be computed. Each landmark is represented by a single row in an SQLite database. Each row consists of 32 real numbers and 1 auto increment integer. Given that SQLite databases assign the memory dynamically, the maximum amount of memory a cell can take will be calculated so that every other case is also included. According to the SQLite manual of datatypes a real value takes up to 8 bytes of memory. An auto increment integer takes up to 4 bytes of information, so the total number of memory for a worst case scenario landmark is 260 bytes. 
This number can vary greatly over different environments, but it is a safe upper bound for calculating the memory this method occupies.
\section{Results} \label{sec:results}
\subsection{Simple datasets}
In this section the algorithm is tested on a simple dataset. Testing the method on simple datasets makes it easier to extend it to the more complex clouds that are used when mapping the environment. In order to test the sampling as well as the decision layer of the algorithm, a simple dataset provided by the pcl\cite{pcl} library was used. More specifically, a cloud consisting of two milk cartridges with different colors and poses was used. The initial cloud is shown in Fig.\ref{pcl:clust}(a). The cloud was given as input to the downsampling pipeline. The reduced cloud was then passed as input to the sampler and the clustering results are shown in Fig.\ref{pcl:clust}(b).
\begin{wrapfigure}{l}{0.5\textwidth}
\begin{tabular}{c}
\includegraphics[width=.4\textwidth]{clusterings/coloursSource} \\
(a) Raw cloud \\
\includegraphics[width=.4\textwidth]{clusterings/coloursCorrect} \\
(b) Post clustering \\[6pt]
\end{tabular}
\caption{Clustering of the simple dataset}
\label{pcl:clust}
\end{wrapfigure}
More specifically, the reduced point cloud is shown in the top left part. It is significantly smaller in size, and this cloud, along with all the meta-information needed to perform the clustering, is given as input to the sampler. The sampler outputs a mixture of distributions that best fits the input data. The clustering output is shown in Fig.\ref{pcl:clust}(b), with the top right being the Gaussian distributions inferred, the bottom left the exponential and the bottom right the categorical representing the color information of the cloud. The height of the objects leads to distributions with high variance in the z axis. The sampler outputs 2 clusters for the data, with each box being assigned separately. The color signature each cluster carries is correctly captured in the bottom right part of Fig.\ref{pcl:clust}(b). The two boxes are similar in size but their orientation is different, and this slight difference is shown in the exponential part of the signature. Each box is now captured as an environment signature that consists of a Gaussian, a Categorical, and an Exponential part.
\begin{center}
\begin{tabularx}{1.01\textwidth}{|c *{5}{|Y}|}
\hline
\multicolumn{6}{|l|}{\centerline{UUID 1}} \\
\hline
LandUUID & GausKL & GausWes & ExpHel & ExpKL & CatKL \\ \hline
1&0&1.18111e-07&0&0.164045&0 \\ \hline
2&13.5579&22449.9&1.56956&0.376699&13.8155 \\ \hline
\end{tabularx}
\label{dist}
\end{center}
Table~\ref{dist} shows the distances of every cluster computed against the first. The Gaussian counterparts of the clusters have significant distances due to the distance between the clusters in space. On the other hand, the distances between their Exponential parts are not that large, and the overlap between the categorical signatures leads to medium distances in their Categorical counterparts. Those distances are the information that is passed to the decision layer to match existing landmarks with new ones.
\subsection{Expressiveness and decision layer}
After clustering, the remaining operations in the pipeline compare the clusters currently extracted with the landmarks already stored in the database. The accuracy of these operations depends on the expressiveness of the signatures the clusters carry, and this is displayed in the examples presented in Fig.\ref{pip:bounds}.
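All of the distances in the table are available in closed form, so evaluating the decision layer requires no sampling. The sketch below (numpy/scipy based, not the code used for the experiments) shows one way such Gaussian, exponential and categorical divergences can be computed.

\begin{verbatim}
import numpy as np
from scipy.linalg import sqrtm

def kl_gauss(mu0, S0, mu1, S1):
    """KL( N(mu0, S0) || N(mu1, S1) ) in closed form."""
    k = mu0.shape[0]
    S1_inv = np.linalg.inv(S1)
    d = mu1 - mu0
    return 0.5 * (np.trace(S1_inv @ S0) + d @ S1_inv @ d - k
                  + np.log(np.linalg.det(S1) / np.linalg.det(S0)))

def wasserstein2_gauss(mu0, S0, mu1, S1):
    """Squared 2-Wasserstein distance between two Gaussians."""
    root = sqrtm(sqrtm(S1) @ S0 @ sqrtm(S1))
    return float(np.sum((mu0 - mu1) ** 2) + np.trace(S0 + S1 - 2 * root.real))

def kl_exp(l0, l1):
    """KL( Exp(l0) || Exp(l1) ), rate parametrisation."""
    return np.log(l0 / l1) + l1 / l0 - 1.0

def hellinger2_exp(l0, l1):
    """Squared Hellinger distance between two exponentials."""
    return 1.0 - 2.0 * np.sqrt(l0 * l1) / (l0 + l1)

def kl_cat(p, q, eps=1e-12):
    """KL between two categorical weight vectors."""
    p, q = np.asarray(p) + eps, np.asarray(q) + eps
    p, q = p / p.sum(), q / q.sum()
    return float(np.sum(p * np.log(p / q)))
\end{verbatim}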
The decision boundary of the pipeline can be decomposed into three basic parts; a positional, a color and an angular boundary. Fig.\ref{pip:bounds} shows the behavior of the decision layer with respect to the Gaussian(positional) and Categorical(color) parts. \begin{wrapfigure}{l}{0.65\textwidth} \begin{tabular}{cc} \includegraphics[width=.3\textwidth]{colorBound} & \includegraphics[width=.3\textwidth]{colorBound2} \\ (a) Yellow Folder& (b) Blue trashcan \\[6pt] \includegraphics[width=.3\textwidth]{posBound} & \includegraphics[width=.3\textwidth]{posBound2} \\ (c) Folder initial & (d) Folder moved \\[6pt] \end{tabular} \caption{Expresiveness \& decision bounds} \label{pip:bounds} \end{wrapfigure} In Fig.\ref{pip:bounds}(a) the yellow folder allong with some of its environment to the left are being assigned to landmark with id 7383. The folder is then removed and a blue trashcan is put in its place. The folder and the trashcan are similar in size and due to that their Gaussian counterparts will not have large distances. Their main difference lies in the color signature they carry. Since the distance in their color is substantial, a new landmark with id 7412 is created to capture the change in the signature of the environment at that place of the map. The different landmark assigned to the cluster can be seen in Fig.\ref{pip:bounds}(b). In Fig.\ref{pip:bounds}(c) and(d) the positional decision boundary is displayed. In the initial position as shown in Fig.\ref{pip:bounds}(c) the yellow folder is assigned to the green landmark of the cloud. As the object is moved a different position in the cloud, it is being assigned to a different cluster. The reason the cluster is assigned to multiple landmarks is due to the fact that the folder is decomposed to several clusters and each one of them is being assigned to a different part of the cloud with respect to their position. This can be seen in Fig.\ref{pip:bounds}(c) where the bottom left of the folder is being assigned to the red cluster. Parameter tweaking is possible but the purpose of a general pipeline is to use as little parameter tweaking as possible so that the method is general and can easily be applied to a different scenarios. The exponential part of the distribution is responsible for the angle signature elements within a cluster have. Practically, having a large amount of different angle distributions in a single cluster leads to objects that have texture and their surface is not smooth. Having a very strict limit in the angle distribution can lead to very small clusters and subsequently to a high amount of landmarks within the data. Using the data from the kinect camera, angle distributions are sensitive to noise that is found in parts of the cloud that are near the end of Kinect sensor's range. Practical evaluation has shown that using an angle limit that is close to the average distance between angle signatures produces stable results and reasonably sized landmarks in the cloud. \subsection{EKF-slam experiments} Finally, the pipeline was used in real life scenarios as a sensor model in a Landmark based EKF slam algorithm and was tested in its precision and memory requirements. In Fig.\ref{slam} an end result of a slam session using the pipeline as a sensor model is shown. As the robot progresses through the environment, the EKF module requests from the sensor model to observe what landmarks are currently detected given the current cloud readings and the existing Landmark database. 
The pipeline follows the procedure defined in the general pipeline section and returns the landmarks currently detected. The end result shows the number of landmarks the method requires in order to map a medium-sized room. Each sphere represents an environment signature at that specific point, in the same way as was shown in the simple datasets section. If the decision layer is chosen to be very strict, the number of landmarks the algorithm outputs will increase, and the environment will be represented in more detail. It is important to notice that a strict decision layer must be handled with care, as it can lead to a pipeline that continuously adds new landmarks, making the sensor module non-convergent.
\begin{wrapfigure}{l}{0.4\textwidth}
\begin{tabular}{c}
\includegraphics[width=.353\textwidth]{slam110landmarks2} \\
(a) Slam session \\
\includegraphics[width=.353\textwidth]{memoryRequirements} \\
(b) Memory requirements \\[6pt]
\end{tabular}
\caption{SLAM session and memory requirements}
\label{slam}
\end{wrapfigure}
The compression that the method introduces is not directly observable from the figure. As the environment is reduced from a cloud to a set of landmarks, the memory needs change from storing point clouds to storing the landmarks extracted from those clouds. Since every landmark represents a signature of the environment at that particular point, the compression is achieved by reducing a large number of points to that specific signature. The number of parameters needed to define the three distributions in the signature is all the information this method requires, and hence the memory gains are substantial. The expressiveness of the distributions greatly affects the objects, as well as the object shapes, that can be represented. Finally, it is always possible to extend the method presented by adding new features and new priors to those features.
\subsection{Memory requirements}
The memory requirements of the method are related to the Dirichlet strength parameter $\alpha$ of the sampler. As the $\alpha$ parameter increases, the sampler outputs more clusters on every iteration. A higher number of clusters results in a higher number of landmarks and, consequently, in larger memory requirements for the method. Fig.\ref{slam}(b) shows the memory requirements as a function of the strength parameter $\alpha$. As can be seen, the number of landmarks follows the logarithmic trend of the Dirichlet prior in relation to $\alpha$. Constantly increasing the $\alpha$ value will not make the algorithm follow the logarithmic trend indefinitely. That is due to the fact that, despite the increase in $\alpha$, the decision layer has an upper bound on the number of landmarks it can hold and will eventually saturate. It must also be noted that as the $\alpha$ parameter is set to higher values, the sampler outputs more clusters, making it a more accurate environment descriptor, but it also takes more time, making it infeasible to use in real-time scenarios. Values of $\alpha$ between 0 and 10 provide a robust and fast enough sampler that can be used in online mapping scenarios.
\subsection{Limits of the method}
Limits of the method exist in relation to the two basic layers of the pipeline: the clustering layer and the decision layer.
\subsubsection{Clustering layer}
\begin{wrapfigure}{l}{.6\textwidth}
\begin{tabular}{cc}
\includegraphics[width=.26\textwidth]{singlecluster} & \includegraphics[width=.26\textwidth]{onlyclusters2} \\
(a) Low $\alpha$ & (b) High $\alpha$ pipeline\\
\end{tabular}
\caption{Pipeline fail cases.}
\label{pip:limits}
\end{wrapfigure}
An important limit of the pipeline exists with respect to the Dirichlet hyper-parameter $\alpha$. During the clustering, choosing a correct value for the hyper-parameter $\alpha$ is very important in order to obtain optimal results. Having the sampler run with a very low $\alpha$ can lead to the whole cloud being assigned to a single cluster. Having every point in the cloud be part of the same cluster leads to a significant amount of information loss, as no environment-specific information is incorporated into the cloud. Fig.~\ref{pip:limits}(a) shows the behavior of the sampler for $\alpha$ values lower than 1. On the other hand, having a very large $\alpha$ can lead to a very large number of clusters being output by the sampler every time. That can lead to a non-converging pipeline, since on every iteration a new landmark that does not fit the landmark database is output. This leads to many small clusters, each containing a small number of points. Furthermore, a high value of the hyper-parameter leads to a slower sampling procedure, since the complexity of the sampler is $O(TKLSN)$ where $K$ is the number of clusters. Fig.~\ref{pip:limits}(b) displays the behavior of the sampler for very large values of $\alpha$. The spheres represent landmarks, and it can be seen that most of the points in the environment are considered individual clusters.
\subsubsection{Decision layer}
The restrictions imposed by the decision layer are straightforward due to the constant nature of the decision process. Taking very small distances in the distance threshold operation can lead to a pipeline that continuously adds new landmarks to the database. An example is shown in Fig.~\ref{pip:expo}, where limiting the landmark matching operation to very small exponential distances leads to a cloud where objects are decomposed into many entities.
\begin{wrapfigure}{l}{.45\textwidth}
\includegraphics[width=.4\textwidth]{monsterchair} \\
\caption{Strict exponential limit}
\label{pip:expo}
\end{wrapfigure}
Object decomposition is even more intense when camera movement is involved, due to the different viewing angles. Finally, since the number of landmarks is also a function of noise, areas of the cloud that are near the maximum range of the sensor can lead to new landmarks being added frequently.
%------------------------------------------------
\section{Conclusion and future work} \label{sec:discussion}
In this paper, a novel method for cloud representation using non-parametric Bayesian tools was introduced. By reducing parts of the environment to distribution signatures, a large amount of compression is provided, making the method highly suitable for long term slam problems; the expressiveness of the representation is sufficient to perform slam and is robust even in the noisy parts of the environment. The strengths and weaknesses of the method were presented, and an application of how this method could be used to tackle the compression problem of long term slam was given. There are a number of directions which could be explored to improve the method.
Choosing more complex environment representations could increase the expressive strength of the sampler, making it easier to represent more complex structures with higher precision. Also, a hierarchical clustering approach would be an interesting extension, since it would both capture the structure in a top-down manner and handle the dynamic component that long term slam problems introduce. Furthermore, a more complex decision layer would also increase the robustness of the method, making it able to handle more complex environment structures and more expressive landmarks. Finally, those additions could help lift the dynamic environment restrictions that were defined in Walcott's PhD thesis and make the pipeline tackle the implications of life-long slam in a fully Bayesian way.
%----------------------------------------------------------------------------------------
%	REFERENCE LIST
%----------------------------------------------------------------------------------------
\begin{thebibliography}{99} % Bibliography - this is intentionally simple in this template
\bibitem{probRobs} \newblock Thrun, S. (2002). Probabilistic robotics. Communications of the ACM, 45(3), 52-57.
\bibitem{ekf} \newblock Bailey, T., Nieto, J., Guivant, J., Stevens, M., \& Nebot, E. (2006, October). Consistency of the EKF-slam algorithm. In Intelligent Robots and Systems, 2006 IEEE/RSJ International Conference on (pp. 3562-3568). IEEE.
\bibitem{liflonglearning} \newblock Thrun, S., \& Mitchell, T. M. (1995). Lifelong robot learning. The Biology and Technology of Intelligent Autonomous Agents, 165-196.
\bibitem{lifelongmaps} \newblock Konolige, K., \& Bowman, J. (2009, October). Towards lifelong visual maps. In Intelligent Robots and Systems, 2009. IROS 2009. IEEE/RSJ International Conference on (pp. 1156-1163). IEEE.
\bibitem{aishalong} \newblock Walcott, A. (2011). Long-term robot mapping in dynamic environments.
\bibitem{pcl} \newblock{Rusu, R. B., \& Cousins, S. (2011, May). 3d is here: Point cloud library (pcl). In Robotics and Automation (ICRA), 2011 IEEE International Conference on (pp. 1-4). IEEE.}
\bibitem{slam++} \newblock Salas-Moreno, R. F., Newcombe, R. A., Strasdat, H., Kelly, P. H., \& Davison, A. J. (2013, June). Slam++: Simultaneous localisation and mapping at the level of objects. In Computer Vision and Pattern Recognition (CVPR), 2013 IEEE Conference on (pp. 1352-1359). IEEE.
\bibitem{objslam} \newblock Selvatici, A. H., \& Costa, A. H. (2008). Object-based visual slam: How object identity informs geometry.
\bibitem{objectpointslam} \newblock Choudhary, S., Trevor, A. J., Christensen, H. I., \& Dellaert, F. (2014, September). slam with object discovery, modeling and mapping. In Intelligent Robots and Systems (IROS 2014), 2014 IEEE/RSJ International Conference on (pp. 1018-1025). IEEE.
\bibitem{objectDisc} \newblock Koo, S., Lee, D., \& Kwon, D. S. (2014, September). Unsupervised object individuation from RGB-D image sequences. In Intelligent Robots and Systems (IROS 2014), 2014 IEEE/RSJ International Conference on (pp. 4450-4457). IEEE.
\bibitem{fpfh} \newblock{Rusu, R. B., Blodow, N., \& Beetz, M. (2009, May). Fast point feature histograms (FPFH) for 3D registration. In Robotics and Automation, 2009. ICRA'09. IEEE International Conference on (pp. 3212-3217). IEEE.}
\bibitem{segOverview} \newblock {Rabbani, T., van den Heuvel, F., \& Vosselmann, G. (2006). Segmentation of point clouds using smoothness constraint.
International Archives of Photogrammetry, Remote Sensing and Spatial Information Sciences, 36(5), 248-253.} \bibitem{nonParam} \newblock{Wainwright, M. J., \& Jordan, M. I. (2008). Graphical models, exponential families, and variational inference. Foundations and Trends in Machine Learning, 1(1-2), 1-305.} `` \bibitem{omnimaper} \newblock{A.Trevor, J.Rogers, and H.Christensen. Omnimapper: A modular multimodal mapping framework. In IEEE International Conference on Robotics and Automation (ICRA), 2014} \bibitem{pointSeg} \newblock Trevor, A. J., Gedikli, S., Rusu, R. B., \& Christensen, H. I. (2013). Efficient organized point cloud segmentation with connected components. Semantic Perception Mapping and Exploration \bibitem{planarSeg} \newblock Unnikrishnan, R., \& Hebert, M. (2003, October). Robust extraction of multiple structures from non-uniformly sampled data. In Intelligent Robots and Systems, 2003. Proceedings. 2004 IEEE/RSJ International Conference on IEEE. \bibitem{planarSeg2} \newblock Rabbani, T., van den Heuvel, F., \& Vosselmann, G. (2006). Segmentation of point clouds using smoothness constraint. International Archives of Photogrammetry, Remote Sensing and Spatial Information Sciences, 36(5), 248-253. \bibitem{smartSeg} \newblock Triebel, R., Shin, J., \& Siegwart, R. (2010, June). Segmentation and unsupervised part-based discovery of repetitive objects. In Robotics: Science and Systems (Vol. 2). \bibitem{smcddp} \newblock Neiswanger, W., Wood, F., \& Xing, E. (2014, August). The dependent dirichlet process mixture of objects for detection-free tracking and object modeling. In Proceedings of the Seventeenth International Conference on Artificial Intelligence and Statistics (pp. 660-668). \bibitem{corresp:first} \newblock Cree, M. J., Jefferies, M. E., \& Baker, J. T. Using 3D Visual Landmarks to Solve the Correspondence Problem in Simultaneous Localisation and Mapping. \bibitem{corres:sec} \newblock Lowe, D. G. (2004). Distinctive image features from scale-invariant keypoints. International journal of computer vision, 60(2), 91-110. \bibitem{corres:three} \newblock Lamon, P., Tapus, A., Glauser, E., Tomatis, N., \& Siegwart, R. (2003, October). Environmental modeling with fingerprint sequences for topological global localization. In Intelligent Robots and Systems, 2003. Proceedings. 2003 IEEE/RSJ International Conference on IEEE. \bibitem{corres:four} \newblock Sehgal, A., Cernea, D., \& Makaveeva, M. (2010). Real-time scale invariant 3D range point cloud registration. In Image Analysis and Recognition (pp. 220-229). Springer Berlin Heidelberg. \bibitem{bayes:neal} \newblock Neal, R. M. (2000). Markov chain sampling methods for Dirichlet process mixture models. Journal of computational and graphical statistics, 9(2), 249-265. \bibitem{bayes:jordan} \newblock Blei, D. M., \& Jordan, M. I. (2006). Variational inference for Dirichlet process mixtures. Bayesian analysis, 1(1), 121-143. \bibitem{slam} \newblock{Montemerlo, M., Thrun, S., Koller, D., \& Wegbreit, B. (2002). Fastslam: A factored solution to the simultaneous localization and mapping problem. AAAI/IAAI, 593-598.} \bibitem{bayes:smc} \newblock Doucet, A., De Freitas, N., \& Gordon, N. (2001). An introduction to sequential Monte Carlo methods (pp. 3-14). Springer New York. \bibitem{antoniak} \newblock{Charles E Antoniak,Mixtures of dirichlet processes with applications to bayesian nonparametric problems, The annals of statistics (1974), 1152–1174} \bibitem{caron} \newblock{F. Caron, M. Davy, and A. 
%----------------------------------------------------------------------------------------

\end{document}
%*****************************************
\chapter{Data}\label{06:data}
%*****************************************
\begin{wrapfigure}{r}{0.4\textwidth} \label{06:fig01} \centering \includegraphics[width=0.4\textwidth]{gfx/06-data} \end{wrapfigure}
Data are\footnote{The word ``data'' is plural and is treated that way in this text even though modern usage seems to be trending toward a singular form.} a collection of facts about some topic. As an example, a ``customer loyalty'' program gathers data from customers on how often they shop, what they purchase on each trip, what time of day they typically shop, and all sorts of other data. When data are interpreted in some way they become information. The types of analyses that can be done with data are limited by the types of data involved. The purpose of this chapter is to introduce various concepts about data and show how they can be analyzed.\footnote{Photo by Mika Baumeister on Unsplash}
\begin{center} \begin{objbox}{Objectives} \begin{itemize} \setlength{\itemsep}{0pt} \setlength{\parskip}{0pt} \setlength{\parsep}{0pt}
\item Define the types of data
\item Determine the difference between data and rating scales
\item Define the three primary properties of data: distribution, excess kurtosis, and skew
\item Define database and find several public databases that may be useful for research projects
\item Select the correct statistical test for the project
\item Define the types of statistical tests used for central measures, spread, frequency, correlation, parametric hypothesis testing, and nonparametric hypothesis testing.
\item Define data mining and discuss the three most common forms of data mining: clustering, decision trees, and market basket
\end{itemize} \end{objbox} \end{center}
\section{Types of Data}
Psychologist Stanley Smith Stevens defined four generic types of data\cite{stevens1946theory}.
\begin{itemize}
\item \textbf{\Gls{qualitativedata}} group observations into a limited number of categories; for example, type of pet (cat, dog, bird, etc.) or place of residence (Arizona, California, etc.). Because qualitative data do not have characteristics like means or standard deviations, they are analyzed using non-parametric tests, like \textit{Kruskal-Wallis H} and \textit{Mann-Whitney U}. Qualitative data can be further divided into two sub-types: nominal and ordinal.
\begin{itemize}
\item \textbf{\Gls{nominaldata}} are categories that do not overlap and have no meaningful order; they are merely labels for attributes. Examples of nominal data include occupations (custodial, accounting, sales, etc.) and blood type (A, B, AB, O). A special subcategory of nominal data is binary, or dichotomous, where there are only two possible responses, like ``yes'' and ``no''. Nominal data are sometimes stored in a database using numbers but they cannot be treated like numeric data. For example, binary data, like ``Do you rent or own your home?'' can be stored as ``1 = rent, 2 = own'' but the numbers in this case have no numeric significance and could be replaced by words like ``Rent'' and ``Own.''
\item \textbf{\Gls{ordinaldata}} are categorical data but, unlike nominal, the categories imply some sort of order (which is why they are called ``ordinal'' data). One example of ordinal data is the ``star'' rating system for movies. It is clear that a five-star movie is somehow better than a four-star movie but there is no way to quantify the difference between those two categories.
As another example, it is common for hospital staff members to ask patients to rate their pain level on a scale of one to ten. If a patient reports a pain level of ``seven'' but, after some sort of treatment, later reports a pain level of ``five,'' then the pain has clearly decreased; however, it is impossible to quantify the exact difference between those two levels. Ordinal scales are most commonly used with Likert-type survey questions where the responses are selections like ``Strongly Agree'', ``Agree'', ``Neutral'', ``Disagree'', ``Strongly Disagree''. Ordinal data are also used when numeric data are grouped. For example, if a dataset included respondents' ages then those numbers could be grouped into categories like ``$ 20-29 $'' and ``$ 30-39 $.'' Those groups would typically be stored in the dataset as a single number so maybe ``$ 2 $'' would represent the ages ``$ 20-29 $,'' which would be ordinal data.
\end{itemize}
\item \textbf{\Gls{quantitativedata}} are numbers, typically counts or measures, like a person's age, a tree's height, or a truck's weight. Quantitative data are measured with scales that have equal divisions so the difference between any two values can be calculated. Quantitative data are discrete if they are represented by integers, like the count of words in a document, or continuous if they are represented by fractional numbers, like a person's height. Because quantitative data include characteristics like means and standard deviations, they are analyzed using parametric tests, like \textit{T-tests} and \textit{Analysis of Variance (ANOVA)}. Quantitative data can be further divided into two sub-types: interval and ratio.
\begin{itemize}
\item \textbf{\Gls{intervaldata}} use numbers to represent quantities where the distance between any two quantities can be calculated but there is no true zero point on the scale. One example is a temperature scale where the difference between $ 80 $\textdegree and $ 90 $\textdegree is the same as the difference between $ 60 $\textdegree and $ 70 $\textdegree. It is important to note that interval data do not include any sort of true zero point; thus, zero degrees Celsius does not mean ``no temperature,'' and without a zero point it is not reasonable to make a statement like $ 20 $\textdegree is twice as hot as $ 10 $\textdegree.\footnote{To be fair, temperature does have a zero point, called ``absolute zero.'' However, the value of zero degrees Celsius or Fahrenheit is not ``no temperature.''}
\item \textbf{\Gls{ratiodata}} use numbers to describe a specific measurable distance between two quantities; however, unlike interval data, ratio data have a true zero point. A good example of ratio data is the sales report for an automobile dealership. Because the data are a simple count of the number of automobiles sold it is possible to compare one month with another. Also, since the scale has a true zero point (no sales for the month) it is possible to state that one month had twice the sales of another.
\end{itemize}
\end{itemize}
\section{Rating Scale}
When working with qualitative data, it is important for researchers to determine a rating scale, also called a level of measurement, to record data gathered about any one attribute. For example, male-female-other, M-F-O, and $ 1 $-$ 2 $-$ 3 $ are three potential rating scales for the attribute \textit{gender}. A researcher could use any of these scales, or devise a completely different one, as long as the scale is used consistently throughout the entire research project.
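As a small illustration of applying one rating scale consistently, the following Python sketch (not part of the original text; the labels and codes come from the gender example above) records responses with the M-F-O scale and recodes them onto the equivalent $ 1 $-$ 2 $-$ 3 $ scale. Whichever scale is chosen, the same mapping must be used for every observation in the project.
\begin{verbatim}
# Two equivalent rating scales for the attribute "gender":
# the labels M-F-O and the codes 1-2-3.
SCALE = {"M": 1, "F": 2, "O": 3}

def recode(responses, scale=SCALE):
    """Apply one rating scale consistently to every recorded response."""
    return [scale[r] for r in responses]

print(recode(["F", "M", "O", "F"]))   # -> [2, 1, 3, 2]
\end{verbatim}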
It is easy to imagine that many rating scales exist but the most common ones are \textit{binary}, \textit{Likert}, \textit{semantic differential}, and \textit{Guttman}.
\begin{description}
\item[\Gls{binaryscale}] Binary scales are nominal scales consisting of binary items that assume one of only two possible values, such as yes or no, true or false, and so on. For example, a typical binary scale for a ``political activism'' construct may consist of the six binary items shown in Table \ref{tab06.02}. Each item in this scale is a binary item, and the total number of ``yes'' indicated by a respondent (a value from $ 0 $ to $ 6 $) can be used as an overall measure of that person's political activism. Binary scales can also employ other values, such as male or female for gender, full-time or part-time for employment status, and so forth. If an employment status item is modified to allow for more than two possible values (e.g., unemployed, full-time, part-time, and retired), it is no longer binary, but still remains a nominal scaled item.
\begin{table}[H] \centering
\begin{tabularx}{0.95\linewidth}{p{0.70\linewidth}p{0.09\linewidth}p{0.09\linewidth}}
\toprule
\textbf{Question} & \textbf{Yes} & \textbf{No} \\
\midrule
Have you ever written a letter to a public official? & $ \bigcirc $ & $ \bigcirc $ \\
Have you ever signed a political petition? & $ \bigcirc $ & $ \bigcirc $ \\
Have you ever donated money to a political cause? & $ \bigcirc $ & $ \bigcirc $ \\
Have you ever donated money to a candidate running for public office? & $ \bigcirc $ & $ \bigcirc $ \\
Have you ever written a political letter to the editor of a newspaper? & $ \bigcirc $ & $ \bigcirc $ \\
Have you ever persuaded someone to change his/her voting plans? & $ \bigcirc $ & $ \bigcirc $ \\
\bottomrule
\end{tabularx}
\caption{Political activism binary scale}
\label{tab06.02}
\end{table}
\item[\Gls{likertscale}] Designed by Rensis Likert, this is a very popular rating scale for measuring ordinal data in business research. This scale includes Likert items that are simply-worded statements to which respondents can indicate their extent of agreement or disagreement on a five- or seven-point scale ranging from ``strongly disagree'' to ``strongly agree.'' A typical example of a five-item Likert scale for the ``employment self-esteem'' construct is shown in Table \ref{tab06.03}. Likert scales are summed scales; that is, the overall scale score may be a summation of the attribute values of each item as selected by a respondent.
\begin{table}[H] \centering
\begin{tabularx}{0.95\linewidth}{p{0.35\linewidth}p{0.10\linewidth}p{0.08\linewidth}p{0.07\linewidth}p{0.07\linewidth}p{0.08\linewidth}}
\toprule
{\footnotesize Statement} & {\footnotesize Strongly disagree} & {\footnotesize Disagree} & {\footnotesize Neutral} & {\footnotesize Agree} & {\footnotesize Strongly agree} \\
\midrule
{\footnotesize I feel good about my job} & $ \bigcirc $ & $ \bigcirc $ & $ \bigcirc $ & $ \bigcirc $ & $ \bigcirc $ \\
{\footnotesize I get along well with others at work} & $ \bigcirc $ & $ \bigcirc $ & $ \bigcirc $ & $ \bigcirc $ & $ \bigcirc $ \\
{\footnotesize I'm proud of my relationship with my supervisor} & $ \bigcirc $ & $ \bigcirc $ & $ \bigcirc $ & $ \bigcirc $ & $ \bigcirc $ \\
{\footnotesize I feel like I'm making a contribution at work} & $ \bigcirc $ & $ \bigcirc $ & $ \bigcirc $ & $ \bigcirc $ & $ \bigcirc $ \\
{\footnotesize I can tell that my coworkers respect me} & $ \bigcirc $ & $ \bigcirc $ & $ \bigcirc $ & $ \bigcirc $ & $ \bigcirc $ \\
\bottomrule
\end{tabularx}
\caption{Likert scale for employment self-esteem}
\label{tab06.03}
\end{table}
Likert items allow for more granularity (more finely tuned response) than binary items, including whether respondents are neutral to the statement. Three or nine values (often called ``anchors'') may also be used, but it is important to use an odd number of values to allow for a ``neutral'' (or ``neither agree nor disagree'') anchor. Some studies have used a ``forced choice approach'' to force respondents to agree or disagree with the Likert statement by dropping the neutral mid-point and using an even number of values, but this is not a good strategy because some people may indeed be neutral to a given statement and the forced choice approach does not provide them the opportunity to record their neutral stance. A key characteristic of a Likert scale is that even though the statements vary in different items or indicators, the anchors (``strongly disagree'' to ``strongly agree'') remain the same. Likert scales are ordinal scales because the anchors are not necessarily equidistant, even though sometimes they are treated like interval scales.
\item[\Gls{semanticdiffscale}] This is a multi-item scale where respondents are asked to indicate their opinions or feelings toward a single statement using different pairs of adjectives framed as polar opposites. For instance, the construct ``attitude toward health insurance'' can be measured using three items shown in Table \ref{tab06.04}. As in the Likert scale, the overall scale score may be a summation of individual item scores. Notice that in Likert scales, the statement changes but the anchors remain the same across items. However, in semantic differential scales, the statement remains constant, while the anchors (adjective pairs) change across items. Semantic differential is believed to be an excellent technique for measuring people's attitudes or feelings toward objects, events, or behaviors.
\begin{table}[H] \centering
\begin{tabularx}{0.95\linewidth}{p{0.10\linewidth}p{0.10\linewidth}p{0.10\linewidth}p{0.10\linewidth}p{0.10\linewidth}p{0.10\linewidth}p{0.10\linewidth}}
\toprule
\multicolumn{7}{p{0.95\linewidth}}{How would you rate your opinion on health insurance?} \\
\midrule
{} & {\footnotesize Very Much} & {\footnotesize Much} & {\footnotesize Neutral} & {\footnotesize Much} & {\footnotesize Very Much} & {} \\
\midrule
{\footnotesize Good} & $ \bigcirc $ & $ \bigcirc $ & $ \bigcirc $ & $ \bigcirc $ & $ \bigcirc $ & {\footnotesize Bad} \\
{\footnotesize Useful} & $ \bigcirc $ & $ \bigcirc $ & $ \bigcirc $ & $ \bigcirc $ & $ \bigcirc $ & {\footnotesize Useless} \\
{\footnotesize Caring} & $ \bigcirc $ & $ \bigcirc $ & $ \bigcirc $ & $ \bigcirc $ & $ \bigcirc $ & {\footnotesize Uncaring} \\
\bottomrule
\end{tabularx}
\caption{Semantic differential scale}
\label{tab06.04}
\end{table}
\item[\Gls{guttmanscale}] Designed by Louis Guttman, this composite scale uses a series of items arranged in increasing order of intensity of the construct of interest, from least intense to most intense. As an example, the construct ``attitude toward immigrants'' can be measured using five items shown in Table \ref{tab06.05}. Each item in the Guttman scale has a weight (not indicated on the scale) which varies with the intensity of that item, and the weighted combination of each response is used as an aggregate measure of an observation.
\begin{table}[H] \centering
\begin{tabularx}{0.95\linewidth}{p{0.70\linewidth}p{0.10\linewidth}p{0.10\linewidth}}
\toprule
\multicolumn{3}{p{0.95\linewidth}}{How would you rate your opinion on the following statements about immigrants?} \\
\midrule
Do you mind immigrants being citizens of your country? & Yes & No \\
Do you mind immigrants living in your own neighborhood? & Yes & No \\
Would you mind living next door to an immigrant? & Yes & No \\
Would you mind having an immigrant as your close friend? & Yes & No \\
Would you mind if someone in your family married an immigrant? & Yes & No \\
\bottomrule
\end{tabularx}
\caption{Guttman scale}
\label{tab06.05}
\end{table}
\end{description}
\section{Properties of Data}
\subsection{About The Normal Distribution (Bell Curve)}
When the quantitative data gathered from some statistical project are plotted on a graph they often form a \gls{normaldistribution} (sometimes called a ``bell curve'' due to its shape). As an example, consider the Scholastic Aptitude Test (SAT), which is administered to more than $ 1.5 $ million high school students every year. Figure \ref{fig06.01} was created with fake data but illustrates the results expected of a typical SAT administration.
\begin{figure}[H] \centering \includegraphics[width=\maxwidth{.95\linewidth}]{gfx/06-SATDistro} \caption{Normal Distribution} \label{fig06.01} \end{figure}
SAT scores lie between $ 400 $ and $ 1600 $ as listed across the X-axis and the number of students who earn each score is plotted. Since the most common score is $ 1000 $, that score is at the peak of the curve. Very few students scored above $ 1300 $ or below $ 650 $ and the curve is near the lower bound beyond those points. This illustrates a normal distribution where most scores are bunched near the center of the graph with only a few at either extreme. The normal distribution is important because it permits researchers to use specific techniques to test a hypothesis about the sample.
For example, perhaps a researcher hypothesized that the graduation rate at university ``A'' was higher than at university ``B'' because students' SAT scores were higher. Since SAT scores have a normal distribution, the researcher could use specific tests, like a t-test, to support or refute the hypothesis. However, if the data were not normally distributed then the researcher would need to use a different group of tests.
\subsection{Excess Kurtosis}
One way to mathematically describe a normal distribution is to calculate the length of the tails of a bell curve, and that is called its \gls{excesskurtosis}. For a normal distribution the excess kurtosis is $ 0.00 $; a positive excess kurtosis indicates longer tails while a negative excess kurtosis indicates shorter tails. Intuitively, many people believe the excess kurtosis represents the ``peaked-ness'' of the curve since longer tails would tend to lead to a more peaked graph; however, excess kurtosis is a measure of the data outliers, which are only present in the tails of the graph, so excess kurtosis is not directly indicative of the ``sharpness'' of the peak. It is difficult to categorically state that some level of excess kurtosis is good or bad. In some cases, data that form a graph with longer tails are desired but in other cases they would be a problem. Following are four examples of excess kurtosis. Notice that as the excess kurtosis increases the tails become longer.
\begin{figure}[H] \centering \includegraphics[width=\maxwidth{.95\linewidth}]{gfx/06-Kurtosis} \caption{Kurtosis in a Normal Distribution} \label{fig06.02} \end{figure}
\subsection{Skew}
The second numerical measure of a normal distribution that is frequently reported is its \gls{skew}, which is a measure of the symmetry of the curve about the mean of the data. The normal distribution in Figure \ref{fig06.03} has a skew of $ 0.00 $. A positive skew indicates that the tail on the right side is longer, which means that there are several data points on the far right side of the graph ``pulling'' the tail out that direction. A negative skew indicates that the tail on the left side of the graph is longer. Following are four examples of skew:
\begin{figure}[H] \centering \includegraphics[width=\maxwidth{.95\linewidth}]{gfx/06-Skew} \caption{Skew in a Normal Distribution} \label{fig06.03} \end{figure}
\section{Databases}
When a lot of data are gathered into a single location they are referred to as a \gls{database}. This is not a new concept. Fifty years ago a library would have a series of $ 3 \times 5 $ cards that contained information about all of the books in the library (title, author, subject, etc.). Those cards were stored in a wooden cabinet called the ``card catalog'' and customers could find information about, and the location of, whatever book they wanted. Today, databases are often contained in electronic form on the Internet where they can be accessed from customers' home computers, tablets, or even phones. Data in a database are typically stored in tables that resemble spreadsheets; that is, rows and columns where each row is one record (or observation) about some phenomenon and each column is one descriptor of that record.
For example, a database that contains information about the people who work at a particular company would be organized such that each row contained data about just one person and each column would contain a single aspect of that person's employment, like name, employee number, date of birth, etc.\footnote{Of course, databases are much more complex than described in this paragraph but this is not a database text so the simple explanation offered is adequate for this context.} A database is designed to deliver answers to questions through a lookup process so, for example, if the CEO of a company wanted to know the birth date for someone in the accounting department those data could be easily found.

One common problem with any database is ``dirty data.'' These are data that contain errors or are missing. For example, it is easy for a data entry clerk to enter something like ``1000000'' instead of ``100000'' (count the zeros) for a person's salary and create an ``outlier'' in the data. Another common problem is missing data. For example, if employees are asked to update their personal information but a few people could not remember their ZIP code then they would simply leave that field blank. Dirty data make it difficult to analyze the database. For example, if a researcher wanted to report the median salary for the workers in a factory but ten percent of the salaries were missing from the database then the median would not be accurate. There are several methods statisticians use to mitigate the problems caused by dirty data but those are beyond the scope of this text.
\subsection{Public Databases}
There are hundreds of publicly available databases that can be used for research. As one example, the United States Census Bureau maintains a huge database that contains information about the people of the United States.\footnote{The US Census Bureau's website is at \url{https://www.census.gov/}.} The data at that site are freely available to anyone who wants to use them, and the site is organized so information is fairly quick and easy to find. As an example, it is not difficult to discover that among adults in the United States, 29\% have a high school diploma, 20\% have a Bachelor's degree, 8\% have a Master's degree, 1.7\% have a Doctoral degree, and the rest fall elsewhere on the education spectrum. The US Census Bureau has advanced tools available that permit researchers to focus their search significantly.

When using a public database, researchers must be concerned with bias. For example, if the database includes people's attitudes toward work, how is a researcher going to know if the data gathered were from a well-designed, neutral survey or if they were just gathered using some sort of convenience sample? In general, databases found at governmental websites (with URLs that end with .gov) or education websites (with URLs that end with .edu) are more likely to be bias-free while databases from .com sites would need to be carefully scrutinized.

Sometimes, students will find a website with a list of links to journal articles or chapters from books. While these are valuable resources for a researcher, they are not the same as a database that contains raw data from a survey, experiment, or other activity. Journal articles provide good information for a literature review but would not be appropriate for an online database source.
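To make the idea of working with a downloaded database extract concrete, here is a short Python sketch using the pandas library. The small table is invented for illustration (a real extract from a site like the Census Bureau would be downloaded as a file and read into the same kind of structure); it shows the sort of quick frequency and grouped summaries described above.
\begin{verbatim}
import pandas as pd

# Invented mini-sample standing in for a public database extract;
# a real extract would be read from a downloaded file instead.
people = pd.DataFrame({
    "education": ["High school", "Bachelor's", "High school", "Master's",
                  "Bachelor's", "High school", "Doctorate", "Other"],
    "age": [34, 52, 41, 29, 63, 45, 38, 57],
})

# Frequency table of education levels, expressed as percentages --
# the same kind of summary quoted above for the US adult population.
print((people["education"].value_counts(normalize=True) * 100).round(1))

# Grouped numeric summary: median age within each education level.
print(people.groupby("education")["age"].median())
\end{verbatim}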
\subsection{Using Public Databases}
As an example of using a public database, imagine that the CEO of ``BASVFOODS'' is interested in opening a neighborhood market in a small town the company has never serviced before. In order to gather information about that location it is possible to use US Census Bureau data to find out things like the median household size, income, and education level. The CEO could then compare those data with similar data from a town that has a successful store to help inform a decision about opening a new store.
\section{Statistical Test Selection}
Once the data are gathered it is important to run appropriate statistical processes to see if the data contain anything of interest. There are hundreds of tests that can be used and researchers must consider both the goal of the analysis and the type of data being analyzed. The first step is to classify the variables being evaluated.
\begin{itemize}
\item \Gls{quantitativedata} are numeric and are generated through measurement or counting. \Gls{continuousdata} can be any value, including fractions or decimals, like a person's height or the length of time it takes to complete some task. \Gls{discretedata} are integers normally found by counting, like the number of people at an event or the age of a respondent.
\item \Gls{qualitativedata} are non-numeric and are often generated by people checking boxes on a survey. \Gls{ordinaldata} have some implied order, like a student's class (senior, junior, etc.) and \gls{nominaldata} have no order, like a respondent's gender.
\end{itemize}
\begin{figure}[H] \centering
\forestset{qtree/.style={for tree={parent anchor=south, child anchor=north, align=center, inner sep=3pt, draw}}}
\begin{forest}, baseline, qtree [Variable, fill=purple!40!white [{Quantitative\\(Measure or count)}, fill=cyan!40!white [{Continuous\\(Any value)}, fill=cyan!20!white] [{Discrete\\(Integers)}, fill=cyan!20!white] ] [{Qualitative\\(Tick boxes)}, fill=pink!60!white [{Ordinal\\(Ordered)}, fill=pink!40!white] [{Nominal\\(No order)}, fill=pink!40!white] ] ]
\end{forest}
\caption{Classifying Variables}
\label{fig06.08}
\end{figure}
Once researchers know the types of variables they are working with, the statistical tests they can use become evident.
\subsection{Summary Statistics}
One of the easiest types of analysis to complete is determining the center and spread of a dataset. This analysis is also one of the first that research readers expect to find. Here is a guide for what sort of central measure and spread to report.
\begin{itemize}
\item For normally distributed continuous data use the mean and standard deviation.
\item For skewed continuous or discrete data use the median and Interquartile Range (IQR).
\item For ordinal data use the median and Interquartile Range (IQR).
\item For nominal data use the mode as a central measure but there is no spread.
\end{itemize}
\begin{figure}[H] \centering
\forestset{qtree/.style={for tree={parent anchor=south, child anchor=north, align=center, inner sep=3pt, draw}}}
\begin{forest}, baseline, qtree [Which summary and spread?, fill=brown!40!white [{Quantitative}, fill=orange!40!white [{Normal Distribution\\(Mean\\Standard deviation)}, fill=orange!20!white] [{Skewed Data\\(Median\\IQR)}, fill=orange!20!white] ] [{Qualitative}, fill=yellow!60!white [{Ordinal\\(Median\\IQR)}, fill=yellow!40!white] [{Nominal\\(Mode\\No spread)}, fill=yellow!40!white] ] ]
\end{forest}
\caption{Central Measure and Spread}
\label{fig06.09}
\end{figure}
\subsection{Parametric vs.
Nonparametric Tests}
Normally, a researcher posits a hypothesis and then conducts research to either support or refute that hypothesis. The type of hypothesis test needed depends on the types of variables being tested. There are two broad categories of variables in a hypothesis test: independent and dependent.
\begin{itemize}
\item \Glspl{independentvariable} (explanatory variables) are those that cause something to happen; they explain why some outcome was observed. For example, if the hypothesis is that women spend more on groceries than men then the independent variable is the sex of the shopper. If the hypothesis is that elderly drivers are more dangerous than younger drivers then the independent variable is the age of the driver.
\item \Glspl{dependentvariable} (outcome variables) are those that measure the outcome of whatever is being observed. For example, if the hypothesis is that women spend more on groceries than men then the dependent variable is the amount of money spent. If the hypothesis is that elderly drivers are more dangerous than younger drivers then the dependent variable is the number of reported accidents.
\end{itemize}
The chart in Figure \ref{fig06.10} is used to determine if the hypothesis test should be parametric or nonparametric.
\begin{itemize}
\item \Gls{parametric} tests assume that the data follow a particular distribution, usually a normal distribution. Parametric tests are more powerful than nonparametric tests and are more likely to detect relationships or differences that exist.
\item \Gls{nonparametric} tests are used when the dependent variable is not normally distributed; that is, the data are skewed or are qualitative. Nonparametric techniques are usually based on ranks rather than the actual data and are usually less powerful than parametric tests.
\item \Gls{chisquare} is a test that is only used when the dependent variable is nominal in nature. A chi-square test determines if there is a significant difference between the actual observed frequencies and those hypothetically expected. For example, if a coin were tossed $ 100 $ times it would be expected to land ``heads'' half of the time but if the actual observation was that ``heads'' came up $ 75\% $ of the time then the chi-square statistic would indicate that this was a significant difference between what was expected and what was observed.
\end{itemize}
\begin{figure}[H] \centering
\forestset{qtree/.style={for tree={parent anchor=south, child anchor=north, align=center, inner sep=3pt, draw}}}
\begin{forest}, baseline, qtree [Dependent Variable, fill=green!40!white [{Quantitative}, fill=teal!40!white [{Normal Distribution\\(Parametric)}, fill=teal!20!white] [{Skewed Data\\(Nonparametric)}, fill=teal!20!white] ] [{Qualitative}, fill=lime!60!white [{Ordinal\\(Nonparametric)}, fill=lime!40!white] [{Nominal\\(Chi-square)}, fill=lime!40!white] ] ]
\end{forest}
\caption{Parametric vs. Nonparametric Selection}
\label{fig06.10}
\end{figure}
\subsection{Hypothesis Test Selection}
Researchers often hypothesize\footnote{Chapter \ref{ch14:mixed} contains more information about hypothesis testing.} that some treatment will lead to an outcome. In order to test that, they will apply the treatment to one group but not another and then compare the two groups to see if there is any difference. Table \ref{tab06.07} lists the statistical tests that are commonly used to compare the means of two or more groups.
In the table, ``independent'' groups are those that have no overlapping members while ``matched'' groups test the same members twice. As an example, if researchers wanted to know if a movie affected people in some way, they could survey people leaving two different theaters (independent groups) or survey the same people before and after seeing the movie (matched groups).
\begin{table}[H] \centering
\definecolor{ltgray}{gray}{0.95} % this is a light gray
\rowcolors{1}{}{ltgray} % zebra striping background
\begin{tabularx}{0.95\linewidth}{p{0.25\linewidth} p{0.15\linewidth} p{0.15\linewidth} p{0.15\linewidth} p{0.15\linewidth} }
\toprule
\textbf{Comparing} & \textbf{Dep Var} & \textbf{Ind Var} & \textbf{Para} & \textbf{NonPara} \\
\midrule
2 independent \newline groups & Quant & Binary & Indep t-test & Mann-Whitney \\
3+ independent \newline groups & Quant & Nom & ANOVA & Kruskal-Wallis \\
2 matched \newline groups & Quant & Time & Paired t-test & Wilcoxon \\
3+ measures \newline same subj & Quant & Time & Rep ANOVA & Friedman \\
\bottomrule
\end{tabularx}
\caption{Tests to Compare Two or More Samples}
\label{tab06.07}
\end{table}
Another common research goal is to see if there is any association (or ``correlation'') between two or more variables. Further, a correlation may be able to predict an outcome for a new observation. For example, a researcher may run an experiment where students work with a tutor once a week and then see if there is an improvement in test scores. In this case, ``tutoring time'' would be correlated with ``test scores'' and that may be used to predict a new student's test score based on the amount of tutoring time. Table \ref{tab06.08} lists the hypothesis tests that are commonly used to find correlations and predictions between two groups.
\begin{table}[H] \centering
\definecolor{ltgray}{gray}{0.95} % this is a light gray
\rowcolors{1}{}{ltgray} % zebra striping background
\begin{tabularx}{0.95\linewidth}{p{0.25\linewidth} p{0.15\linewidth} p{0.15\linewidth} p{0.15\linewidth} p{0.15\linewidth} }
\toprule
\textbf{Comparing} & \textbf{Dep Var} & \textbf{Ind Var} & \textbf{Para} & \textbf{NonPara} \\
\midrule
2 Continuous & Quant & Quant & Pearson's r & Spearman's rho \\
Prediction & Quant & Any & Regression & None \\
Prediction & Nominal & Any & Log \newline regression & None \\
2 Qual vars & Qual & Qual & None & Chi-square \\
\bottomrule
\end{tabularx}
\caption{Tests of Association Between Two Samples}
\label{tab06.08}
\end{table}
\section{Statistical Test Sampler}
While there are hundreds of statistical procedures available, this section covers those that are commonly used.
\subsection{Central Measures}
Three different central measures are commonly used, depending on the type of data being summarized. Calculating these values is normally done with software since some of the calculations are quite complex.
\begin{itemize}
\item \textbf{Mean}. An arithmetic mean is calculated by adding together all of the terms and then dividing by the number of terms. This process is taught in elementary school as calculating the ``average.'' However, if the terms have wildly different values then a \textit{geometric mean} is a better choice. In a geometric mean the values are multiplied together and then the $ n $-th root is taken, where $ n $ is the number of terms in the data. Finally, if the mean of a series of rates is needed, then a \textit{harmonic mean} is used.
For a harmonic mean all of the terms are reciprocated, a mean is found of those reciprocated terms, and then that mean is reciprocated.
\item \textbf{Median}. A median is found by putting all of the terms in numeric order and then selecting the middle term. This is useful if the dataset includes outliers, that is, a few values far outside the other terms. Medians are frequently used to report house values since a few houses may be worth far more than the other houses in an area. If the dataset has an even number of terms, so there is no middle term, then the median is found by taking the mean of the middle two terms.
\item \textbf{Mode}. The mode is used for nominal data and is the most frequently occurring term. For example, if a count of the number of undergraduate students in each class (senior, junior, etc.) is made and it turns out that there are more seniors than any other group then the mode would be ``senior.''
\end{itemize}
\subsection{Spread}
Two measures of spread are commonly used.
\begin{itemize}
\item \textbf{Range}. The range, or ``dispersion,'' is the difference between the highest value and lowest value. For example, if a survey recorded the ages of respondents and the greatest age was 70 while the least age was 30 then the range would be 40.
\item \textbf{Standard deviation}. This often misunderstood value is nothing more than an indicator of how much variation exists in the data, or how ``scattered out'' the data are. In general, the greater the standard deviation the more variation there is in the data. As an example, imagine that a professor administered an examination to a group of students and found the mean to be $ 70\% $ with a standard deviation of $ 15 $. Then the professor changed something about the class the next semester and administered the same examination to the new group of students and found the mean was $ 85\% $ with a standard deviation of $ 5 $. This result would indicate that the scores the second semester had much less variation, or they were grouped much ``tighter,'' and that would likely be good news for the professor.
\end{itemize}
\subsection{Frequency Tables}
Discrete or qualitative data items are normally reported in frequency tables where the counts for a particular item are displayed. When a frequency table has two dimensions it is usually called a ``cross-tab'' or ``pivot table.'' As an example, Table \ref{tab06.09} contains the results of an exit poll from the 2016 presidential election.\cite{cnn2016election}
\begin{table}[H] \centering
\definecolor{ltgray}{gray}{0.95} % this is a light gray
\rowcolors{1}{}{ltgray} % zebra striping background
\begin{tabularx}{0.95\linewidth}{ p{0.22\linewidth} p{0.22\linewidth} p{0.22\linewidth} p{0.22\linewidth}}
\toprule
\textbf{Party} & \textbf{Clinton} & \textbf{Trump} & \textbf{Other} \\
\midrule
Democrats & $ 89\% $ & $ 8\% $ & $ 3\% $ \\
Republicans & $ 8\% $ & $ 88\% $ & $ 4\% $ \\
Independents & $ 42\% $ & $ 46\% $ & $ 12\% $ \\
\bottomrule
\end{tabularx}
\caption{2016 Exit Poll.}
\label{tab06.09}
\end{table}
\subsection{Correlation}
\Gls{correlation} is a method used to describe a relationship between the independent (or x-axis) and dependent (or y-axis) variables in a research project. A correlation is expressed as a number between $ -1.0 $ and $ +1.0 $ where the closer the correlation gets to either extreme the stronger the association becomes. Thus, two variables with a correlation of $ +0.65 $ have a closer association than two variables with a correlation of $ +0.23 $.
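As a brief illustration of how a correlation is computed in practice, the following Python sketch (using invented numbers, not the data behind Table \ref{tab06.06}) builds a small dataset and prints both Pearson's r and the rank-based Spearman's rho listed in Table \ref{tab06.08}.
\begin{verbatim}
import pandas as pd

# Invented measurements for six cars (for illustration only).
cars = pd.DataFrame({
    "mpg":  [30, 28, 22, 18, 15, 12],
    "disp": [90, 105, 180, 250, 300, 360],
    "hp":   [70, 85, 120, 160, 210, 255],
})

# Pearson correlation matrix: values near -1.0 or +1.0 indicate a
# strong negative or positive association between two variables.
print(cars.corr().round(2))

# Spearman's rho, the nonparametric (rank-based) alternative.
print(cars.corr(method="spearman").round(2))
\end{verbatim}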
Correlations are often displayed in a matrix. Table \ref{tab06.06} shows the correlations between miles per gallon, displacement, and horsepower for a group of automobiles. In this table, notice that there is a negative correlation between horsepower and miles per gallon, which means that cars with greater horsepower get fewer miles per gallon. On the other hand, there is a positive correlation between displacement and horsepower so cars with greater engine displacement (larger engines) have more horsepower.
\begin{table}[H] \centering
\definecolor{ltgray}{gray}{0.95} % this is a light gray
\rowcolors{1}{}{ltgray} % zebra striping background
\begin{tabularx}{0.95\linewidth}{ p{0.22\linewidth} p{0.22\linewidth} p{0.22\linewidth} p{0.22\linewidth}}
\toprule
{} & mpg & disp & hp \\
\midrule
mpg & $ +1.00 $ & $ -0.85 $ & $ -0.78 $ \\
disp & $ -0.85 $ & $ +1.00 $ & $ +0.79 $ \\
hp & $ -0.78 $ & $ +0.79 $ & $ +1.00 $ \\
\bottomrule
\end{tabularx}
\caption{Selected Automobile Correlations.}
\label{tab06.06}
\end{table}
\subsection{Parametric Hypothesis Tests}
Some analysis techniques are only useful with \gls{parametric} data. While there are dozens of statistical processes that will work with parametric data, two are most commonly seen.
\begin{itemize}
\item \textbf{\Gls{ttest}}. A t-test is used to analyze the difference between two groups of samples that are normally distributed. For example, a researcher may hypothesize that there is a significant difference in the ages of people in two towns. Once people's ages are recorded a t-test could be used to see if there is a significant difference in the mean age for the people in those two towns. There are two varieties of t-test commonly used, depending on the type of data being analyzed. If the two groups being compared are independent of each other then an ``independent t-test'' would be used. However, researchers often use the same group but test them at two different times. For example, a medical trial may measure some factor (like blood pressure), apply some treatment, and then measure the factor a second time. In that case, a ``paired t-test'' would be used where the values from the first trial would be compared to those in the second.
\item \textbf{Analysis of Variance (ANOVA)}. An \Gls{anova} is similar to a t-test but is used to analyze three or more groups to see if there is a significant difference in any of the groups.
\end{itemize}
The result of either a t-test or ANOVA is a \textit{p-value} (``probability value''). A p-value describes the probability that some result was caused by pure chance and not from some applied treatment. P-values are expressed as probabilities and, normally, researchers expect a p-value under $ 0.05 $ ($ 5\% $) in order to declare the result to be significant. If the calculated p-value is above $ 0.05 $ then the researcher declares that no significant result was found.
\subsection{Nonparametric Hypothesis Tests}
Some analysis techniques are only useful with \gls{nonparametric} data. While there are dozens of statistical processes that will work with nonparametric data, two are most commonly seen.
\begin{itemize}
\item \textbf{Mann-Whitney U}. This test is used to determine if there are any significant differences in two groups of data that are not normally distributed, often categorical. As an example of using a Mann-Whitney U test, Gluck et al. used it in \textit{How Short Is Too Short? Implications of Length and Framing on the Effectiveness of Privacy Notices}\cite{gluck2016short}.
This study postulated that privacy notices were so long that people often did not read, or understand, them. They conducted an experiment where they presented groups of people with both short and long privacy notices and then assessed their understanding. A Mann-Whitney U test was used to compare the groups and, not surprisingly, people who used the short form seemed to understand their privacy rights better than those who used a long form. This experiment also varied the framing of the policy (positive vs. negative) and used a Kruskal-Wallis H test to compare the results but they found no effect from the different framing. This study is a good example of using both Kruskal-Wallis H and Mann-Whitney U in a single analysis.
\item \textbf{Kruskal-Wallis H}. This test is used to determine if there are any significant differences in three or more groups of data that are not normally distributed, often categorical. As an example of using a Kruskal-Wallis H test, Titlebaum and Lawrence published \textit{Perceived Motivations for Corporate Suite Ownership in the ``Big Four'' Leagues}\cite{titlebaum2016perceived}. This study looked at $ 29 $ different reasons (entertaining clients, supporting the community, business-to-business networking, etc.) why corporations purchase luxury suites in any of the four major sports leagues (NFL, MLB, NBA, and NHL). They surveyed corporate leadership and then applied a Kruskal-Wallis H test to the survey results to see which reason was most important. They found four that were significant: 1) Entertaining Employees, 2) Supporting the Community, 3) Perception of the Company in the Community, and 4) Customized Gifts for Suite Owners. They also compared the leagues, two at a time, to see if the reasons were different between the leagues and used a Mann-Whitney U test for this part of the analysis. They found that MLB had a significantly higher rank for ``Entertaining Employees'' than all of the other sports. This study is a good example of using both Kruskal-Wallis H and Mann-Whitney U in a single analysis.
\end{itemize}
The result of either a Mann-Whitney U or Kruskal-Wallis H is a \gls{pvalue} (``probability value''). A p-value describes the probability that some result was caused by pure chance and not from some applied treatment. P-values are expressed as probabilities and, normally, researchers expect a p-value under $ 0.05 $ ($ 5\% $) in order to declare the result to be significant. If the calculated p-value is above $ 0.05 $ then the researcher declares that no significant result was found.
\section{Data Mining}
Data mining is a relatively new statistical technique that attempts to extract (``mine'') valuable intelligence from massive databases. Raval\cite{raval2012data} published an excellent overview of data mining techniques. As but one example, a grocery store chain mines customer purchases, shopping habits, and other data to plan sales, send coupons, and make other suggestions that will, hopefully, lead to increased spending. While there are more than a dozen data mining techniques, these three are most common.
\subsection{Clustering}
It is beneficial to ``cluster'' customers in some way so advertising (and sales) can be more effective. For example, if a large grocery store chain can determine that most of the customers in one region share some common trait then it becomes easier to market to that region. Figure \ref{fig06.11} shows a scatter plot with three clusters.
This plot was generated from dummy data but it shows the type of clustering that a researcher may be able to find in a dataset. The location of a specific cluster in relationship to the others would perhaps drive a marketing campaign focused on the specific properties the customers in that one cluster share.
\begin{figure}[H] \centering \includegraphics[width=\maxwidth{.95\linewidth}]{gfx/06-Cluster} \caption{Example of Clustering} \label{fig06.11} \end{figure}
As an example of using clustering, Vogel, Greiser, and Mattfeld published \textit{Understanding Bike-Sharing Systems using Data Mining: Exploring Activity Patterns}\cite{vogel2011understanding} in 2011. They analyzed more than $ 760,000 $ pickups and returns at bike stations in Vienna from $ 2008-2009 $. Their goal was to determine if the stations could be clustered in some way by usage patterns. They used cluster analysis to divide the city's stations into five groups; for example, the ``returns morning pickups evening'' group showed an especially high number of morning returns and evening pickups. They provided the city with information that was designed to help in future station placement.
\subsection{Decision Tree}
A decision tree organizes known data so predictions can be made on new data. Figure \ref{fig06.05} was generated from a dataset that shows the fate of all the passengers on the Titanic. Starting at the top, few males survived. Of the females, there were no survivors who had more than $ 4.5 $ siblings or spouses on board, so large families perished. Notice, though, that near the bottom of the tree nearly all first class female passengers with few family members on board survived. A decision tree like this could be used to predict whether a given passenger survived.
\begin{figure}[H] \centering \includegraphics[width=\maxwidth{.95\linewidth}]{gfx/06-DecisionTree} \caption{Decision Tree} \label{fig06.05} \end{figure}
As an example of using a decision tree, Ona, Ona and Lopez completed a study of the Granada, Spain, public transportation system\cite{de2016transit}. They data mined $ 3,664 $ passenger surveys conducted from $ 2008 $ until $ 2011 $. The surveys asked passengers some demographic questions (age, etc.) and about various aspects of the bus service. The researchers first created four clusters of passenger types using cluster analysis: ``Young Students,'' ``Working Women,'' ``Sporadic Users,'' and ``Elderly Passengers.'' Then they generated five different decision trees, one for each group and one overall, to define the difference between ``poor'' and ``good'' service ratings from the passengers. For example, in the Overall tree, service frequency was the first, most important, branch on the tree; but for Young Students the first branch was punctuality. This is an excellent study that is easy to read and understand and shows how two different data mining techniques, cluster analysis and decision tree building, can be used in a single study.
\subsection{Market Basket}
A market basket analysis evaluates the products that customers purchase at the same time (they are in the same ``market basket'') and then uses those data to drive decisions on things like store organization and sales. For example, it is well known that customers who purchase beer also tend to purchase potato chips. What may be surprising is that customers who purchase beer also tend to purchase baby diapers.\footnote{It is fun to speculate why these two items are related.
Maybe parents run out of diapers in the middle of the night and while they are at the store they also buy some beer. Maybe they make a beer run to the store and then remember that they may also need diapers. At any rate, if a store puts diapers on sale they may also want to consider a beer sale!} A market basket analysis does not attempt to determine why these relationships exist, just that they do exist. A grocery store owner may choose to put beer and chips on sale at the same time or create a display near the front of the store with both beer and chips.
Figure \ref{fig06.06} is a graphic representation of the rule set found in Figure \ref{fig06.07}, created from a market basket analysis of a bakery. The goal was to determine what sorts of products customers purchased with cherry tarts. Rule $ 53 $ indicates that apricot danish and cherry tarts tend to be purchased at the same time, and that would be valuable information for a bakery owner. The explanation of the numbers displayed (``Support,'' ``Confidence,'' etc.) is beyond the scope of this class, but business owners would want to work with a researcher who could complete this sort of analysis.
\begin{figure}[H] \centering \includegraphics[width=\maxwidth{.95\linewidth}]{gfx/06-MarketBasketGraph} \caption{Market Basket Graph} \label{fig06.06} \end{figure}
\begin{figure}[H] \centering \includegraphics[width=\maxwidth{.95\linewidth}]{gfx/06-MarketBasketRules} \caption{Market Basket Rules} \label{fig06.07} \end{figure}
As an example of a market basket analysis, Musalem, Aburto, and Bosch completed a study of a mid-sized supermarket in Latin America\cite{musalem2018market}. That market sells approximately $ 7,000 $ products and the average basket contains products from $ 3.6 $ categories. They reviewed the register receipts from the month of July 2000 and recorded the products sold, units sold, date, and time. They categorized the products into four categories: non-perishable (cereals, flour, noodles, etc.), immediate consumption (meat, milk, cheese, etc.), hygiene (shampoo, conditioner, diapers, etc.), and hedonic (ice cream, beer, candy, etc.). They found that items in the hygiene basket were associated with larger transactions but there were not many of them. The non-perishable basket had a high number of different categories and a high transaction size, so these buyers are extremely important to the market. This study is a good example of how market basket analysis can be used to improve sales.
\section{Summary}\label{ch06:summary}
Figure \ref{fig06.04} illustrates the relationship between the various types of data and the rating scales commonly used to work with those data types. Researchers with a positivism philosophy tend to use parametric statistical analysis and gather interval and ratio data. Researchers with an interpretivism philosophy tend to use nonparametric statistical analysis and gather nominal and ordinal data. Nominal data are typically gathered with binary and semantic differential rating scales while ordinal data are typically gathered with Likert and Guttman rating scales.
\begin{figure}[H] \centering \includegraphics[width=\maxwidth{.95\linewidth}]{gfx/06-DataTypes} \caption{Data types} \label{fig06.04} \end{figure}
There are many public databases available from various government and educational sources. Researchers may want to consider using those resources for their projects. Finally, there are, literally, hundreds of statistical tests available for research projects, but only a few are commonly used.
This chapter discussed several statistical and data mining tests that are frequently encountered in research reports.
\subsection{\soarb{save-backtraces}}
\label{save-backtraces}
\index{save-backtraces}
Save trace information to explain chunks and justifications.
\subsubsection*{Synopsis}
\begin{verbatim}
save-backtraces [-ed]
\end{verbatim}
\subsubsection*{Options}
\begin{tabular}{|p{0.3\linewidth}|p{0.6\linewidth}|}
\hline
\soar{ -e, --enable, --on } & Turn explain sysparam on. \\
\hline
\soar{ -d, --disable, --off } & Turn explain sysparam off. \\
\hline
\end{tabular}
\subsubsection*{Description}
The \textbf{save-backtraces} variable is a toggle that controls whether or not backtracing information (from chunks and justifications) is saved. When \textbf{save-backtraces} is set to \textbf{off}, backtracing information is not saved and explanations of the chunks and justifications that are formed cannot be retrieved. When \textbf{save-backtraces} is set to \textbf{on}, backtracing information can be retrieved by using the explain-backtraces command. Saving backtracing information may slow down the execution of your Soar program, but it can be a very useful tool in understanding how chunks are formed.
\subsubsection*{See Also}
\hyperref[explain-backtraces]{explain-backtraces}
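\subsubsection*{Example}
For instance, using only the options listed above, backtracing information can be enabled before a run and disabled again afterwards (a hypothetical session sketch):
\begin{verbatim}
save-backtraces --enable
# ... run the agent so that chunks and justifications are formed ...
save-backtraces --disable
\end{verbatim}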
\chapter{Further Calculus} \section{Maclaurin series} \section{Improper integrals} \section{Volumes of solids of revolution} \section{Mean values} \section{Partial fractions} \section{Inverse trigonometric and hyperbolic functions} \section{Further integration}
% % Clonedel.tex % % History of LulzBot Printers % % Copyright (C) 2014, 2015 Aleph Objects, Inc. % % This document is licensed under the Creative Commons Attribution 4.0 % International Public License (CC BY-SA 4.0) by Aleph Objects, Inc. % \section{LulzBot Clonedel Mars P6} LulzBot Clonedel Mars P6. \begin{figure}[h!] \thisfloatpagestyle{empty} \includegraphics[keepaspectratio=true,height=0.40\textheight,width=1.00\textwidth,angle=0]{clonedel/mars-p6-front.jpg} \caption{LulzBot Clonedel Mars P6 Front.} \label{fig:clonedel-mars-p6-front} \end{figure} %clonedel/mars-p6-bottom.jpg %clonedel/mars-p6-front.jpg %clonedel/mars-p6-top.jpg %clonedel/mars-p6-top-left.jpg %clonedel/mars-p6-top-right.jpg
{ "alphanum_fraction": 0.7633802817, "avg_line_length": 25.3571428571, "ext": "tex", "hexsha": "fdeebf244eb45e162ce9286412efb973a46c009f", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "1923ed04c79b0eb81338b8be3fe2f2d57dae6e07", "max_forks_repo_licenses": [ "CC-BY-4.0" ], "max_forks_repo_name": "alephobjects/history-of-lulzbot-printers", "max_forks_repo_path": "source/Clonedel-mars-p6.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "1923ed04c79b0eb81338b8be3fe2f2d57dae6e07", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "CC-BY-4.0" ], "max_issues_repo_name": "alephobjects/history-of-lulzbot-printers", "max_issues_repo_path": "source/Clonedel-mars-p6.tex", "max_line_length": 118, "max_stars_count": null, "max_stars_repo_head_hexsha": "1923ed04c79b0eb81338b8be3fe2f2d57dae6e07", "max_stars_repo_licenses": [ "CC-BY-4.0" ], "max_stars_repo_name": "alephobjects/history-of-lulzbot-printers", "max_stars_repo_path": "source/Clonedel-mars-p6.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 247, "size": 710 }
\chapter{Expected outcomes} \label{ch-1}
{ "alphanum_fraction": 0.6888888889, "avg_line_length": 7.5, "ext": "tex", "hexsha": "5b151036e371d0733a9610f2c6ddfc3b948b9085", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "cf988f64aa400d4c398fe4cb6738a4b90264359d", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "DocTrivial/Research-Proposal", "max_forks_repo_path": "MainText/chapter6.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "cf988f64aa400d4c398fe4cb6738a4b90264359d", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "DocTrivial/Research-Proposal", "max_issues_repo_path": "MainText/chapter6.tex", "max_line_length": 40, "max_stars_count": null, "max_stars_repo_head_hexsha": "cf988f64aa400d4c398fe4cb6738a4b90264359d", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "DocTrivial/Research-Proposal", "max_stars_repo_path": "MainText/chapter6.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 14, "size": 45 }
\section{Reinforcement Learning for Automated Trading}
\label{sec:application_to_systematic_trading}
Many financial applications can be seen as sequential decision problems which naturally fall in the stochastic optimal control framework introduced above. In this section we discuss how reinforcement learning algorithms can be applied to the asset allocation problem, where an agent invests his capital in the various assets available in the market.

\subsection{Asset Allocation With Transaction Costs}
The asset allocation problem consists of determining how to dynamically invest the available capital in a portfolio of different assets in order to maximize the expected total return or another relevant performance measure. Let us consider a financial market consisting of $I+1$ different stocks that are traded only at discrete times $t \in \{0, 1, 2, \ldots\}$ and denote by ${Z}_t = {(Z_t^0, Z_t^1, \ldots, Z_t^I)}^T$ their prices at time $t$. Typically, $Z_t^0$ refers to a riskless asset whose dynamics are given by $Z_t^0 = {(1 + X)}^t$, where $X$ is the deterministic risk-free interest rate. The investment process works as follows: at time $t$, the investor observes the state of the market $S_t$, consisting for example of the past asset prices and other relevant economic variables, and subsequently chooses how to rebalance his portfolio, by specifying the units of each stock ${n}_t = {(n_t^0 , n_t^1 , \ldots , n_t^I)}^T$ to be held between $t$ and $t+1$. In doing so, he needs to take into account the transaction costs that he has to pay to the broker to change his position. At time $t+1$, the investor realizes a profit or a loss from his investment due to the stochastic variation of the stock values. The investor's goal is to maximize a given performance measure.

Let $W_t$ denote the wealth of the investor at time $t$. The profit realized between $t$ and $t+1$ is simply given by the difference between the trading results and the transaction costs paid to the broker. More formally
\begin{equation*}
	\Delta W_{t+1} = W_{t+1} - W_t = \text{PNL}_{t+1} - \text{TC}_{t}
\end{equation*}
where $\text{PNL}_{t+1}$ denotes the profit due to the variation of the portfolio asset prices between $t$ and $t+1$
\begin{equation*}
	\text{PNL}_{t+1} = {n}_t \cdot \Delta{Z}_{t+1} = \sum^{I}_{i=0} n_t^i (Z_{t+1}^i - Z_t^i)
\end{equation*}
and $\text{TC}_t$ denotes the fees paid to the broker to change the portfolio allocation and to hold the short positions
\begin{equation*}
	\text{TC}_t = \sum^{I}_{i=0} \delta_p^i \left| n_t^i - n_{t-1}^i\right| Z_t^i + \delta_f W_t \ind{{n}_t \neq {n}_{t-1}} + \sum^{I}_{i=0} \delta_s^i {(n_t^i)}^{-} Z_t^i
\end{equation*}
The transaction costs consist of three different components. The first term represents a transaction cost that is proportional to the change in value of the position in each asset. The second term is a fixed fraction of the total value of the portfolio which is paid only if the allocation is changed. The last term represents the fees paid to the broker for the shares borrowed to build a short position.
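Before turning to portfolio weights, it may help to see the wealth dynamics in code. The following OCaml fragment is a purely illustrative sketch (the function and parameter names are ours and do not refer to any library); it treats all three fee components as costs that reduce wealth, as in the description above, and uses a single fee coefficient per category instead of asset-specific ones.
\begin{verbatim}
(* Illustrative sketch of the one-step wealth update
   Delta W_{t+1} = PNL_{t+1} - TC_t.
   n_prev, n : units of each asset held before and after rebalancing
   z, z_next : asset prices at times t and t+1
   delta_p, delta_f, delta_s : proportional, fixed and short-selling fees *)

let sum f a = Array.fold_left ( +. ) 0. (Array.mapi f a)

let pnl ~n ~z ~z_next =
  sum (fun i n_i -> n_i *. (z_next.(i) -. z.(i))) n

let transaction_costs ~w ~n_prev ~n ~z ~delta_p ~delta_f ~delta_s =
  let proportional =
    sum (fun i n_i -> delta_p *. abs_float (n_i -. n_prev.(i)) *. z.(i)) n in
  let fixed = if n <> n_prev then delta_f *. w else 0. in
  (* (n_i)^- is the negative part of the position, i.e. the size of a short *)
  let short = sum (fun i n_i -> delta_s *. max 0. (-. n_i) *. z.(i)) n in
  proportional +. fixed +. short

let next_wealth ~w ~n_prev ~n ~z ~z_next ~delta_p ~delta_f ~delta_s =
  w +. pnl ~n ~z ~z_next
     -. transaction_costs ~w ~n_prev ~n ~z ~delta_p ~delta_f ~delta_s
\end{verbatim}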
The portfolio return between $t$ and $t+1$ is thus given by
\begin{equation}\label{eq:portfolio_return}
	X_{t+1} = \frac{\Delta W_{t+1}}{W_t} = \sum^{I}_{i=0} \left[ a_t^i X_{t+1}^i - \delta_i \left| a_t^i - \tilde{a}_t^i \right| - \delta_s {(a_t^i)}^- \right] - \delta_f \ind{{a}_t \neq \tilde{{a}}_{t-1}}
\end{equation}
where
\begin{equation*}
	X_{t+1}^i = \frac{\Delta Z_{t+1}^i}{Z_t^i}
\end{equation*}
is the return of the $i$-th stock between $t$ and $t+1$,
\begin{equation*}
	a_t^i = \frac{n_t^i Z_t^i}{W_t}
\end{equation*}
is the fraction of wealth invested in the $i$-th stock between time $t$ and $t+1$, and finally
\begin{equation*}
	\tilde{a}_t^i = \frac{n_{t-1}^i Z_t^i}{W_t} = \frac{a_{t-1}^i (1+X_t^i)} {1 + X_t}
\end{equation*}
is the fraction of wealth invested in the $i$-th stock just before the reallocation. We assume that the agent invests all his wealth at each step, so that $W_t$ can also be interpreted as the value of his portfolio. This assumption leads to the following constraint on the portfolio weights
\begin{equation}
	\sum^{I}_{i=0} a_t^i = 1 \;\;\;\;\; \forall t \in \{0, 1, 2, \ldots\}
\end{equation}
We notice that we are neglecting the typical margin requirements on the short positions, which would reduce the available capital at time $t$. Considering margin requirements would lead to a more complex constraint on the portfolio weights which would be difficult to treat in the reinforcement learning framework. Plugging this constraint into Eq. (\ref{eq:portfolio_return}), we obtain
\begin{equation}\label{eq:portfolio_return_benchmark}
	X_{t+1} = X + \sum^{I}_{i=1} a_t^i (X_{t+1}^i - X) - \sum^{I}_{i=0} \left[\delta_i \left| a_t^i - \tilde{a}_t^i \right| + \delta_s^i {(a_t^i)}^-\right] - \delta_f \ind{{a}_t \neq \tilde{{a}}_{t-1}}
\end{equation}
which highlights the role of the risk-free asset as a benchmark for the portfolio returns. The total profit realized by the investor between $t=0$ and $T$ is
\begin{equation*}
	\Pi_T = W_T - W_0 = \sum^{T}_{t=1} \Delta W_t = \sum^{T}_{t=1} W_{t-1} X_t
\end{equation*}
The portfolio return between $t=0$ and $T$ is given by
\begin{equation*}
	X_{0,T} = \frac{W_T}{W_0} - 1 = \prod_{t=1}^T (1+X_t) - 1
\end{equation*}
In order to cast the asset allocation problem in the reinforcement learning framework, we consider the log-return of the portfolio between $t=0$ and $T$
\begin{equation}
	R_{0,T} = \log \frac{W_T}{W_0} = \sum^{T}_{t=1} \log(1+X_t) = \sum_{t=1}^T R_t
\end{equation}
where $R_{t+1}$ is the log-return of the portfolio between $t$ and $t+1$
\begin{equation}
	R_{t+1} = \log \left\{ 1 + \sum^{I}_{i=0} \left[ a_t^i X_{t+1}^i - \delta_i \left| a_t^i - \tilde{a}_t^i \right| - \delta_s {(a_t^i)}^- \right] - \delta_f \ind{{a}_t \neq \tilde{{a}}_{t-1}}\right\}
\end{equation}
The portfolio log-return can be used as the reward function of an RL algorithm, either in an offline or in an online approach.

\subsection{Reinforcement Learning Application}
In the previous section we derived the reward function for the asset allocation problem with transaction costs. In order to apply the policy gradient algorithms discussed in the previous sections we still need to define the state space, the action space and the agent's policy. For simplicity, we limit ourselves to the case of a single risky asset, i.e. $I = 1$, but the discussion could be generalized to the multi-asset case.\\
We assume that at each time step the agent considers the $P+1$ past returns of the risky asset, i.e. $\{X_t, X_{t-1}, \ldots, X_{t-P}\}$.
In order to properly incorporate the effects of transaction costs into his decision process, the agent must keep track of his current position $\tilde{a}_t$. The state of the system is thus given by $S_t = \{X_t, X_{t-1}, \ldots, X_{t-P}, \tilde{a}_t\}$. We might also include some external variables $Y_t$ that may be relevant to the trader, such as the common technical indicators used in practice. Furthermore, these input variables may be used to construct more complex features, for example using deep learning techniques such as a deep auto-encoder.\\
The agent, or trading system, specifies the portfolio weights $a_t = (a_t^0, a_t^1)^T$ according to a long-short strategy, i.e. the agent may be long ($a_t^1 = +1$) or short ($a_t^1 = -1$) on the risky asset, while $a_t^0 = 1 - a_t^1$ since the agent invests all the available capital at each time step. In the GPOMDP framework we assume that the agent selects $a_t^1$ according to a Boltzmann policy, i.e.
\begin{equation}
	\pi_\theta(s, +1) = \frac{e^{\theta^T s}}{1 + e^{\theta^T s}} \;\;\;\;\; \pi_\theta(s, -1) = \frac{1}{1 + e^{\theta^T s}}
\end{equation}
where we included a bias term in the parameters and in the state. In the parameter-based formulation, we assume that the agent selects actions according to the binary controller
\begin{equation}
	F_\theta(s) = \sign(\theta^T s)
\end{equation}
where the controller parameters are normally distributed $\theta \sim \calN(\mu, \diag(\sigma))$. Since the formulation of the asset allocation problem given above is non-episodic, we actually applied the online version of the algorithms discussed above. The main considerations made above still hold, and we refer to the full thesis for the details.
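To make the single-asset setting concrete, here is a minimal OCaml sketch of the two ingredients above (purely illustrative: the names and the use of a single proportional fee coefficient for both assets are our own simplifications). It samples a long/short action and evaluates the one-step log-return reward $R_{t+1}$ of the previous subsection.
\begin{verbatim}
(* theta and s are arrays of equal length; a bias term is assumed to be
   already appended to both, as in the text. *)
let dot theta s =
  Array.fold_left ( +. ) 0. (Array.mapi (fun i t -> t *. s.(i)) theta)

(* pi_theta(s, +1) = e^{theta.s} / (1 + e^{theta.s}) = 1 / (1 + e^{-theta.s}) *)
let prob_long theta s = 1. /. (1. +. exp (-. dot theta s))

(* GPOMDP-style stochastic policy: sample an action in {+1, -1}. *)
let sample_action theta s =
  if Random.float 1. < prob_long theta s then 1. else -1.

(* Parameter-based formulation: deterministic binary controller F_theta(s). *)
let controller theta s = if dot theta s >= 0. then 1. else -1.

(* One-step log-return reward for I = 1, with a0 = 1 - a1 invested at the
   risk-free rate x_free. *)
let reward ~a1 ~a1_tilde ~x_risky ~x_free ~delta ~delta_s ~delta_f =
  let a0 = 1. -. a1 and a0_tilde = 1. -. a1_tilde in
  let gross = a0 *. x_free +. a1 *. x_risky in
  let costs =
    delta *. (abs_float (a0 -. a0_tilde) +. abs_float (a1 -. a1_tilde))
    +. delta_s *. (max 0. (-. a0) +. max 0. (-. a1))
    +. (if a1 <> a1_tilde then delta_f else 0.)
  in
  log (1. +. gross -. costs)
\end{verbatim}
In an online run, the reward produced after each rebalancing step is the quantity from which the policy gradient estimate is built.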
{ "alphanum_fraction": 0.7255630091, "avg_line_length": 79.5047619048, "ext": "tex", "hexsha": "d6f34db2aa5d091c35d31e331c947c84c14ca6b7", "lang": "TeX", "max_forks_count": 34, "max_forks_repo_forks_event_max_datetime": "2021-08-21T21:48:53.000Z", "max_forks_repo_forks_event_min_datetime": "2017-05-15T07:51:52.000Z", "max_forks_repo_head_hexsha": "1a3ae97023acff1ee5e2d197a446734117a6fb99", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "pnecchi/Thesis", "max_forks_repo_path": "Pacs/Report/Sections/3_application_to_systematic_trading.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "1a3ae97023acff1ee5e2d197a446734117a6fb99", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "pnecchi/Thesis", "max_issues_repo_path": "Pacs/Report/Sections/3_application_to_systematic_trading.tex", "max_line_length": 827, "max_stars_count": 80, "max_stars_repo_head_hexsha": "1a3ae97023acff1ee5e2d197a446734117a6fb99", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "AmineAboussalah/Thesis", "max_stars_repo_path": "Pacs/Report/Sections/3_application_to_systematic_trading.tex", "max_stars_repo_stars_event_max_datetime": "2022-01-24T23:47:13.000Z", "max_stars_repo_stars_event_min_datetime": "2016-06-13T15:20:29.000Z", "num_tokens": 2567, "size": 8348 }
\section{Cap product and ``\v{C}ech'' cohomology}
Let $R$ be the commutative ring of coefficients. The cap product is a map $\cap: H^p(X)\otimes H_n(X)\to H_{q}(X)$ where $p+q=n$. This comes from a chain-level map $S^p(X)\otimes S_n(X)\xrightarrow{1\otimes\alpha} S^p(X)\otimes S_p(X)\otimes S_q(X)\xrightarrow{\langle-,-\rangle\otimes 1}R\otimes S_q(X)\cong S_q(X)$. Using our explicit formula for $\alpha$, we can write:
\begin{equation*}
	\cap:\beta\otimes\sigma\mapsto\beta\otimes(\sigma\circ\alpha_p)\otimes(\sigma\circ\omega_q)\mapsto\left(\beta(\sigma\circ\alpha_p)\right)\cdot (\sigma\circ\omega_q)
\end{equation*}
There are many things to say:
\begin{enumerate}
\item $ H_\ast(X)$ is a module over $ H^\ast(X)$.
\item The only reasonable thing to ask for in terms of naturality is the following. Suppose $f:X\to Y$, and let $b\in H^p(Y)$ and $x\in H_n(X)$. We then have $f_\ast(f^\ast(b)\cap x)=b\cap f_\ast(x)$, where $f^\ast(b)\cap x\in H_q(X)$ and $b\cap f_\ast(x)\in H_q(Y)$. This is called a projection formula. To see this, let $[\beta]=b$. Then:
\begin{align*}
f_\ast(f^\ast(\beta)\cap\sigma)& =f_\ast(\left(f^\ast(\beta)(\sigma\circ\alpha_p)\right)\cdot(\sigma\circ\omega_q))\\
& =f_\ast(\beta(f\circ\sigma\circ\alpha_p)\cdot(\sigma\circ\omega_q))\\
& =\beta(f\circ\sigma\circ\alpha_p)\cdot f_\ast(\sigma\circ\omega_q)\\
& = \beta(f\circ\sigma\circ\alpha_p)\cdot(f\circ\sigma\circ\omega_q)\\
& = \beta\cap f_\ast(\sigma)
\end{align*}
So we're done.
\item There's a relation between the cap and Kronecker product. Any space has an augmentation $\varepsilon:X\to\ast$, so I get $\varepsilon_\ast: H_\ast(X)\to R$. Maybe we should compute $\varepsilon_\ast(\beta\cap \sigma)$. I will get zero unless $p=n$ and $q=0$. What does our formula say? This just says that $\varepsilon_\ast(b\cap x)=\varepsilon_\ast(\beta(\sigma)\cdot c^0_{\sigma(n)})=\beta(\sigma)\varepsilon_\ast(c^0_{\sigma(n)})=\beta(\sigma)=\langle \beta,\sigma\rangle$ because $\varepsilon_\ast$ counts the number of points, i.e., it's $1$. Hence $\varepsilon_\ast(b\cap x)=\langle b,x\rangle$.
\item What is $\langle a\cup b,x\rangle$? This is $\varepsilon_\ast((a\cup b)\cap x)=\varepsilon_\ast(a\cap(b\cap x))$ by an assertion in the previous lecture (namely that $(\alpha\cup\beta)\cap x=\alpha\cap(\beta\cap x)$ and $1\cap x=x$), which becomes $\langle a,b\cap x\rangle$. In other words, $\langle a\cup b,x\rangle=\langle a,b\cap x\rangle$. So the cup product is adjoint to the cap product.
\end{enumerate}

\subsection{Relative $\cap$}
There's a lot of structure, but we want more. We now want to try to understand the relative cap product. Suppose $A\subseteq X$ is a subspace. We have:
\begin{equation*}
	\xymatrix{
		0\ar[d] & & 0\ar[d]\\
		S^p(X)\otimes S_n(A)\ar[d]^{1\otimes i_\ast}\ar[r]^{i^\ast\otimes 1} & S^p(A)\otimes S_n(A)\ar[r]^{\cap} & S_q(A)\ar[d]\\
		S^p(X)\otimes S_n(X)\ar[rr]^\cap\ar[d] & & S_q(X)\ar[d]\\
		S^p(X)\otimes S_n(X,A)\ar[d]\ar@{-->}[rr] & & S_q(X,A)\ar[d]\\
		0 & & 0
	}
\end{equation*}
The left sequence is exact because $0\to S_n(A)\to S_n(X)\to S_n(X,A)\to 0$ splits and tensoring with $S^p(X)$ still leaves it exact. We have to check that this diagram commutes. Let $\beta\otimes \sigma\in S^p(X)\otimes S_n(A)$.
We then get:
\begin{equation*}
	\beta\otimes\sigma\xrightarrow{i^\ast\otimes 1}i^\ast\beta\otimes\sigma\to i^\ast(\beta)\cap\sigma\xrightarrow{i_\ast}i_\ast(i^\ast(\beta)\cap\sigma)
\end{equation*}
And:
\begin{equation*}
	\beta\otimes\sigma\xrightarrow{1\otimes i_\ast}\beta\otimes i_\ast\sigma\to \beta\cap i_\ast(\sigma)
\end{equation*}
So they're equal by the projection formula. Hence you get $\cap: H^p(X)\otimes H_n(X,A)\to H_q(X,A)$ that makes $ H_\ast(X,A)$ an $ H^\ast(X)$-module.

\subsection{A different perspective on excision}
Recall what excision is. We know that $ H_\ast(X-U,A-U)\cong H_\ast(X,A)$. There's another perspective on this. Suppose $K\subseteq U\subseteq X$ such that $\overline{K}\subseteq\mathrm{Int}(U)$. To simplify things, suppose $K$ is closed and $U$ is open. Let $A=X-K\supseteq X-U=V$. Then excision says that $ H_\ast(X-V,A-V)= H_\ast(X-(X-U),(X-K)-(X-U))\cong H_\ast(X,A)= H_\ast(X,X-K)$. There's a simpler expression: $ H_\ast(X-(X-U),(X-K)-(X-U))= H_\ast(U,U-K)$, so $ H_\ast(U,U-K)\cong H_\ast(X,X-K)$, i.e., it depends only on an open neighborhood of $K$. A question that we now have is: how does this depend on $ H_\ast(K)$? $ H^\ast(K)$? This is really what Poincar\'{e} duality wants to understand.
\begin{example}
	We'll eventually be talking, for example, about $X=S^3$ and $K=\text{knot}$.
\end{example}
We want to understand $ H_\ast(X,X-K)$ better. We have a cap product $ H^p(X)\otimes H_n(X,X-K)\to H_q(X,X-K)$. We just decided that $ H_n(X,X-K)\cong H_n(U,U-K)$, and so I have the cap product $ H^p(U)\otimes H_n(U,U-K)\to H_q(U,U-K)$. Hence I get a cap product map $ H^p(U)\otimes H_n(X,X-K)\to H_q(X,X-K)$. But this seems to depend upon a choice of $U$. What if I make $U$ smaller?
\begin{lemma}
	Let $U\supseteq V\supseteq K$. Then:
	\begin{equation*}
		\xymatrix{
			H^p(U)\ar[dd]^{i^\ast\otimes 1}\otimes H_n(X,X-K)\ar[dr]^\cap & \\
			& H_q(X,X-K)\\
			H^p(V)\otimes H_n(X,X-K)\ar[ur]^\cap
		}
	\end{equation*}
\end{lemma}
\begin{proof}
	Hint: use the projection formula again.
\end{proof}
Let $\mathcal{U}_K$ be the set of open neighborhoods of $K$ in $X$. This is a poset (actually a directed set, because you can take intersections), under reverse inclusion as the ordering. This lemma says that $ H^p:\mathcal{U}_K\to\mathbf{Ab}$ is a functor.
\begin{definition}
	$\cHH^p(K):=\varinjlim_{U\in\mathcal{U}_K} H^p(U)$.
\end{definition}
This is bad notation because it depends on the way $K$ is sitting in $X$. You therefore get $\cHH^p(K)\otimes H_n(X,X-K)\xrightarrow{\cap} H_q(X,X-K)$. This is the best you can do. It's the natural structure that this relative homology has, i.e., $ H_\ast(X,X-K)$ is a module over $\cHH^\ast(K)$.

Sometimes, $\cHH^\ast(K)$ will just be $ H^\ast(K)$. Suppose $K\subseteq X$ satisfies the condition (called the ``regularity'' condition) that for every open $U\supseteq K$, there exists an open $V$ with $U\supseteq V\supseteq K$ such that $K\to V$ is a homotopy equivalence (or actually just a homology isomorphism). (For example, a smooth knot in $S^3$ satisfies this condition.) Then:
\begin{lemma}
	Suppose $\cI$ is a nonempty directed set. Let $F:\cI\to\mathbf{Ab}$, and suppose I have a natural transformation $\theta:F\to c_A$ (for example, a map from $F$ to its direct limit).
This expresses $A$ as $\varinjlim_\cI F$ provided that for all $i$, there is $j\geq i$ such that $F(i)\to A$ factors through $F(j)\to A$, which should be an isomorphism, i.e.:
\begin{equation*}
	\xymatrix{
		F(i)\ar[dr]\ar[rr] & & A\\
		& F(j)\ar[ur]^\cong
	}
\end{equation*}
\end{lemma}
\begin{proof}
	Given $a\in A$, it has to come from somewhere if $A$ is to be the direct limit. This is obviously true. Also, for any $i$ and any $a_i\in F(i)$ such that $a_i\mapsto 0$ in $A$, there exists $j\geq i$ such that $a_i\mapsto 0\in F(j)$. This is also obvious.
\end{proof}
\begin{remark}
	This is a really strong condition, by the way.
\end{remark}
This works in the case that $K$ is regular in $X$. Thus, under this condition, $\cHH^p(K)\cong H^p(K)$. One other comment is that, more generally, if $X$ is a Euclidean neighborhood retract (a retract of a neighborhood in some $\RR^n$), and $K$ is locally compact, then $\cHH^p(K)$ depends only on $K$, and it is isomorphic to \v{C}ech cohomology (which is a different type of cohomology theory).
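As a quick illustration of how the regularity condition gets used, let us spell out the knot example from above.
\begin{example}
	A smooth knot $K\subseteq S^3$ admits a tubular neighborhood, hence arbitrarily small open neighborhoods that deformation retract onto it, so $K$ is regular in $S^3$. Since $K$ is homeomorphic to $S^1$, we get
	\begin{equation*}
		\cHH^p(K)\cong H^p(K)\cong H^p(S^1),
	\end{equation*}
	which is $R$ for $p=0,1$ and zero otherwise; the cap product then makes $ H_\ast(S^3,S^3-K)$ a module over this graded ring.
\end{example}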
{ "alphanum_fraction": 0.6871083855, "avg_line_length": 86.2183908046, "ext": "tex", "hexsha": "0f0be5d6ad73e240e2a6471ee41cd1024fd36bad", "lang": "TeX", "max_forks_count": 3, "max_forks_repo_forks_event_max_datetime": "2019-08-13T17:38:04.000Z", "max_forks_repo_forks_event_min_datetime": "2017-10-21T18:15:11.000Z", "max_forks_repo_head_hexsha": "3f5d3189e2082716a69fccc1711d02ed848552d2", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "ichung/algtop-notes", "max_forks_repo_path": "old-905/lec-32-cap-product-cech-cohomology.tex", "max_issues_count": 3, "max_issues_repo_head_hexsha": "3f5d3189e2082716a69fccc1711d02ed848552d2", "max_issues_repo_issues_event_max_datetime": "2018-03-13T17:59:46.000Z", "max_issues_repo_issues_event_min_datetime": "2018-03-13T17:54:37.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "ichung/algtop-notes", "max_issues_repo_path": "old-905/lec-32-cap-product-cech-cohomology.tex", "max_line_length": 704, "max_stars_count": 5, "max_stars_repo_head_hexsha": "3f5d3189e2082716a69fccc1711d02ed848552d2", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "ichung/algtop-notes", "max_stars_repo_path": "old-905/lec-32-cap-product-cech-cohomology.tex", "max_stars_repo_stars_event_max_datetime": "2021-05-27T22:47:06.000Z", "max_stars_repo_stars_event_min_datetime": "2017-04-26T15:00:52.000Z", "num_tokens": 2641, "size": 7501 }
\def\true{true} \let\fpacm\true \documentclass[onecolumn,11pt,nocopyrightspace,preprint]{sigplanconf} \usepackage{amstext} \usepackage[T1]{fontenc} \usepackage[utf8]{inputenc} \usepackage{moreverb} \usepackage{tikz} \usepackage{xspace} \usepackage{mymacros} \def\fppdf{true} \usepackage{fppdf} \input{macros} \input{version} % Let Menhir's version number appear at the bottom right of every page. \makeatletter \def\@formatyear{\menhirversion} \makeatother % Hevea-specific logic; see % http://hevea.inria.fr/doc/manual007.html % http://hevea.inria.fr/doc/manual008.html \usepackage{hevea} \newenvironment{heveapicture}{ \begin{toimage} }{ \end{toimage} \imageflush{} } % ------------------------------------------------------------------------------ % Headings. \title{\menhir Reference Manual\\\normalsize (version \menhirversion)} \begin{document} \authorinfo{François Pottier\and Yann Régis-Gianas} {INRIA} {\{Francois.Pottier, Yann.Regis-Gianas\}@inria.fr} \maketitle % ------------------------------------------------------------------------------ \clearpage \tableofcontents \clearpage % ------------------------------------------------------------------------------ \section{Foreword} \menhir is a parser generator. It turns high-level grammar specifications, decorated with semantic actions expressed in the \ocaml programming language~\cite{ocaml}, into parsers, again expressed in \ocaml. It is based on Knuth's LR(1) parser construction technique~\cite{knuth-lr-65}. It is strongly inspired by its precursors: \yacc~\cite{johnson-yacc-79}, \texttt{ML-Yacc}~\cite{tarditi-appel-00}, and \ocamlyacc~\cite{ocaml}, but offers a large number of minor and major improvements that make it a more modern tool. This brief reference manual explains how to use \menhir. It does not attempt to explain context-free grammars, parsing, or the LR technique. Readers who have never used a parser generator are encouraged to read about these ideas first~\cite{aho-86,appel-tiger-98,hopcroft-motwani-ullman-00}. They are also invited to have a look at the \distrib{demos} directory in \menhir's distribution. Potential users of \menhir should be warned that \menhir's feature set is not completely stable. There is a tension between preserving a measure of compatibility with \ocamlyacc, on the one hand, and introducing new ideas, on the other hand. Some aspects of the tool, such as the error handling mechanism, are still potentially subject to incompatible changes: for instance, in the future, the current error handling mechanism (which is based on the \error token, see \sref{sec:errors}) could be removed and replaced with an entirely different mechanism. There is room for improvement in the tool and in this reference manual. Bug reports and suggestions are welcome! % ------------------------------------------------------------------------------ \section{Usage} \menhir is invoked as follows: \begin{quote} \cmenhir \nt{option} \ldots \nt{option} \nt{filename} \ldots \nt{filename} \end{quote} Each of the file names must end with \mly (unless \ocoq is used, in which case it must end with \vy) and denotes a partial grammar specification. These partial grammar specifications are joined (\sref{sec:split}) to form a single, self-contained grammar specification, which is then processed. The following optional command line switches allow controlling many aspects of the process. \docswitch{\obase \nt{basename}} This switch controls the base name of the \ml and \mli files that are produced. 
That is, the tool will produce files named \nt{basename}\texttt{.ml} and \nt{basename}\texttt{.mli}. Note that \nt{basename} can contain occurrences of the \texttt{/} character, so it really specifies a path and a base name. When only one \nt{filename} is provided on the command line, the default \nt{basename} is obtained by depriving \nt{filename} of its final \mly suffix. When multiple file names are provided on the command line, no default base name exists, so that the \obase switch \emph{must} be used. \docswitch{\ocmly} This switch causes \menhir to produce a \cmly file in addition to its normal operation. This file contains a (binary-form) representation of the grammar and automaton (see \sref{sec:sdk}). \docswitch{\ocomment} This switch causes a few comments to be inserted into the \ocaml code that is written to the \ml file. \docswitch{\ocompareerrors \nt{filename1} \ocompareerrors \nt{filename2}} Two such switches must always be used in conjunction so as to specify the names of two \messages files, \nt{filename1} and \nt{filename2}. Each file is read and internally translated to a mapping of states to messages. \menhir then checks that the left-hand mapping is a subset of the right-hand mapping. This feature is typically used in conjunction with \olisterrors to check that \nt{filename2} is complete (that is, covers all states where an error can occur). For more information, see \sref{sec:errors:new}. \docswitch{\ocompileerrors \nt{filename}} This switch causes \menhir to read the file \nt{filename}, which must obey the \messages file format, and to compile it to an \ocaml function that maps a state number to a message. The \ocaml code is sent to the standard output channel. At the same time, \menhir checks that the collection of input sentences in the file \nt{filename} is correct and irredundant. For more information, see \sref{sec:errors:new}. \docswitch{\ocoq} This switch causes \menhir to produce Coq code. See \sref{sec:coq}. \docswitch{\ocoqlibpath \nt{path}} This switch allows specifying under what name (or path) the Coq support library MenhirLib is known to Coq. When \menhir runs in \ocoq mode, the generated parser contains references to several modules in this library. This path is used to qualify these references. Its default value is \texttt{MenhirLib}. \docswitch{\ocoqlibnopath} This switch indicates that references to the Coq library MenhirLib should \emph{not} be qualified. This was the default behavior of \menhir prior to 2018/05/30. This switch is provided for compatibility, but normally should not be used. \docswitch{\ocoqnoactions} (Used in conjunction with \ocoq.) This switch causes the semantic actions present in the \vy file to be ignored and replaced with \verb+tt+, the unique inhabitant of Coq's \verb+unit+ type. This feature can be used to test the Coq back-end with a standard grammar, that is, a grammar that contains \ocaml semantic actions. Just rename the file from \mly to \vy and set this switch. \docswitch{\ocoqnocomplete} (Used in conjunction with \ocoq.) This switch disables the generation of the proof of completeness of the parser (\sref{sec:coq}). This can be necessary because the proof of completeness is possible only if the grammar has no conflict (not even a benign one, in the sense of \sref{sec:conflicts:benign}). This can be desirable also because, for a complex grammar, completeness may require a heavy certificate and its validation by Coq may take time. \docswitch{\odepend} See \sref{sec:build}. 
\docswitch{\odump} This switch causes a description of the automaton to be written to the file \nt{basename}\automaton. \docswitch{\oechoerrors \nt{filename}} This switch causes \menhir to read the \messages file \nt{filename} and to produce on the standard output channel just the input sentences. (That is, all messages, blank lines, and comments are filtered out.) For more information, see \sref{sec:errors:new}. \docswitch{\oexplain} This switch causes conflict explanations to be written to the file \nt{basename}\conflicts. See also \sref{sec:conflicts}. \docswitch{\oexternaltokens \nt{T}} This switch causes the definition of the \token type to be omitted in \nt{basename}\texttt{.ml} and \nt{basename}\texttt{.mli}. Instead, the generated parser relies on the type $T$\texttt{.}\token, where $T$ is an \ocaml module name. It is up to the user to define module $T$ and to make sure that it exports a suitable \token type. Module $T$ can be hand-written. It can also be automatically generated out of a grammar specification using the \oonlytokens switch. \docswitch{\ofixedexc} This switch causes the exception \texttt{Error} to be internally defined as a synonym for \texttt{Parsing.Parse\_error}. This means that an exception handler that catches \texttt{Parsing.Parse\_error} will also catch the generated parser's \texttt{Error}. This helps increase \menhir's compatibility with \ocamlyacc. There is otherwise no reason to use this switch. \docswitch{\ograph} This switch causes a description of the grammar's dependency graph to be written to the file \nt{basename}\dott. The graph's vertices are the grammar's nonterminal symbols. There is a directed edge from vertex $A$ to vertex $B$ if the definition of $A$ refers to $B$. The file is in a format that is suitable for processing by the \emph{graphviz} toolkit. \docswitch{\oinfer, \oinferwrite, \oinferread} See \sref{sec:build}. \docswitch{\oinspection} This switch requires \otable. It causes \menhir to generate not only the monolithic and incremental APIs (\sref{sec:monolithic}, \sref{sec:incremental}), but also the inspection API (\sref{sec:inspection}). Activating this switch causes a few more tables to be produced, resulting in somewhat larger code size. \docswitch{\ointerpret} This switch causes \menhir to act as an interpreter, rather than as a compiler. No \ocaml code is generated. Instead, \menhir reads sentences off the standard input channel, parses them, and displays outcomes. This switch can be usefully combined with \otrace. For more information, see \sref{sec:interpret}. \docswitch{\ointerpreterror} This switch is analogous to \ointerpret, except \menhir expects every sentence to cause an error on its last token, and displays information about the state in which the error is detected, in the \messages file format. For more information, see \sref{sec:errors:new}. \docswitch{\ointerpretshowcst} This switch, used in conjunction with \ointerpret, causes \menhir to display a concrete syntax tree when a sentence is successfully parsed. For more information, see \sref{sec:interpret}. \docswitch{\olisterrors} This switch causes \menhir to produce (on the standard output channel) a complete list of input sentences that cause an error, in the \messages file format. For more information, see \sref{sec:errors:new}. \docswitch{\ologautomaton \nt{level}} When \nt{level} is nonzero, this switch causes some information about the automaton to be logged to the standard error channel. 
\docswitch{\ologcode \nt{level}} When \nt{level} is nonzero, this switch causes some information about the generated \ocaml code to be logged to the standard error channel. \docswitch{\ologgrammar \nt{level}} When \nt{level} is nonzero, this switch causes some information about the grammar to be logged to the standard error channel. When \nt{level} is 2, the \emph{nullable}, \emph{FIRST}, and \emph{FOLLOW} tables are displayed. \docswitch{\onodollars} This switch disallows the use of positional keywords of the form \kw{\$i}. \docswitch{\onoinline} This switch causes all \dinline keywords in the grammar specification to be ignored. This is especially useful in order to understand whether these keywords help solve any conflicts. \docswitch{\onostdlib} This switch instructs \menhir to \emph{not} use its standard library (\sref{sec:library}). \docswitch{\oocamlc \nt{command}} See \sref{sec:build}. \docswitch{\oocamldep \nt{command}} See \sref{sec:build}. \docswitch{\oonlypreprocess} This switch causes the grammar specifications to be transformed up to the point where the automaton's construction can begin. The grammar specifications whose names are provided on the command line are joined (\sref{sec:split}); all parameterized nonterminal symbols are expanded away (\sref{sec:templates}); type inference is performed, if \oinfer is enabled; all nonterminal symbols marked \dinline are expanded away (\sref{sec:inline}). This yields a single, monolithic grammar specification, which is printed on the standard output channel. \docswitch{\oonlytokens} This switch causes the \dtoken declarations in the grammar specification to be translated into a definition of the \token type, which is written to the files \nt{basename}\texttt{.ml} and \nt{basename}\texttt{.mli}. No code is generated. This is useful when a single set of tokens is to be shared between several parsers. The directory \distrib{demos/calc-two} contains a demo that illustrates the use of this switch. \docswitch{\orawdepend} See \sref{sec:build}. \docswitch{\ostdlib \nt{directory}} This switch controls the directory where the standard library (\sref{sec:library}) is found. It takes precedence over both the installation-time directory and the directory that may be specified via the environment variable \verb+$MENHIR_STDLIB+. \docswitch{\ostrict} This switch causes several warnings about the grammar and about the automaton to be considered errors. This includes warnings about useless precedence declarations, non-terminal symbols that produce the empty language, unreachable non-terminal symbols, productions that are never reduced, conflicts that are not resolved by precedence declarations, and end-of-stream conflicts. \docswitch{\oo{suggest-*}} See \sref{sec:build}. \docswitch{\otable} This switch causes \menhir to use its table-based back-end, as opposed to its (default) code-based back-end. When \otable is used, \menhir produces significantly more compact and somewhat slower parsers. See \sref{sec:qa} for a speed comparison. The table-based back-end produces rather compact tables, which are analogous to those produced by \yacc, \bison, or \ocamlyacc. These tables are not quite stand-alone: they are exploited by an interpreter, which is shipped as part of the support library \menhirlib. For this reason, when \otable is used, \menhirlib must be made visible to the \ocaml compilers, and must be linked into your executable program. The \texttt{--suggest-*} switches, described above, help do this. 
The code-based back-end compiles the LR automaton directly into a nest of mutually recursive \ocaml functions. In that case, \menhirlib is not required. The incremental API (\sref{sec:incremental}) and the inspection API (\sref{sec:inspection}) are made available only by the table-based back-end. \docswitch{\otimings} This switch causes internal timing information to be sent to the standard error channel. \docswitch{\otrace} This switch causes tracing code to be inserted into the generated parser, so that, when the parser is run, its actions are logged to the standard error channel. This is analogous to \texttt{ocamlrun}'s \texttt{p=1} parameter, except this switch must be enabled at compile time: one cannot selectively enable or disable tracing at runtime. \docswitch{\oignoreprec} This switch suppresses all warnings about useless \dleft, \dright, \dnonassoc and \dprec declarations. \docswitch{\oignoreone \nt{symbol}} This switch suppresses the warning that is normally emitted when \menhir finds that the terminal symbol \nt{symbol} is unused. \docswitch{\oignoreall} This switch suppresses all of the warnings that are normally emitted when \menhir finds that some terminal symbols are unused. \docswitch{\oupdateerrors \nt{filename}} This switch causes \menhir to read the \messages file \nt{filename} and to produce on the standard output channel a new \messages file that is identical, except the auto-generated comments have been re-generated. For more information, see \sref{sec:errors:new}. \docswitch{\oversion} This switch causes \menhir to print its own version number and exit. % ------------------------------------------------------------------------------ \section{Lexical conventions} A semicolon character (\kw{;}) \emph{may} appear after a declaration (\sref{sec:decls}). An old-style rule (\sref{sec:old:rules}) \emph{may} be terminated with a semicolon. Also, within an old-style rule, each producer (\sref{sec:producers}) \emph{may} be terminated with a semicolon. A new-style rule (\sref{sec:new:rules}) \emph{must not} be terminated with a semicolon. Within such a rule, the elements of a sequence \emph{must} be separated with semicolons. Semicolons are not allowed to appear anywhere except in the places mentioned above. This is in contrast with \ocamlyacc, which views semicolons as insignificant, just like whitespace. Identifiers (\nt{id}) coincide with \ocaml identifiers, except they are not allowed to contain the quote (\kw{'}) character. Following \ocaml, identifiers that begin with a lowercase letter (\nt{lid}) or with an uppercase letter (\nt{uid}) are distinguished. A quoted identifier \nt{qid} is a string enclosed in double quotes. Such a string cannot contain a double quote or a backslash. Quoted identifiers are used as token aliases (\sref{sec:tokens}). Comments are C-style (surrounded with \kw{/*} and \kw{*/}, cannot be nested), C++-style (announced by \kw{/$\!$/} and extending until the end of the line), or \ocaml-style (surrounded with \kw{(*} and \kw{*)}, can be nested). Of course, inside \ocaml code, only \ocaml-style comments are allowed. \ocaml type expressions are surrounded with \kangle{and}. Within such expressions, all references to type constructors (other than the built-in \textit{list}, \textit{option}, etc.) must be fully qualified. 
% ------------------------------------------------------------------------------ \section{Syntax of grammar specifications} \newcommand{\modifier}{(\,\dquestion \metachoice \dplus \metachoice \dstar\hspace{-.3mm})} \begin{figure} \begin{center} \begin{tabular}{r@{}c@{}l} \nt{specification} \is \sepspacelist{\nt{declaration}} \percentpercent \sepspacelist{\nt{rule}} \optional{\percentpercent \textit{\ocaml code}} \\ \nt{declaration} \is \dheader{\textit{\ocaml code}} \\ && \dparameter \ocamlparam \\ && \dtoken \optional{\ocamltype} \sepspacelist{\nt{uid} \optional{\nt{qid}}} \\ && \dnonassoc \sepspacelist{\nt{uid}} \\ && \dleft \sepspacelist{\nt{uid}} \\ && \dright \sepspacelist{\nt{uid}} \\ && \dtype \ocamltype \sepspacelist{\nt{lid}} \\ && \dstart \optional{\ocamltype} \sepspacelist{\nt{lid}} \\ && \dattribute \sepspacelist{\nt{actual}} \sepspacelist{\nt{attribute}} \\ && \kw{\%} \nt{attribute} \\ % a grammar-wide attribute && \donerrorreduce \sepspacelist{\nt{lid}} \\ \nt{attribute} \is \kw{[@} \nt{name} \nt{payload} \kw{]} \\[4mm] \emph{old syntax} --- \nt{rule} \is \optional{\dpublic} \optional{\dinline} \nt{lid} \oparams{\nt{id}} \deuxpoints \precseplist\barre{\nt{group}} \\ \nt{group} \is \seplist{\ \barre}{\nt{production}} \daction \optional {\dprec \nt{id}} \\ \nt{production} \is \sepspacelist{\nt{producer}} \optional {\dprec \nt{id}} \\ \nt{producer} \is \optional{\nt{lid} \dequal} \nt{actual} \\ \nt{actual} \is \nt{id} \oparams{\nt{actual}} \\ && \nt{actual} \modifier \\ && \seplist{\ \barre}{\nt{group}} % not really allowed everywhere \\[4mm] \emph{new syntax} --- \nt{rule} \is \optional{\dpublic} \dlet \nt{lid} \oparams{\nt{id}} (\,\dcolonequal \metachoice \dequalequal\hspace{-.2mm}) \expression \\ \expression \is % a choice between sequence expressions: \precseplist\barre\expression \\&& % a sequence expression: \optional{\pattern \dequal{}} \expression \dsemi \expression \\&& % a symbol expression: \nt{id} \oparams{\expression} \\&& % a symbol expression: \expression \modifier \\&& % an action expression: \daction \optional {\dprec \nt{id}} % %prec is in fact allowed to appear before the semantic action, % but this is not documented. \\&& % an action expression: \dpfaction{\nt{\ocaml id}} \optional {\dprec \nt{id}} \\ \pattern \is \nt{lid} \,\metachoice\, \dunderscore \,\metachoice\, \dtilde \,\metachoice\, \tuple\pattern % The places where attributes can be attached are not shown in this % figure. This is intentional; let's avoid pollution. Attributes are % described separately. \end{tabular} \end{center} \caption{Syntax of grammar specifications} \label{fig:syntax} \end{figure} The syntax of grammar specifications appears in \fref{fig:syntax}. The places where attributes can be attached are not shown; they are documented separately (\sref{sec:attributes}). % % (For compatibility with \ocamlyacc, some specifications that do not fully % adhere to this syntax are also accepted.) % A grammar specification begins with a sequence of declarations (\sref{sec:decls}), ended by a mandatory \percentpercent keyword. % Following this keyword, a sequence of rules is expected. Each rule defines a nonterminal symbol~\nt{lid}, whose name must begin with a lowercase letter. % % In reality, in the old syntax, this is enforced only for start symbols. % In the new syntax, this is enforced for all symbols. % % A rule can also *extend* a symbol, but let's not mention that here. 
% A rule is expressed either in the ``old syntax'' (\sref{sec:old:rules}) or in the ``new syntax'' (\sref{sec:new:rules}), which is slightly more elegant and powerful. \subsection{Declarations} \label{sec:decls} \subsubsection{Headers} \label{sec:decls:headers} A header is a piece of \ocaml code, surrounded with \dheader{and}. It is copied verbatim at the beginning of the \ml file. It typically contains \ocaml \kw{open} directives and function definitions for use by the semantic actions. If a single grammar specification file contains multiple headers, their order is preserved. However, when two headers originate in distinct grammar specification files, the order in which they are copied to the \ml file is unspecified. \subsubsection{Parameters} \label{sec:parameter} A declaration of the form: \begin{quote} \dparameter \ocamlparam \end{quote} causes the entire parser to become parameterized over the \ocaml module \nt{uid}, that is, to become an \ocaml functor. The directory \distrib{demos/calc-param} contains a demo that illustrates the use of this switch. If a single specification file contains multiple \dparameter declarations, their order is preserved, so that the module name \nt{uid} introduced by one declaration is effectively in scope in the declarations that follow. When two \dparameter declarations originate in distinct grammar specification files, the order in which they are processed is unspecified. Last, \dparameter declarations take effect before \dheader{$\ldots$}, \dtoken, \dtype, or \dstart declarations are considered, so that the module name \nt{uid} introduced by a \dparameter declaration is effectively in scope in \emph{all} \dheader{$\ldots$}, \dtoken, \dtype, or \dstart declarations, regardless of whether they precede or follow the \dparameter declaration. This means, in particular, that the side effects of an \ocaml header are observed only when the functor is applied, not when it is defined. \subsubsection{Tokens} \label{sec:tokens} A declaration of the form: \begin{quote} \dtoken \optional{\ocamltype} $\nt{uid}_1$ \optional{$\nt{qid}_1$} $\;\ldots\;$ $\nt{uid}_n$ \optional{$\nt{qid}_n$} \end{quote} defines the identifiers $\nt{uid}_1, \ldots, \nt{uid}_n$ as tokens, that is, as terminal symbols in the grammar specification and as data constructors in the \textit{token} type. If an \ocaml type $t$ is present, then these tokens are considered to carry a semantic value of type $t$, otherwise they are considered to carry no semantic value. If a quoted identifier $\nt{qid}_i$ is present, then it is considered an alias for the terminal symbol $\nt{uid}_i$. (This feature, known as ``token aliases'', is borrowed from Bison.) % https://www.gnu.org/software/bison/manual/html_node/Token-Decl.html#Token-Decl Throughout the grammar, the quoted identifier $\nt{qid}_i$ is then synonymous with the identifier $\nt{uid}_i$. % For example, if one declares: \begin{verbatim} %token PLUS "+" \end{verbatim} then the quoted identifier \texttt{"+"} stands for the terminal symbol \texttt{PLUS} throughout the grammar. An example of the use of token aliases appears in the directory \distrib{demos/calc-alias}. % Token aliases can be used to improve the readability of a grammar. One must keep in mind, however, that they are just syntactic sugar: they are not interpreted in any way by Menhir or conveyed to tools like \ocamllex. % They could be considered confusing by a reader who mistakenly believes that they are interpreted as string literals. 
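To make the effect of \dtoken declarations concrete, here is an illustrative sketch (not a verbatim excerpt of any generated file) of the \ocaml type that the declarations \verb!%token <int> INT! and \verb!%token PLUS "+"! give rise to in \nt{basename}\texttt{.mli}:
\begin{verbatim}
(* Sketch: every %token declaration yields a data constructor; a declared
   OCaml type becomes the constructor's argument.  The alias "+" leaves no
   trace here, since it is pure syntactic sugar. *)
type token =
  | INT of int
  | PLUS
\end{verbatim}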
\subsubsection{Priority and associativity} \label{sec:assoc} A declaration of one of the following forms: \begin{quote} \dnonassoc $\nt{uid}_1 \ldots \nt{uid}_n$ \\ \dleft $\nt{uid}_1 \ldots \nt{uid}_n$ \\ \dright $\nt{uid}_1 \ldots \nt{uid}_n$ \end{quote} assigns both a \emph{priority level} and an \emph{associativity status} to the symbols $\nt{uid}_1, \ldots, \nt{uid}_n$. The priority level assigned to $\nt{uid}_1, \ldots, \nt{uid}_n$ is not defined explicitly: instead, it is defined to be higher than the priority level assigned by the previous \dnonassoc, \dleft, or \dright declaration, and lower than that assigned by the next \dnonassoc, \dleft, or \dright declaration. The symbols $\nt{uid}_1, \ldots, \nt{uid}_n$ can be tokens (defined elsewhere by a \dtoken declaration) or dummies (not defined anywhere). Both can be referred to as part of \dprec annotations. Associativity status and priority levels allow shift/reduce conflicts to be silently resolved (\sref{sec:conflicts}). \subsubsection{Types} \label{sec:type} A declaration of the form: \begin{quote} \dtype \ocamltype $\nt{lid}_1 \ldots \nt{lid}_n$ \end{quote} assigns an \ocaml type to each of the nonterminal symbols $\nt{lid}_1, \ldots, \nt{lid}_n$. For start symbols, providing an \ocaml type is mandatory, but is usually done as part of the \dstart declaration. For other symbols, it is optional. Providing type information can improve the quality of \ocaml's type error messages. A \dtype declaration may concern not only a nonterminal symbol, such as, say, \texttt{expression}, but also a fully applied parameterized nonterminal symbol, such as \texttt{list(expression)} or \texttt{separated\_list(COMMA, option(expression))}. The types provided as part of \dtype declarations are copied verbatim to the \ml and \mli files. In contrast, headers (\sref{sec:decls:headers}) are copied to the \ml file only. For this reason, the types provided as part of \dtype declarations must make sense both in the presence and in the absence of these headers. They should typically be fully qualified types. % TEMPORARY type information can be mandatory in --coq mode; document? \subsubsection{Start symbols} \label{sec:start} A declaration of the form: \begin{quote} \dstart \optional{\ocamltype} $\nt{lid}_1 \ldots \nt{lid}_n$ \end{quote} declares the nonterminal symbols $\nt{lid}_1, \ldots, \nt{lid}_n$ to be start symbols. Each such symbol must be assigned an \ocaml type either as part of the \dstart declaration or via separate \dtype declarations. Each of $\nt{lid}_1, \ldots, \nt{lid}_n$ becomes the name of a function whose signature is published in the \mli file and that can be used to invoke the parser. \subsubsection{Attribute declarations} Attribute declarations of the form \dattribute \sepspacelist{\nt{actual}} \sepspacelist{\nt{attribute}} and \kw{\%} \nt{attribute} are explained in \sref{sec:attributes}. \subsubsection{Extra reductions on error} \label{sec:onerrorreduce} A declaration of the form: \begin{quote} \donerrorreduce $\nt{lid}_1 \ldots \nt{lid}_n$ \end{quote} marks the nonterminal symbols $\nt{lid}_1, \ldots, \nt{lid}_n$ as potentially eligible for reduction when an invalid token is found. This may cause one or more extra reduction steps to be performed before the error is detected. More precisely, this declaration affects the automaton as follows. Let us say that a production $\nt{lid} \rightarrow \ldots$ is ``reducible on error'' if its left-hand symbol~\nt{lid} appears in a \donerrorreduce declaration. 
After the automaton has been constructed and after any conflicts have been resolved, in every state~$s$, the following algorithm is applied: \begin{enumerate} \item Construct the set of all productions that are ready to be reduced in state~$s$ and are reducible on error; \item Test if one of them, say $p$, has higher ``on-error-reduce-priority'' than every other production in this set; \item If so, in state~$s$, replace every error action with a reduction of the production~$p$. (In other words, for every terminal symbol~$t$, if the action table says: ``in state~$s$, when the next input symbol is~$t$, fail'', then this entry is replaced with: ``in state~$s$, when the next input symbol is~$t$, reduce production~$p$''.) \end{enumerate} If step 3 above is executed in state~$s$, then an error can never be detected in state~$s$, since all error actions in state~$s$ are replaced with reduce actions. Error detection is deferred: at least one reduction takes place before the error is detected. It is a ``spurious'' reduction: in a canonical LR(1) automaton, it would not take place. An \donerrorreduce declaration does not affect the language that is accepted by the automaton. It does not affect the location where an error is detected. It is used to control in which state an error is detected. If used wisely, it can make errors easier to report, because they are detected in a state for which it is easier to write an accurate diagnostic message (\sref{sec:errors:diagnostics}). % This may make the tables bigger (but I have no statistics). % This makes LRijkstra significantly slower. Like a \dtype declaration, an \donerrorreduce declaration may concern not only a nonterminal symbol, such as, say, \texttt{expression}, but also a fully applied parameterized nonterminal symbol, such as \texttt{list(expression)} or \texttt{separated\_list(COMMA, option(expression))}. The ``on-error-reduce-priority'' of a production is that of its left-hand symbol. The ``on-error-reduce-priority'' of a nonterminal symbol is determined implicitly by the order of \donerrorreduce declarations. In the declaration $\donerrorreduce\;\nt{lid}_1 \ldots \nt{lid}_n$, the symbols $\nt{lid}_1, \ldots, \nt{lid}_n$ have the same ``on-error-reduce-priority''. They have higher ``on-error-reduce-priority'' than the symbols listed in previous \donerrorreduce declarations, and lower ``on-error-reduce-priority'' than those listed in later \donerrorreduce declarations. \subsection{Rules---old syntax} \label{sec:old:rules} In its simplest form, a rule begins with the nonterminal symbol \nt{lid}, followed by a colon character (\deuxpoints), and continues with a sequence of production groups (\sref{sec:productiongroups}). Each production group is preceded with a vertical bar character (\barre); the very first bar is optional. The meaning of the bar is choice: the nonterminal symbol \nt{id} develops to either of the production groups. We defer explanations of the keyword \dpublic (\sref{sec:split}), of the keyword \dinline (\sref{sec:inline}), and of the optional formal parameters $\tuple{\nt{id}}$ (\sref{sec:templates}). \subsubsection{Production groups} \label{sec:productiongroups} In its simplest form, a production group consists of a single production (\sref{sec:productions}), followed by an \ocaml semantic action (\sref{sec:actions}) and an optional \dprec annotation (\sref{sec:prec}). A production specifies a sequence of terminal and nonterminal symbols that should be recognized, and optionally binds identifiers to their semantic values. 
\paragraph{Semantic actions} \label{sec:actions} A semantic action is a piece of \ocaml code that is executed in order to assign a semantic value to the nonterminal symbol with which this production group is associated. A semantic action can refer to the (already computed) semantic values of the terminal or nonterminal symbols that appear in the production via the semantic value identifiers bound by the production. For compatibility with \ocamlyacc, semantic actions can also refer to unnamed semantic values via positional keywords of the form \kw{\$1}, \kw{\$2}, etc.\ This style is discouraged. (It is in fact forbidden if \onodollars is turned on.) Furthermore, as a positional keyword of the form \kw{\$i} is internally rewritten as \nt{\_i}, the user should not use identifiers of the form \nt{\_i}. \paragraph{\dprec annotations} \label{sec:prec} An annotation of the form \dprec \nt{id} indicates that the precedence level of the production group is the level assigned to the symbol \nt{id} via a previous \dnonassoc, \dleft, or \dright declaration (\sref{sec:assoc}). In the absence of a \dprec annotation, the precedence level assigned to each production is the level assigned to the rightmost terminal symbol that appears in it. It is undefined if the rightmost terminal symbol has an undefined precedence level or if the production mentions no terminal symbols at all. The precedence level assigned to a production is used when resolving shift/reduce conflicts (\sref{sec:conflicts}). \paragraph{Multiple productions in a group} If multiple productions are present in a single group, then the semantic action and precedence annotation are shared between them. This short-hand effectively allows several productions to share a semantic action and precedence annotation without requiring textual duplication. It is legal only when every production binds exactly the same set of semantic value identifiers and when no positional semantic value keywords (\kw{\$1}, etc.) are used. \subsubsection{Productions} \label{sec:productions} A production is a sequence of producers (\sref{sec:producers}), optionally followed by a \dprec annotation (\sref{sec:prec}). If a precedence annotation is present, it applies to this production alone, not to other productions in the production group. It is illegal for a production and its production group to both carry \dprec annotations. \subsubsection{Producers} \label{sec:producers} A producer is an actual (\sref{sec:actual}), optionally preceded with a binding of a semantic value identifier, of the form \nt{lid} \dequal. The actual specifies which construction should be recognized and how a semantic value should be computed for that construction. The identifier \nt{lid}, if present, becomes bound to that semantic value in the semantic action that follows. Otherwise, the semantic value can be referred to via a positional keyword (\kw{\$1}, etc.). \subsubsection{Actuals} \label{sec:actual} In its simplest form, an actual is just a terminal or nonterminal symbol $\nt{id}$. If it is a parameterized non-terminal symbol (see \sref{sec:templates}), then it should be applied: $\nt{id}\tuple{\nt{actual}}$. An actual may be followed with a modifier (\dquestion, \dplus, or \dstar). This is explained further on (see \sref{sec:templates} and \fref{fig:sugar}). An actual may also be an ``anonymous rule''. In that case, one writes just the rule's right-hand side, which takes the form $\seplist{\ \barre\ }{\nt{group}}$. (This form is allowed only as an argument in an application.) 
This form is expanded on the fly to a definition of a fresh non-terminal symbol, which is declared \dinline. For instance, providing an anonymous rule as an argument to \nt{list}: \begin{quote} \begin{tabular}{l} \nt{list} \dlpar \basic{e} = \nt{expression}; \basic{SEMICOLON} \dpaction{\basic{e}} \drpar \end{tabular} \end{quote} is equivalent to writing this: \begin{quote} \begin{tabular}{l} \nt{list} \dlpar \nt{expression\_SEMICOLON} \drpar \end{tabular} \end{quote} where the non-terminal symbol \nt{expression\_SEMICOLON} is chosen fresh and is defined as follows: \begin{quote} \begin{tabular}{l} \dinline \nt{expression\_SEMICOLON}: \newprod \basic{e} = \nt{expression}; \basic{SEMICOLON} \dpaction{\basic{e}} \end{tabular} \end{quote} \subsection{Rules---new syntax} \label{sec:new:rules} Please be warned that \textbf{the new syntax is considered experimental} and is subject to change in the future. % TEMPORARY à supprimer un jour... In its simplest form, a rule takes the form \dlet \nt{lid} \dcolonequal \expression. % Its left-hand side \nt{lid} is a nonterminal symbol; its right-hand side is an expression. % Such a rule defines an ordinary nonterminal symbol, while the alternate form \dlet \nt{lid} \dequalequal \expression defines an \dinline nonterminal symbol (\sref{sec:inline}), that is, a macro. % A rule can be preceded with the keyword \dpublic (\sref{sec:split}) and can be parameterized with a tuple of formal parameters $\tuple{\nt{id}}$ (\sref{sec:templates}). % The various forms of expressions, listed in \fref{fig:syntax}, are: % \begin{itemize} \item A \textbf{choice} between several expressions, % \precseplist\barre\expression. \optional{\barre} \expressionsub{1} \barre ${}\ldots{}$ \barre\expressionsub{n}. The leading bar is optional. \item A \textbf{sequence} of two expressions, \pattern \dequal \expressionsub{1} \dsemi \expressionsub{2}. The semantic value produced by \expressionsub{1} is decomposed according to the pattern \pattern. The \ocaml variables introduced by \pattern may appear in a semantic action that ends the sequence \expressionsub{2}. \item A sequence \dtilde \dequal \nt{id}${}_1$ \dsemi \expressionsub{2}, which is sugar for \nt{id}${}_1$ \dequal \nt{id}${}_1$ \dsemi \expressionsub{2}. This is a \textbf{pun}. % This is a special case of the previous form, % yet it receives special treatment; this is the % only case where ~ represents a deterministically-chosen name. \item A sequence \expressionsub{1} \dsemi \expressionsub{2}, which is sugar for \dunderscore \dequal \expressionsub{1} \dsemi \expressionsub{2}. \item A \textbf{symbol} \nt{id}, possibly applied to a tuple of expressions \dlpar \expressionsub{1},\ ${}\ldots{}$,\ \expressionsub{n} \drpar. It is worth noting that such an expression \emph{can} form the end of a sequence: \nt{id} at the end of a sequence stands for \nt{x} \dequal \nt{id} \dsemi \dpaction{\nt{x}} for some fresh variable \nt{x}. Thus, a sequence need not end with a semantic action. \item An expression followed with \dquestion, \dplus, or \dstar. This is sugar for the previous form: see \sref{sec:templates} and \fref{fig:sugar}. \item A \textbf{semantic action} \daction, possibly followed with a precedence annotation \dprec \nt{id}. This \ocaml code can refer to the variables that have been bound earlier in the sequence that this semantic action ends. These include all variables named by the user as well as all variables introduced by a $\dtilde$ pattern as part of a pun. 
% (but not variables introduced by deep ~ patterns) The notation $\kw{\$}i$, where $i$ is an integer, is forbidden. \item A \textbf{point-free semantic action} \dpfaction{\nt{\ocaml id}}, possibly followed with a precedence annotation \dprec~\nt{id}. The \ocaml identifier \nt{id} must denote a function or a data constructor. It is applied to a tuple of the variables that have been bound earlier in the sequence that this semantic action ends. Thus, $\dpfaction{\,\nt{id}\,}$ is sugar for $\dpaction{\,\nt{id}\;\,(x_1, \ldots, x_n)\,}$, where $x_1, \ldots, x_n$ are the variables bound earlier. These include all variables named by the user as well as all variables introduced by a $\dtilde$ pattern. \item An identity semantic action \dpfidentityaction. This is sugar for \dpfaction{\nt{identity}}, where \nt{identity} is \ocaml's identity function. Therefore, it is sugar for $\dpaction{\,(x_1, \ldots, x_n)\,}$, where $x_1, \ldots, x_n$ are the variables bound earlier. \end{itemize} \begin{comment} % fp: not sure if this paragraph is helpful. To some degree, an expression is analogous to an \ocaml expression: it returns an \ocaml value, and as a side effect, recognizes and consumes a fragment of the input. In particular, a sequence % \pattern \dequal \expressionsub{1} \dsemi \expressionsub{2} % is roughly analogous to an \ocaml sequence \verb+let p = e1 in e2+. % terminal symbol = recognize and consume one input symbol % nonterminal symbol = procedure call or macro invocation (if %inline) % semantic action = OCaml code insertion \end{comment} The syntax of expressions, as presented in \fref{fig:syntax}, seems more permissive than it really is. In reality, a choice cannot be nested inside a sequence; % (either on the left or on the right) a sequence cannot be nested in the left-hand side of a sequence; a semantic action cannot appear in the left-hand side of a sequence. (Thus, there is a stratification in three levels: choice expressions, sequence expressions, and atomic expressions, which corresponds roughly to the stratification of rules, productions, and producers in the old syntax.) % Furthermore, an expression between parentheses $\dlpar \expression \drpar$ is \emph{not} a valid expression. To surround an expression with parentheses, one must write either $\nt{midrule}\, \dlpar \expression \drpar$ or $\nt{endrule}\, \dlpar \expression \drpar$; see \sref{sec:library} and \fref{fig:standard}. When a complex expression (e.g., a choice or a sequence) is placed in parentheses, as in \nt{id}\,\dlpar\expression\drpar, this is equivalent to using $\nt{id}\,\dlpar\nt{s}\drpar$, where the fresh symbol~\nt{s} is declared as a synonym for this expression, via the declaration \dlet \nt{s} \dequalequal \expression. This idiom is also known as an anonymous rule (\sref{sec:actual}). 
\paragraph{Examples} As an example of a rule in the new syntax, the parameterized nonterminal symbol \nt{option}, which is part of Menhir's standard library (\sref{sec:library}), can be defined as follows:
%
\begin{quote}
\begin{tabular}{l}
\dlet \nt{option}(\nt{x}) \dcolonequal \\
\quad \barre \phantom{\nt{x} \dequal \nt{x} \dsemi{}} \dpaction{\nt{None}} \\
\quad \barre \nt{x} \dequal \nt{x} \dsemi{} \dpaction{\nt{Some x}}
\end{tabular}
\end{quote}
%
Using a pun, it can also be written as follows:
%
\begin{quote}
\begin{tabular}{l}
\dlet \nt{option}(\nt{x}) \dcolonequal \\
\quad \barre \phantom{\dtilde \dequal \nt{x} \dsemi{}} \dpaction{\nt{None}} \\
\quad \barre \dtilde \dequal \nt{x} \dsemi{} \dpaction{\nt{Some x}}
\end{tabular}
\end{quote}
%
Using a pun and a point-free semantic action, it can also be expressed as follows:
%
\begin{quote}
\begin{tabular}{l}
\dlet \nt{option}(\nt{x}) \dcolonequal \\
\quad \barre \phantom{\dtilde \dequal \nt{x} \dsemi{}} \dpaction{\nt{None}} \\
\quad \barre \dtilde \dequal \nt{x} \dsemi{} \dpfaction{\nt{Some}}
\end{tabular}
\end{quote}
%
As another example, the parameterized symbol $\nt{delimited}$, also part of Menhir's standard library (\sref{sec:library}), can be defined in the new syntax as follows:
%
\begin{quote}
\begin{tabular}{l}
\dlet \nt{delimited}(\nt{opening}, \nt{x}, \nt{closing}) \dequalequal \\
\quad \nt{opening} \dsemi \dtilde \dequal \nt{x} \dsemi \nt{closing} \dsemi \dpfidentityaction
\end{tabular}
\end{quote}
%
The use of $\dequalequal$ indicates that this is a macro, i.e., an \dinline nonterminal symbol (see \sref{sec:inline}). The identity semantic action \dpfidentityaction is here synonymous with \dpaction{\nt{x}}.
Other illustrations of the new syntax can be found in the directories \distrib{demos/calc-new-syntax-dune} and \distrib{demos/calc-ast-dune}.
\section{Advanced features}
\subsection{Splitting specifications over multiple files}
\label{sec:split}
\paragraph{Modules}
Grammar specifications can be split over multiple files. When \menhir is invoked with multiple argument file names, it considers each of these files as a \emph{partial} grammar specification, and \emph{joins} these partial specifications in order to obtain a single, complete specification.
This feature is intended to promote a form of modularity. It is hoped that, by splitting large grammar specifications into several ``modules'', they can be made more manageable. It is also hoped that this mechanism, in conjunction with parameterization (\sref{sec:templates}), will promote sharing and reuse. It should be noted, however, that this is only a weak form of modularity. Indeed, partial specifications cannot be independently processed (say, checked for conflicts). It is necessary to first join them, so as to form a complete grammar specification, before any kind of grammar analysis can be done.
This mechanism is, in fact, how \menhir's standard library (\sref{sec:library}) is made available: even though its name does not appear on the command line, it is automatically joined with the user's explicitly-provided grammar specifications, making the standard library's definitions globally visible.
A partial grammar specification, or module, contains declarations and rules, just like a complete one: there is no visible difference. Of course, it can consist of only declarations, or only rules, if the user so chooses. (Don't forget the mandatory \percentpercent keyword that separates declarations and rules. It must be present, even if one of the two sections is empty.)
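As an illustration, here is a minimal sketch of a specification split over two modules; the file names \texttt{tokens.mly} and \texttt{parser.mly}, the token names, and the start symbol \texttt{main} are ours, not imposed by \menhir, and both file names would be passed to \menhir on a single command line.
%
\begin{verbatim}
/* tokens.mly: declarations only. The rules section is empty,
   yet the mandatory %% separator is still present. */
%token <int> INT
%token PLUS EOL
%%
\end{verbatim}
%
\begin{verbatim}
/* parser.mly: rules that use the tokens declared in tokens.mly. */
%start <int> main
%%
main: i = INT PLUS j = INT EOL { i + j }
\end{verbatim}
%
Joining the two files yields the same complete specification as if everything had been written in a single file.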
\paragraph{Private and public nonterminal symbols} It should be noted that joining is \emph{not} a purely textual process. If two modules happen to define a nonterminal symbol by the same name, then it is considered, by default, that this is an accidental name clash. In that case, each of the two nonterminal symbols is silently renamed so as to avoid the clash. In other words, by default, a nonterminal symbol defined in module $A$ is considered \emph{private}, and cannot be defined again, or referred to, in module $B$. Naturally, it is sometimes desirable to define a nonterminal symbol $N$ in module $A$ and to refer to it in module~$B$. This is permitted if $N$ is public, that is, if either its definition carries the keyword \dpublic or $N$ is declared to be a start symbol. A public nonterminal symbol is never renamed, so it can be referred to by modules other than its defining module. In fact, it is permitted to split the definition of a \emph{public} nonterminal symbol, over multiple modules and/or within a single module. That is, a public nonterminal symbol $N$ can have multiple definitions, within one module and/or in distinct modules. All of these definitions are joined using the choice (\barre) operator. For instance, in the grammar of a programming language, the definition of the nonterminal symbol \nt{expression} could be split into multiple modules, where one module groups the expression forms that have to do with arithmetic, one module groups those that concern function definitions and function calls, one module groups those that concern object definitions and method calls, and so on. \paragraph{Tokens aside} Another use of modularity consists in placing all \dtoken declarations in one module, and the actual grammar specification in another module. The module that contains the token definitions can then be shared, making it easier to define multiple parsers that accept the same type of tokens. (On this topic, see \distrib{demos/calc-two}.) \subsection{Parameterizing rules} \label{sec:templates} A rule (that is, the definition of a nonterminal symbol) can be parameterized over an arbitrary number of symbols, which are referred to as formal parameters. \paragraph{Example} For instance, here is the definition of the parameterized nonterminal symbol \nt{option}, taken from the standard library (\sref{sec:library}): % \begin{quote} \begin{tabular}{l} \dpublic \basic{option}(\basic{X}): \newprod \dpaction{\basic{None}} \newprod \basic{x} = \basic{X} \dpaction{\basic{Some} \basic{x}} \end{tabular} \end{quote} % This definition states that \nt{option}(\basic{X}) expands to either the empty string, producing the semantic value \basic{None}, or to the string \basic{X}, producing the semantic value {\basic{Some}~\basic{x}}, where \basic{x} is the semantic value of \basic{X}. In this definition, the symbol \basic{X} is abstract: it stands for an arbitrary terminal or nonterminal symbol. The definition is made public, so \nt{option} can be referred to within client modules. A client who wishes to use \nt{option} simply refers to it, together with an actual parameter -- a symbol that is intended to replace \basic{X}. 
For instance, here is how one might define a sequence of declarations, preceded with optional commas: % \begin{quote} \begin{tabular}{l} \nt{declarations}: \newprod \dpaction{[]} \newprod \basic{ds} = \nt{declarations}; \nt{option}(\basic{COMMA}); \basic{d} = \nt{declaration} \dpaction{ \basic{d} :: \basic{ds} } \end{tabular} \end{quote} % This definition states that \nt{declarations} expands either to the empty string or to \nt{declarations} followed by an optional comma followed by \nt{declaration}. (Here, \basic{COMMA} is presumably a terminal symbol.) When this rule is encountered, the definition of \nt{option} is instantiated: that is, a copy of the definition, where \basic{COMMA} replaces \basic{X}, is produced. Things behave exactly as if one had written: \begin{quote} \begin{tabular}{l} \basic{optional\_comma}: \newprod \dpaction{\basic{None}} \newprod \basic{x} = \basic{COMMA} \dpaction{\basic{Some} \basic{x}} \\ \nt{declarations}: \newprod \dpaction{[]} \newprod \basic{ds} = \nt{declarations}; \nt{optional\_comma}; \basic{d} = \nt{declaration} \dpaction{ \basic{d} :: \basic{ds} } \end{tabular} \end{quote} % Note that, even though \basic{COMMA} presumably has been declared as a token with no semantic value, writing \basic{x}~=~\basic{COMMA} is legal, and binds \basic{x} to the unit value. This design choice ensures that the definition of \nt{option} makes sense regardless of the nature of \basic{X}: that is, \basic{X} can be instantiated with a terminal symbol, with or without a semantic value, or with a nonterminal symbol. \paragraph{Parameterization in general} In general, the definition of a nonterminal symbol $N$ can be parameterized with an arbitrary number of formal parameters. When $N$ is referred to within a production, it must be applied to the same number of actuals. In general, an actual is: % \begin{itemize} \item either a single symbol, which can be a terminal symbol, a nonterminal symbol, or a formal parameter; \item or an application of such a symbol to a number of actuals. \end{itemize} For instance, here is a rule whose single production consists of a single producer, which contains several, nested actuals. (This example is discussed again in \sref{sec:library}.) % \begin{quote} \begin{tabular}{l} \nt{plist}(\nt{X}): \newprod \basic{xs} = \nt{loption}(% \nt{delimited}(% \basic{LPAREN}, \nt{separated\_nonempty\_list}(\basic{COMMA}, \basic{X}), \basic{RPAREN}% )% ) \dpaction{\basic{xs}} \end{tabular} \end{quote} \begin{figure} \begin{center} \begin{tabular}{r@{\hspace{2mm}}c@{\hspace{2mm}}l} \nt{actual}\dquestion & is syntactic sugar for & \nt{option}(\nt{actual}) \\ \nt{actual}\dplus & is syntactic sugar for & \nt{nonempty\_list}(\nt{actual}) \\ \nt{actual}\dstar & is syntactic sugar for & \nt{list}(\nt{actual}) \end{tabular} \end{center} \caption{Syntactic sugar for simulating regular expressions, also known as EBNF} \label{fig:sugar} \end{figure} % Applications of the parameterized nonterminal symbols \nt{option}, \nt{nonempty\_list}, and \nt{list}, which are defined in the standard library (\sref{sec:library}), can be written using a familiar, regular-expression like syntax (\fref{fig:sugar}). \paragraph{Higher-order parameters} A formal parameter can itself expect parameters. 
For instance, here is a rule that defines the syntax of procedures in an imaginary programming language:
%
\begin{quote}
\begin{tabular}{l}
\nt{procedure}(\nt{list}): \newprod \basic{PROCEDURE} \basic{ID} \nt{list}(\nt{formal}) \basic{SEMICOLON} \nt{block} \basic{SEMICOLON} \dpaction{$\ldots$}
\end{tabular}
\end{quote}
%
This rule states that the token \basic{ID}, which represents the name of the procedure, should be followed with a list of formal parameters. (The definitions of the nonterminal symbols \nt{formal} and \nt{block} are not shown.) However, because \nt{list} is a formal parameter, as opposed to a concrete nonterminal symbol defined elsewhere, this definition does not specify how the list is laid out: which token, if any, is used to separate, or terminate, list elements? is the list allowed to be empty? and so on.
A more concrete notion of procedure is obtained by instantiating the formal parameter \nt{list}: for instance, \nt{procedure}(\nt{plist}), where \nt{plist} is the parameterized nonterminal symbol defined earlier, is a valid application.
\paragraph{Consistency}
Definitions and uses of parameterized nonterminal symbols are checked for consistency before they are expanded away. In short, it is checked that, wherever a nonterminal symbol is used, it is supplied with actual arguments in appropriate number and of appropriate nature. This guarantees that expansion of parameterized definitions terminates and produces a well-formed grammar as its outcome.
\subsection{Inlining}
\label{sec:inline}
It is well-known that the following grammar of arithmetic expressions does not work as expected: that is, in spite of the priority declarations, it has shift/reduce conflicts.
%
\begin{quote}
\begin{tabular}{l}
\dtoken \kangle{\basic{int}} \basic{INT} \\
\dtoken \basic{PLUS} \basic{TIMES} \\
\dleft \basic{PLUS} \\
\dleft \basic{TIMES} \\
\\
\percentpercent \\
\\
\nt{expression}: \newprod \basic{i} = \basic{INT} \dpaction{\basic{i}} \newprod \basic{e} = \nt{expression}; \basic{o} = \nt{op}; \basic{f} = \nt{expression} \dpaction{\basic{o} \basic{e} \basic{f}} \\
\nt{op}: \newprod \basic{PLUS} \dpaction{( + )} \newprod \basic{TIMES} \dpaction{( * )}
\end{tabular}
\end{quote}
%
The trouble is, the precedence level of the production \nt{expression} $\rightarrow$ \nt{expression} \nt{op} \nt{expression} is undefined, and there is no sensible way of defining it via a \dprec declaration, since the desired level really depends upon the symbol that was recognized by \nt{op}: was it \basic{PLUS} or \basic{TIMES}?
The standard workaround is to abandon the definition of \nt{op} as a separate nonterminal symbol, and to inline its definition into the definition of \nt{expression}, like this:
%
\begin{quote}
\begin{tabular}{l}
\nt{expression}: \newprod \basic{i} = \basic{INT} \dpaction{\basic{i}} \newprod \basic{e} = \nt{expression}; \basic{PLUS}; \basic{f} = \nt{expression} \dpaction{\basic{e} + \basic{f}} \newprod \basic{e} = \nt{expression}; \basic{TIMES}; \basic{f} = \nt{expression} \dpaction{\basic{e} * \basic{f}}
\end{tabular}
\end{quote}
%
This avoids the shift/reduce conflict, but gives up some of the original specification's structure, which, in realistic situations, can be detrimental.
Fortunately, \menhir offers a way of avoiding the conflict without manually transforming the grammar, by declaring that the nonterminal symbol \nt{op} should be inlined: % \begin{quote} \begin{tabular}{l} \nt{expression}: \newprod \basic{i} = \basic{INT} \dpaction{\basic{i}} \newprod \basic{e} = \nt{expression}; \basic{o} = \nt{op}; \basic{f} = \nt{expression} \dpaction{\basic{o} \basic{e} \basic{f}} \\ \dinline \nt{op}: \newprod \basic{PLUS} \dpaction{( + )} \newprod \basic{TIMES} \dpaction{( * )} \end{tabular} \end{quote} % The \dinline keyword causes all references to \nt{op} to be replaced with its definition. In this example, the definition of \nt{op} involves two productions, one that develops to \basic{PLUS} and one that expands to \basic{TIMES}, so every production that refers to \nt{op} is effectively turned into two productions, one that refers to \basic{PLUS} and one that refers to \basic{TIMES}. After inlining, \nt{op} disappears and \nt{expression} has three productions: that is, the result of inlining is exactly the manual workaround shown above. In some situations, inlining can also help recover a slight efficiency margin. For instance, the definition: % \begin{quote} \begin{tabular}{l} \dinline \nt{plist}(\nt{X}): \newprod \basic{xs} = \nt{loption}(% \nt{delimited}(% \basic{LPAREN}, \nt{separated\_nonempty\_list}(\basic{COMMA}, \basic{X}), \basic{RPAREN}% )% ) \dpaction{\basic{xs}} \end{tabular} \end{quote} % effectively makes \nt{plist}(\nt{X}) an alias for the right-hand side \nt{loption}($\ldots$). Without the \dinline keyword, the language recognized by the grammar would be the same, but the LR automaton would probably have one more state and would perform one more reduction at run time. The \dinline keyword does not affect the computation of positions (\sref{sec:positions}). The same positions are computed, regardless of where \dinline keywords are placed. If the semantic actions have side effects, the \dinline keyword \emph{can} affect the order in which these side effects take place. In the example of \nt{op} and \nt{expression} above, if for some reason the semantic action associated with \nt{op} has a side effect (such as updating a global variable, or printing a message), then, by inlining \nt{op}, we delay this side effect, which takes place \emph{after} the second operand has been recognized, whereas in the absence of inlining it takes place as soon as the operator has been recognized. % Du coup, ça change l'ordre des effets, dans cet exemple, de infixe % à postfixe. 
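For reference, here is a sketch of how the \dinline version of the grammar above might be written concretely in a \texttt{.mly} file; the \texttt{int} type for \basic{INT} is carried over from that grammar, and no \dstart declaration is shown, so this remains a fragment.
%
\begin{verbatim}
%token <int> INT
%token PLUS TIMES
%left PLUS
%left TIMES

%%

expression:
| i = INT                                 { i }
| e = expression; o = op; f = expression  { o e f }

%inline op:
| PLUS   { ( + ) }
| TIMES  { ( * ) }
\end{verbatim}
%
With the \dinline keyword on \nt{op}, the shift/reduce conflicts disappear; without it, they come back.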
\subsection{The standard library} \label{sec:library} \begin{figure} \begin{center} \begin{tabular}{lp{51mm}l@{}l} Name & Recognizes & Produces & Comment \\ \hline\\ % \nt{epsilon} & $\epsilon$ & \basic{unit} & (inlined) \\ % \\ \nt{endrule}(\nt{X}) & \nt{X} & $\alpha$, if \nt{X} : $\alpha$ & (inlined) \\ \nt{midrule}(\nt{X}) & \nt{X} & $\alpha$, if \nt{X} : $\alpha$ \\ \\ \nt{option}(\nt{X}) & $\epsilon$ \barre \nt{X} & $\alpha$ \basic{option}, if \nt{X} : $\alpha$ & (also \nt{X}\dquestion) \\ \nt{ioption}(\nt{X}) & $\epsilon$ \barre \nt{X} & $\alpha$ \basic{option}, if \nt{X} : $\alpha$ & (inlined) \\ \nt{boption}(\nt{X}) & $\epsilon$ \barre \nt{X} & \basic{bool} \\ \nt{loption}(\nt{X}) & $\epsilon$ \barre \nt{X} & $\alpha$ \basic{list}, if \nt{X} : $\alpha$ \nt{list} \\ \\ \nt{pair}(\nt{X}, \nt{Y}) & \nt{X} \nt{Y} & $\alpha\times\beta$, if \nt{X} : $\alpha$ and \nt{Y} : $\beta$ \\ \nt{separated\_pair}(\nt{X}, \nt{sep}, \nt{Y}) & \nt{X} \nt{sep} \nt{Y} & $\alpha\times\beta$, if \nt{X} : $\alpha$ and \nt{Y} : $\beta$ \\ \nt{preceded}(\nt{opening}, \nt{X}) & \nt{opening} \nt{X} & $\alpha$, if \nt{X} : $\alpha$ \\ \nt{terminated}(\nt{X}, \nt{closing}) & \nt{X} \nt{closing} & $\alpha$, if \nt{X} : $\alpha$ \\ \nt{delimited}(\nt{opening}, \nt{X}, \nt{closing}) & \nt{opening} \nt{X} \nt{closing} & $\alpha$, if \nt{X} : $\alpha$ \\ \\ \nt{list}(\nt{X}) & a possibly empty sequence of \nt{X}'s & $\alpha$ \basic{list}, if \nt{X} : $\alpha$ & (also \nt{X}\dstar) \\ \nt{nonempty\_list}(\nt{X}) & a nonempty sequence of \nt{X}'s & $\alpha$ \basic{list}, if \nt{X} : $\alpha$ & (also \nt{X}\dplus) \\ \nt{separated\_list}(\nt{sep}, \nt{X}) & a possibly empty sequence of \nt{X}'s separated with \nt{sep}'s & $\alpha$ \basic{list}, if \nt{X} : $\alpha$ \\ \nt{separated\_nonempty\_list}(\nt{sep}, \nt{X}) & a nonempty sequence of \nt{X}'s \hspace{2mm} separated with \nt{sep}'s & $\alpha$ \basic{list}, if \nt{X} : $\alpha$ \\ \\ \nt{rev}(\nt{X}) & \nt{X} & $\alpha$ \basic{list}, if \nt{X} : $\alpha$ \basic{list} & (inlined) \\ \nt{flatten}(\nt{X}) & \nt{X} & $\alpha$ \basic{list}, if \nt{X} : $\alpha$ \basic{list} \basic{list} & (inlined) \\ \nt{append}(\nt{X}, \nt{Y}) & \nt{X} \nt{Y} & $\alpha$ \basic{list}, if \nt{X}, \nt{Y} : $\alpha$ \basic{list} & (inlined) \\ \end{tabular} \end{center} \caption{Summary of the standard library; see \standardmly for details} \label{fig:standard} \end{figure} Once equipped with a rudimentary module system (\sref{sec:split}), parameterization (\sref{sec:templates}), and inlining (\sref{sec:inline}), it is straightforward to propose a collection of commonly used definitions, such as options, sequences, lists, and so on. This \emph{standard library} is joined, by default, with every grammar specification. A summary of the nonterminal symbols offered by the standard library appears in \fref{fig:standard}. See also the short-hands documented in \fref{fig:sugar}. By relying on the standard library, a client module can concisely define more elaborate notions. For instance, the following rule: % \begin{quote} \begin{tabular}{l} \dinline \nt{plist}(\nt{X}): \newprod \basic{xs} = \nt{loption}(% \nt{delimited}(% \basic{LPAREN}, \nt{separated\_nonempty\_list}(\basic{COMMA}, \basic{X}), \basic{RPAREN}% )% ) \dpaction{\basic{xs}} \end{tabular} \end{quote} % causes \nt{plist}(\nt{X}) to recognize a list of \nt{X}'s, where the empty list is represented by the empty string, and a non-empty list is delimited with parentheses and comma-separated. 
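As a further illustration, here is a sketch of how a client might use \nt{plist}, assuming it is defined as above; the token \texttt{ID}, the nonterminal symbol \texttt{expression}, and the \ocaml data constructor \texttt{Call} are hypothetical and would have to be defined elsewhere.
%
\begin{verbatim}
/* Recognizes  f,  f(x)  and  f(x, y);  args is an OCaml list,
   which is empty when no parenthesized argument list is present. */
call:
| f = ID; args = plist(expression)    { Call (f, args) }
\end{verbatim}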
The standard library is stored in a file named \standardmly, which is installed at the same time as \menhir. By default, \menhir attempts to find this file in the directory where this file was installed. This can be overridden by setting the environment variable \verb+$MENHIR_STDLIB+. If defined, this variable should contain the path of the directory where \texttt{standard.mly} is stored. (This path may end with a \texttt{/} character.) This can be overridden also via the command line switch \ostdlib. % The command line switch \onostdlib instructs \menhir to \emph{not} load the standard library. The meaning of the symbols defined in the standard library (\fref{fig:standard}) should be clear in most cases. Yet, the symbols \nt{endrule}(\nt{X}) and \nt{midrule}(\nt{X}) deserve an explanation. Both take an argument \nt{X}, which typically will be instantiated with an anonymous rule (\sref{sec:actual}). Both are defined as a synonym for \nt{X}. In both cases, this allows placing an anonymous subrule in the middle of a rule. \newcommand{\AAA}{\nt{cat}} \newcommand{\BBB}{\nt{dog}} \newcommand{\CCC}{\nt{cow}} \newcommand{\XXX}{\nt{xxx}} For instance, the following is a well-formed production: % \[\begin{array}{l} \AAA \quad \nt{endrule}(\BBB \quad \dpaction{\nt{\ocaml code$_1$}}) \quad \CCC \quad \dpaction{\nt{\ocaml code$_2$}} \end{array}\] % This production consists of three producers, namely \AAA{} and \nt{endrule}(\BBB$\;$\dpaction{\nt{\ocaml code$_1$}}) and \CCC, and a semantic action \dpaction{\nt{\ocaml code$_2$}}. % Because \nt{endrule}(\nt{X}) is declared as an \dinline synonym for \nt{X}, the expansion of anonymous rules (\sref{sec:actual}), followed with the expansion of \dinline symbols (\sref{sec:inline}), transforms the above production into the following: % \[\begin{array}{l} \AAA \quad \BBB \quad \CCC \quad \dpaction{\nt{\ocaml code$_1$; \ocaml code$_2$}} \end{array}\] % Note that \nt{\ocaml code$_1$} moves to the end of the rule, which means that this code is executed only after \AAA, \BBB{} and \CCC{} have been recognized. In this example, the use of \nt{endrule} is rather pointless, as the expanded code is more concise and clearer than the original code. Still, \nt{endrule} can be useful when its actual argument is an anonymous rule with multiple branches. % Let me *not* show an example. See the comments in standard.mly. \nt{midrule} is used in exactly the same way as \nt{endrule}, but its expansion is different. For instance, the following is a well-formed production: % \[\begin{array}{l} \AAA \quad \nt{midrule}(\dpaction{\nt{\ocaml code$_1$}}) \quad \CCC \quad \dpaction{\nt{\ocaml code$_2$}} \end{array}\] % (There is no \BBB{} in this example; this is intentional.) Because \nt{midrule}(\nt{X}) is a synonym for \nt{X}, but is not declared \dinline, the expansion of anonymous rules (\sref{sec:actual}), followed with the expansion of \dinline symbols (\sref{sec:inline}), transforms the above production into the following: % \[\begin{array}{l} \AAA \quad \XXX \quad \CCC \quad \dpaction{\nt{\ocaml code$_2$}} \end{array}\] % where the fresh nonterminal symbol $\XXX$ is separately defined by the rule $\XXX: \dpaction{\nt{\ocaml code$_1$}}$. Thus, $\XXX$ recognizes the empty string, and as soon as it is recognized, \nt{\ocaml code$_1$} is executed. This is known as a ``mid-rule action''. 
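As an illustration, here is a sketch of a mid-rule action written with \nt{midrule}; the tokens \texttt{LBRACE} and \texttt{RBRACE}, the nonterminal symbol \texttt{declarations}, and the functions \texttt{enter\_scope} and \texttt{exit\_scope} are hypothetical.
%
\begin{verbatim}
/* enter_scope() runs as soon as LBRACE has been recognized,
   before declarations is parsed; exit_scope() runs at the end. */
block:
| LBRACE midrule({ enter_scope () }) ds = declarations RBRACE
    { exit_scope (); ds }
\end{verbatim}
%
Without a mid-rule action, the call to \texttt{enter\_scope} would have to wait until the final semantic action, which runs only after the whole block has been recognized, or be placed in a separately defined $\epsilon$-producing symbol, which is exactly what \nt{midrule} expands to.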
% https://www.gnu.org/software/bison/manual/html_node/Mid_002dRule-Actions.html
% ------------------------------------------------------------------------------
\section{Conflicts}
\label{sec:conflicts}
When a shift/reduce or reduce/reduce conflict is detected, it is classified as either benign, if it can be resolved by consulting user-supplied precedence declarations, or severe, if it cannot. Benign conflicts are not reported. Severe conflicts are reported and, if the \oexplain switch is on, explained.
\subsection{When is a conflict benign?}
\label{sec:conflicts:benign}
A shift/reduce conflict involves a single token (the one that one might wish to shift) and one or more productions (those that one might wish to reduce). When such a conflict is detected, the precedence levels (\sref{sec:assoc}, \sref{sec:prec}) of these entities are looked up and compared as follows:
\begin{enumerate}
\item if only one production is involved, and if it has higher priority than the token, then the conflict is resolved in favor of reduction.
\item if only one production is involved, and if it has the same priority as the token, then the associativity status of the token is looked up:
\begin{enumerate}
\item if the token was declared nonassociative, then the conflict is resolved in favor of neither action, that is, a syntax error will be signaled if this token shows up when this production is about to be reduced;
\item if the token was declared left-associative, then the conflict is resolved in favor of reduction;
\item if the token was declared right-associative, then the conflict is resolved in favor of shifting.
\end{enumerate}
\item \label{multiway} if multiple productions are involved, and if, considered one by one, they all cause the conflict to be resolved in the same way (that is, either in favor of shifting, or in favor of neither), then the conflict is resolved in that way.
\end{enumerate}
In any of these cases, the conflict is considered benign. Otherwise, it is considered severe. Note that a reduce/reduce conflict is always considered severe, unless it happens to be subsumed by a benign multi-way shift/reduce conflict (item~\ref{multiway} above).
\subsection{How are severe conflicts explained?}
When the \odump switch is on, a description of the automaton is written to the \automaton file. Severe conflicts are shown as part of this description. Fortunately, there is also a way of understanding conflicts in terms of the grammar, rather than in terms of the automaton. When the \oexplain switch is on, a textual explanation is written to the \conflicts file.
\emph{Not all conflicts are explained} in this file: instead, \emph{only one conflict per automaton state is explained}. This is done partly in the interest of brevity, but also because Pager's algorithm can create artificial conflicts in a state that already contains a true LR(1) conflict; thus, one cannot hope in general to explain all of the conflicts that appear in the automaton. As a result of this policy, once all conflicts explained in the \conflicts file have been fixed, one might need to run \menhir again to produce yet more conflict explanations.
\begin{figure}
\begin{quote}
\begin{tabular}{l}
\dtoken \basic{IF THEN ELSE} \\
\dstart \kangle{\basic{expression}} \nt{expression} \\
\\
\percentpercent \\
\\
\nt{expression}: \newprod $\ldots$ \newprod \basic{IF b} = \nt{expression} \basic{THEN e} = \nt{expression} \dpaction{$\ldots$} \newprod \basic{IF b} = \nt{expression} \basic{THEN e} = \nt{expression} \basic{ELSE f} = \nt{expression} \dpaction{$\ldots$} \newprod $\ldots$
\end{tabular}
\end{quote}
\caption{Basic example of a shift/reduce conflict}
\label{fig:basicshiftreduce}
\end{figure}
\paragraph{How the conflict state is reached}
\fref{fig:basicshiftreduce} shows a grammar specification with a typical shift/reduce conflict.
%
When this specification is analyzed, the conflict is detected, and an explanation is written to the \conflicts file. The explanation first indicates in which state the conflict lies by showing how that state is reached. Here, it is reached after recognizing the following string of terminal and nonterminal symbols---the \emph{conflict string}:
%
\begin{quote}
\basic{IF expression THEN IF expression THEN expression}
\end{quote}
Allowing the conflict string to contain both nonterminal and terminal symbols usually makes it shorter and more readable. If desired, a conflict string composed purely of terminal symbols could be obtained by replacing each occurrence of a nonterminal symbol $N$ with an arbitrary $N$-sentence.
The conflict string can be thought of as a path that leads from one of the automaton's start states to the conflict state. When multiple such paths exist, the shortest one is displayed. Nevertheless, it may sometimes be quite long. In that case, artificially (and temporarily) declaring some existing nonterminal symbols to be start symbols has the effect of adding new start states to the automaton and can help produce shorter conflict strings. Here, \nt{expression} was declared to be a start symbol, which is why the conflict string is quite short.
In addition to the conflict string, the \conflicts file also states that the \emph{conflict token} is \basic{ELSE}. That is, when the automaton has recognized the conflict string and when the lookahead token (the next token on the input stream) is \basic{ELSE}, a conflict arises. A conflict corresponds to a choice: the automaton is faced with several possible actions, and does not know which one should be taken. This indicates that the grammar is not LR(1). The grammar may or may not be inherently ambiguous.
In our example, the conflict string and the conflict token are enough to understand why there is a conflict: when two \basic{IF} constructs are nested, it is ambiguous which of the two constructs the \basic{ELSE} branch should be associated with. Nevertheless, the \conflicts file provides further information: it explicitly shows that there exists a conflict, by proving that two distinct actions are possible. Here, one of these actions consists in \emph{shifting}, while the other consists in \emph{reducing}: this is a \emph{shift/reduce} conflict.
A \emph{proof} takes the form of a \emph{partial derivation tree} whose \emph{fringe} begins with the conflict string, followed by the conflict token. A derivation tree is a tree whose nodes are labeled with symbols. The root node carries a start symbol. A node that carries a terminal symbol is considered a leaf, and has no children.
A node that carries a nonterminal symbol $N$ either is considered a leaf, and has no children; or is not considered a leaf, and has $n$ children, where $n\geq 0$, labeled $\nt{x}_1,\ldots,\nt{x}_n$, where $N \rightarrow \nt{x}_1,\ldots,\nt{x}_n$ is a production. The fringe of a partial derivation tree is the string of terminal and nonterminal symbols carried by the tree's leaves. A string of terminal and nonterminal symbols that is the fringe of some partial derivation tree is a \emph{sentential form}. \paragraph{Why shifting is legal} \begin{figure} \mycommonbaseline \begin{center} \begin{heveapicture} \begin{tikzpicture}[level distance=12mm] \node { \nt{expression} } child { node {\basic{IF}} } child { node {\nt{expression}} } child { node {\basic{THEN}} } child { node {\nt{expression}} child { node {\basic{IF}} } child { node {\nt{expression}} } child { node {\basic{THEN}} } child { node {\nt{expression}} } child { node {\basic{ELSE}} } child { node {\nt{expression}} } } ; \end{tikzpicture} \end{heveapicture} \end{center} \caption{A partial derivation tree that justifies shifting} \label{fig:shifting:tree} \end{figure} \begin{figure} \begin{center} \begin{tabbing} \= \nt{expression} \\ \> \basic{IF} \nt{expression} \basic{THEN} \= \nt{expression} \\ \> \> \basic{IF} \nt{expression} \basic{THEN} \basic{expression} . \basic{ELSE} \nt{expression} \end{tabbing} \end{center} \caption{A textual version of the tree in \fref{fig:shifting:tree}} \label{fig:shifting:text} \end{figure} In our example, the proof that shifting is possible is the derivation tree shown in Figures~\ref{fig:shifting:tree} and~\ref{fig:shifting:text}. At the root of the tree is the grammar's start symbol, \nt{expression}. This symbol develops into the string \nt{IF expression THEN expression}, which forms the tree's second level. The second occurrence of \nt{expression} in that string develops into \nt{IF expression THEN expression ELSE expression}, which forms the tree's last level. The tree's fringe, a sentential form, is the string \nt{IF expression THEN IF expression THEN expression ELSE expression}. As announced earlier, it begins with the conflict string \nt{IF expression THEN IF expression THEN expression}, followed with the conflict token \nt{ELSE}. In \fref{fig:shifting:text}, the end of the conflict string is materialized with a dot. Note that this dot does not occupy the rightmost position in the tree's last level. In other words, the conflict token (\basic{ELSE}) itself occurs on the tree's last level. In practical terms, this means that, after the automaton has recognized the conflict string and peeked at the conflict token, it makes sense for it to \emph{shift} that token. 
\paragraph{Why reducing is legal}
\begin{figure}
\mycommonbaseline
\begin{center}
\begin{heveapicture}
\begin{tikzpicture}[level distance=12mm]
\node { \nt{expression} }
child { node {\basic{IF}} }
child { node {\nt{expression}} }
child { node {\basic{THEN}} }
child { node {\nt{expression}}
child { node {\basic{IF}} }
child { node {\nt{expression}} }
child { node {\basic{THEN}} }
child { node {\nt{expression}} }
}
child { node {\basic{ELSE}} }
child { node {\nt{expression}} }
;
\end{tikzpicture}
\end{heveapicture}
\end{center}
\caption{A partial derivation tree that justifies reducing}
\label{fig:reducing:tree}
\end{figure}
\begin{figure}
\begin{center}
\begin{tabbing}
\= \nt{expression} \\
\> \basic{IF} \nt{expression} \basic{THEN} \= \nt{expression} \basic{ELSE} \nt{expression} \sidecomment{lookahead token appears} \\
\> \> \basic{IF} \nt{expression} \basic{THEN} \basic{expression} .
\end{tabbing}
\end{center}
\caption{A textual version of the tree in \fref{fig:reducing:tree}}
\label{fig:reducing:text}
\end{figure}
In our example, the proof that reducing is possible is the derivation tree shown in Figures~\ref{fig:reducing:tree} and~\ref{fig:reducing:text}. Again, the sentential form found at the fringe of the tree begins with the conflict string, followed with the conflict token. Again, in \fref{fig:reducing:text}, the end of the conflict string is materialized with a dot. Note that, this time, the dot occupies the rightmost position in the tree's last level. In other words, the conflict token (\basic{ELSE}) appears on an earlier level (here, on the second level). This fact is emphasized by the comment \inlinesidecomment{lookahead token appears} found at the second level. In practical terms, this means that, after the automaton has recognized the conflict string and peeked at the conflict token, it makes sense for it to \emph{reduce} the production that corresponds to the tree's last level---here, the production is \nt{expression} $\rightarrow$ \basic{IF} \nt{expression} \basic{THEN} \nt{expression}.
\paragraph{An example of a more complex derivation tree}
Figures~\ref{fig:xreducing:tree} and~\ref{fig:xreducing:text} show a partial derivation tree that justifies reduction in a more complex situation. (This derivation tree is relative to a grammar that is not shown.) Here, the conflict string is \basic{DATA UIDENT EQUALS UIDENT}; the conflict token is \basic{LIDENT}. It is quite clear that the fringe of the tree begins with the conflict string. However, in this case, the fringe does not explicitly exhibit the conflict token. Let us examine the tree more closely and answer the question: following \basic{UIDENT}, what's the next terminal symbol on the fringe?
\begin{figure}
\mycommonbaseline
\begin{center}
\begin{heveapicture}
\begin{tikzpicture}[level distance=12mm,level 1/.style={sibling distance=18mm}, level 2/.style={sibling distance=18mm}, level 4/.style={sibling distance=24mm}]
\node { \nt{decls} }
child { node {\nt{decl}}
child { node {\basic{DATA}} }
child { node {\basic{UIDENT}} }
child { node {\basic{EQUALS}} }
child { node {\nt{tycon\_expr}}
child { node {\nt{tycon\_item}}
child { node {\basic{UIDENT}} }
child { node {\nt{opt\_type\_exprs}}
child { node {} edge from parent [dashed] }
}
}
}
}
child { node {\nt{opt\_semi}} }
child { node {\nt{decls}} }
;
\end{tikzpicture}
\end{heveapicture}
\end{center}
\caption{A partial derivation tree that justifies reducing}
\label{fig:xreducing:tree}
\end{figure}
\begin{figure}
\begin{center}
\begin{tabbing}
\= \nt{decls} \\
\> \nt{decl} \nt{opt\_semi} \nt{decls} \sidecomment{lookahead token appears because \nt{opt\_semi} can vanish and \nt{decls} can begin with \basic{LIDENT}} \\
\> \basic{DATA UIDENT} \basic{EQUALS} \= \nt{tycon\_expr} \sidecomment{lookahead token is inherited} \\
\> \> \nt{tycon\_item} \sidecomment{lookahead token is inherited} \\
\> \> \basic{UIDENT} \= \nt{opt\_type\_exprs} \sidecomment{lookahead token is inherited} \\
\> \> \> .
\end{tabbing}
\end{center}
\caption{A textual version of the tree in \fref{fig:xreducing:tree}}
\label{fig:xreducing:text}
\end{figure}
% TEMPORARY the HTML rendering of this figure isn't good
First, note that \nt{opt\_type\_exprs} is \emph{not} a leaf node, even though it has no children. The grammar contains the production $\nt{opt\_type\_exprs} \rightarrow \epsilon$: the nonterminal symbol \nt{opt\_type\_exprs} develops to the empty string. (This is made clear in \fref{fig:xreducing:text}, where a single dot appears immediately below \nt{opt\_type\_exprs}.) Thus, \nt{opt\_type\_exprs} is not part of the fringe. Next, note that \nt{opt\_type\_exprs} is the rightmost symbol within its level. Thus, in order to find the next symbol on the fringe, we have to look up one level. This is the meaning of the comment \inlinesidecomment{lookahead token is inherited}. Similarly, \nt{tycon\_item} and \nt{tycon\_expr} appear rightmost within their level, so we again have to look further up. This brings us back to the tree's second level. There, \nt{decl} is \emph{not} the rightmost symbol: next to it, we find \nt{opt\_semi} and \nt{decls}. Does this mean that \nt{opt\_semi} is the next symbol on the fringe? Yes and no. \nt{opt\_semi} is a \emph{nonterminal} symbol, but we are really interested in finding out what the next \emph{terminal} symbol on the fringe could be. The partial derivation tree shown in Figures~\ref{fig:xreducing:tree} and~\ref{fig:xreducing:text} does not explicitly answer this question. In order to answer it, we need to know more about \nt{opt\_semi} and \nt{decls}. Here, \nt{opt\_semi} stands (as one might have guessed) for an optional semicolon, so the grammar contains a production $\nt{opt\_semi} \rightarrow \epsilon$. This is indicated by the comment \inlinesidecomment{\nt{opt\_semi} can vanish}. (Nonterminal symbols that generate $\epsilon$ are also said to be \emph{nullable}.) Thus, one could choose to turn this partial derivation tree into a larger one by developing \nt{opt\_semi} into $\epsilon$, making it a non-leaf node. That would yield a new partial derivation tree where the next symbol on the fringe, following \basic{UIDENT}, is \nt{decls}. Now, what about \nt{decls}?
Again, it is a \emph{nonterminal} symbol, and we are really interested in finding out what the next \emph{terminal} symbol on the fringe could be. Again, we need to imagine how this partial derivation tree could be turned into a larger one by developing \nt{decls}. Here, the grammar happens to contain a production of the form $\nt{decls} \rightarrow \basic{LIDENT} \ldots$ This is indicated by the comment \inlinesidecomment{\nt{decls} can begin with \basic{LIDENT}}. Thus, by developing \nt{decls}, it is possible to construct a partial derivation tree where the next symbol on the fringe, following \basic{UIDENT}, is \basic{LIDENT}. This is precisely the conflict token.
To sum up, there exists a partial derivation tree whose fringe begins with the conflict string, followed with the conflict token. Furthermore, in that derivation tree, the dot occupies the rightmost position in the last level. As in our previous example, this means that, after the automaton has recognized the conflict string and peeked at the conflict token, it makes sense for it to \emph{reduce} the production that corresponds to the tree's last level---here, the production is $\nt{opt\_type\_exprs} \rightarrow \epsilon$.
\paragraph{Greatest common factor among derivation trees}
Understanding conflicts requires comparing two (or more) derivation trees. It is frequent for these trees to exhibit a common factor, that is, to exhibit identical structure near the top of the tree, and to differ only below a specific node. Manual identification of that node can be tedious, so \menhir performs this work automatically. When explaining an $n$-way conflict, it first displays the greatest common factor of the $n$ derivation trees. A question mark symbol $\basic{(?)}$ is used to identify the node where the trees begin to differ. Then, \menhir displays each of the $n$ derivation trees, \emph{without their common factor} -- that is, it displays $n$ sub-trees that actually begin to differ at the root. This should make visual comparisons significantly easier.
\subsection{How are severe conflicts resolved in the end?}
It is unspecified how severe conflicts are resolved. \menhir attempts to mimic \ocamlyacc's specification, that is, to resolve shift/reduce conflicts in favor of shifting, and to resolve reduce/reduce conflicts in favor of the production that textually appears earliest in the grammar specification. However, this specification is inconsistent in case of three-way conflicts, that is, conflicts that simultaneously involve a shift action and several reduction actions. Furthermore, textual precedence can be undefined when the grammar specification is split over multiple modules. In short, \menhir's philosophy is that
\begin{center}
severe conflicts should not be tolerated,
\end{center}
so you should not care how they are resolved.
% If a shift/reduce conflict is resolved in favor of reduction, then there can
% exist words of terminal symbols that are accepted by the canonical LR(1)
% automaton without traversing any conflict state and which are rejected by our
% automaton (constructed by Pager's method followed by conflict
% resolution). Same problem when a shift/reduce conflict is resolved in favor of
% neither action (via \dnonassoc) or when a reduce/reduce conflict is resolved
% arbitrarily.
\subsection{End-of-stream conflicts}
\label{sec:eos}
\menhir's treatment of the end of the token stream is (believed to be) fully compatible with \ocamlyacc's.
Yet, \menhir attempts to be more user-friendly by warning about a class of so-called ``end-of-stream conflicts''.
% TEMPORARY il faut noter que \menhir n'est pas conforme à ocamlyacc en
% présence de conflits end-of-stream; apparemment il part dans le mur
% en exigeant toujours le token suivant, alors que ocamlyacc est capable
% de s'arrêter (comment?); cf. problème de S. Hinderer (avril 2015).
\paragraph{How the end of stream is handled}
In many textbooks on parsing, it is assumed that the lexical analyzer, which produces the token stream, produces a special token, written \eos, to signal that the end of the token stream has been reached. A parser generator can take advantage of this by transforming the grammar: for each start symbol $\nt{S}$ in the original grammar, a new start symbol $\nt{S'}$ is defined, together with the production $S'\rightarrow S\eos$. The symbol $S$ is no longer a start symbol in the new grammar. This means that the parser will accept a sentence derived from $S$ only if it is immediately followed by the end of the token stream.
This approach has the advantage of simplicity. However, \ocamlyacc and \menhir do not follow it, for several reasons. Perhaps the most convincing one is that it is not flexible enough: sometimes, it is desirable to recognize a sentence derived from $S$, \emph{without} requiring that it be followed by the end of the token stream: this is the case, for instance, when reading commands, one by one, on the standard input channel. In that case, there is no end of stream: the token stream is conceptually infinite. Furthermore, after a command has been recognized, we do \emph{not} wish to examine the next token, because doing so might cause the program to block, waiting for more input.
In short, \ocamlyacc and \menhir's approach is to recognize a sentence derived from $S$ and to \emph{not look}, if possible, at what follows. However, this is possible only if the definition of $S$ is such that the end of an $S$-sentence is identifiable without knowledge of the lookahead token. When the definition of $S$ does not satisfy this criterion, an \emph{end-of-stream conflict} arises: after a potential $S$-sentence has been read, there can be a tension between consulting the next token, in order to determine whether the sentence is continued, and \emph{not} consulting the next token, because the sentence might be over and whatever follows should not be read. \menhir warns about end-of-stream conflicts, whereas \ocamlyacc does not.
\paragraph{A definition of end-of-stream conflicts}
Technically, \menhir proceeds as follows. A \eos symbol is introduced. It is, however, only a \emph{pseudo-}token: it is never produced by the lexical analyzer. For each start symbol $\nt{S}$ in the original grammar, a new start symbol $\nt{S'}$ is defined, together with the production $S'\rightarrow S$. The corresponding start state of the LR(1) automaton is composed of the LR(1) item $S' \rightarrow . \;S\; [\eos]$. That is, the pseudo-token \eos initially appears in the lookahead set, indicating that we expect to be done after recognizing an $S$-sentence. During the construction of the LR(1) automaton, this lookahead set is inherited by other items, with the effect that, in the end, the automaton has:
\begin{itemize}
\item \emph{shift} actions only on physical tokens; and
\item \emph{reduce} actions either on physical tokens or on the pseudo-token \eos.
\end{itemize} A state of the automaton has a reduce action on \eos if, in that state, an $S$-sentence has been read, so that the job is potentially finished. A state has a shift or reduce action on a physical token if, in that state, more tokens potentially need to be read before an $S$-sentence is recognized. If a state has a reduce action on \eos, then that action should be taken \emph{without} requesting the next token from the lexical analyzer. On the other hand, if a state has a shift or reduce action on a physical token, then the lookahead token \emph{must} be consulted in order to determine if that action should be taken. \begin{figure}[p] \begin{quote} \begin{tabular}{l} \dtoken \kangle{\basic{int}} \basic{INT} \\ \dtoken \basic{PLUS TIMES} \\ \dleft PLUS \\ \dleft TIMES \\ \dstart \kangle{\basic{int}} \nt{expr} \\ \percentpercent \\ \nt{expr}: \newprod \basic{i} = \basic{INT} \dpaction{\basic{i}} \newprod \basic{e1} = \nt{expr} \basic{PLUS} \basic{e2} = \nt{expr} \dpaction{\basic{e1 + e2}} \newprod \basic{e1} = \nt{expr} \basic{TIMES} \basic{e2} = \nt{expr} \dpaction{\basic{e1 * e2}} \end{tabular} \end{quote} \caption{Basic example of an end-of-stream conflict} \label{fig:basiceos} \end{figure} \begin{figure}[p] \begin{verbatim} State 6: expr -> expr . PLUS expr [ # TIMES PLUS ] expr -> expr PLUS expr . [ # TIMES PLUS ] expr -> expr . TIMES expr [ # TIMES PLUS ] -- On TIMES shift to state 3 -- On # PLUS reduce production expr -> expr PLUS expr State 4: expr -> expr . PLUS expr [ # TIMES PLUS ] expr -> expr . TIMES expr [ # TIMES PLUS ] expr -> expr TIMES expr . [ # TIMES PLUS ] -- On # TIMES PLUS reduce production expr -> expr TIMES expr State 2: expr' -> expr . [ # ] expr -> expr . PLUS expr [ # TIMES PLUS ] expr -> expr . TIMES expr [ # TIMES PLUS ] -- On TIMES shift to state 3 -- On PLUS shift to state 5 -- On # accept expr \end{verbatim} \caption{Part of an LR automaton for the grammar in \fref{fig:basiceos}} \label{fig:basiceosdump} \end{figure} \begin{figure}[p] \begin{quote} \begin{tabular}{l} \ldots \\ \dtoken \basic{END} \\ \dstart \kangle{\basic{int}} \nt{main} \hspace{1cm} \textit{// instead of \nt{expr}} \\ \percentpercent \\ \nt{main}: \newprod \basic{e} = \nt{expr} \basic{END} \dpaction{\basic{e}} \\ \nt{expr}: \newprod \ldots \end{tabular} \end{quote} \caption{Fixing the grammar specification in \fref{fig:basiceos}} \label{fig:basiceos:sol} \end{figure} An end-of-stream conflict arises when a state has distinct actions on \eos and on at least one physical token. In short, this means that the end of an $S$-sentence cannot be unambiguously identified without examining one extra token. \menhir's default behavior, in that case, is to suppress the action on \eos, so that more input is \emph{always} requested. \paragraph{Example} \fref{fig:basiceos} shows a grammar that has end-of-stream conflicts. When this grammar is processed, \menhir warns about these conflicts, and further warns that \nt{expr} is never accepted. Let us explain. Part of the corresponding automaton, as described in the \automaton file, is shown in \fref{fig:basiceosdump}. Explanations at the end of the \automaton file (not shown) point out that states 6 and 2 have an end-of-stream conflict. Indeed, both states have distinct actions on \eos and on the physical token \basic{TIMES}. % It is interesting to note that, even though state 4 has actions on \eos and on physical tokens, it does not have an end-of-stream conflict. 
This is because the action taken in state 4 is always to reduce the production $\nt{expr} \rightarrow \nt{expr}$ \basic{TIMES} \nt{expr}, regardless of the lookahead token.
By default, \menhir produces a parser where end-of-stream conflicts are resolved in favor of looking ahead: that is, the problematic reduce actions on \eos are suppressed. This means, in particular, that the \emph{accept} action in state 2, which corresponds to reducing the production $\nt{expr'} \rightarrow \nt{expr}$, is suppressed. This explains why the symbol \nt{expr} is never accepted: because expressions do not have an unambiguous end marker, the parser will always request one more token and will never stop.
In order to avoid this end-of-stream conflict, the standard solution is to introduce a new token, say \basic{END}, and to use it as an end marker for expressions. The \basic{END} token could be generated by the lexical analyzer when it encounters the actual end of stream, or it could correspond to a piece of concrete syntax, say, a line feed character, a semicolon, or an \texttt{end} keyword. The solution is shown in \fref{fig:basiceos:sol}.
% ------------------------------------------------------------------------------
\section{Positions}
\label{sec:positions}
When an \ocamllex-generated lexical analyzer produces a token, it updates two fields, named \verb+lex_start_p+ and \verb+lex_curr_p+, in its environment record, whose type is \verb+Lexing.lexbuf+. Each of these fields holds a value of type \verb+Lexing.position+. Together, they represent the token's start and end positions within the text that is being scanned. These fields are read by \menhir after calling the lexical analyzer, so \textbf{it is the lexical analyzer's responsibility} to correctly set these fields. A position consists mainly of an offset (the position's \verb+pos_cnum+ field), but also holds information about the current file name, the current line number, and the current offset within the current line. (Not all \ocamllex-generated analyzers keep this extra information up to date. This must be explicitly programmed by the author of the lexical analyzer.)
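For concreteness, here is a minimal \ocaml sketch of the bookkeeping that a lexer is expected to perform when it consumes a newline character; the function name \verb+record_newline+ is ours. In practice, one simply calls \verb+Lexing.new_line+, which has the same effect.
%
\begin{verbatim}
(* Advance lex_curr_p to the beginning of the next line. This is
   what Lexing.new_line does; a lexer should perform this update
   (or call Lexing.new_line) each time it consumes a newline. *)
let record_newline (lexbuf : Lexing.lexbuf) =
  let pos = lexbuf.Lexing.lex_curr_p in
  lexbuf.Lexing.lex_curr_p <-
    { pos with
      Lexing.pos_lnum = pos.Lexing.pos_lnum + 1;
      Lexing.pos_bol  = pos.Lexing.pos_cnum }
\end{verbatim}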
\begin{figure} \begin{center} \begin{tabular}{@{}l@{\hspace{7.0mm}}l@{}} \verb+$startpos+ & start position of the first symbol in the production's right-hand side, if there is one; \\& end position of the most recently parsed symbol, otherwise \\ \verb+$endpos+ & end position of the last symbol in the production's right-hand side, if there is one; \\& end position of the most recently parsed symbol, otherwise \\ \verb+$startpos(+ \verb+$+\nt{i} \barre \nt{id} \verb+)+ & start position of the symbol named \verb+$+\nt{i} or \nt{id} \\ \verb+$endpos(+ \verb+$+\nt{i} \barre \nt{id} \verb+)+ & end position of the symbol named \verb+$+\nt{i} or \nt{id} \\ \ksymbolstartpos & start position of the leftmost symbol \nt{id} such that \verb+$startpos(+\nt{id}\verb+)+ \verb+!=+\, \verb+$endpos(+\nt{id}\verb+)+; \\& if there is no such symbol, \verb+$endpos+ \\[2mm] % \verb+$startofs+ \\ \verb+$endofs+ \\ \verb+$startofs(+ \verb+$+\nt{i} \barre \nt{id} \verb+)+ & same as above, but produce an integer offset instead of a position \\ \verb+$endofs(+ \verb+$+\nt{i} \barre \nt{id} \verb+)+ \\ \verb+$symbolstartofs+ \\[2mm] % \verb+$loc+ & stands for the pair \verb+($startpos, $endpos)+ \\ \verb+$loc(+ \nt{id} \verb+)+ & stands for the pair \verb+($startpos(+ \nt{id} \verb+), $endpos(+ \nt{id} \verb+))+ \\ % $loc($i)$ works too, % but is not documented, % as that would be visually heavy % and its use is not encouraged anyway. \verb+$sloc+ & stands for the pair \verb+($symbolstartpos, $endpos)+ \\ \end{tabular} \end{center} \caption{Position-related keywords} \label{fig:pos} \end{figure} % We could document $endpos($0). Not sure whether that would be a good thing. \begin{figure} \begin{tabular}{@{}ll@{\hspace{2cm}}l} % Positions. \verb+symbol_start_pos()+ & \ksymbolstartpos \\ \verb+symbol_end_pos()+ & \verb+$endpos+ \\ \verb+rhs_start_pos i+ & \verb+$startpos($i)+ & ($1 \leq i \leq n$) \\ \verb+rhs_end_pos i+ & \verb+$endpos($i)+ & ($1 \leq i \leq n$) \\ % i = 0 permitted, really % Offsets. \verb+symbol_start()+ & \verb+$symbolstartofs+ \\ \verb+symbol_end()+ & \verb+$endofs+ \\ \verb+rhs_start i+ & \verb+$startofs($i)+ & ($1 \leq i \leq n$) \\ \verb+rhs_end i+ & \verb+$endofs($i)+ & ($1 \leq i \leq n$) \\ % i = 0 permitted, really \end{tabular} \caption{Translating position-related incantations from \ocamlyacc to \menhir} \label{fig:pos:mapping} \end{figure} This mechanism allows associating pairs of positions with terminal symbols. If desired, \menhir automatically extends it to nonterminal symbols as well. That is, it offers a mechanism for associating pairs of positions with terminal or nonterminal symbols. This is done by making a set of keywords available to semantic actions (\fref{fig:pos}). These keywords are \emph{not} available outside of a semantic action: in particular, they cannot be used within an \ocaml header. \ocaml's standard library module \texttt{Parsing} is deprecated. The functions that it offers \emph{can} be called, but will return dummy positions. We remark that, if the current production has an empty right-hand side, then \verb+$startpos+ and \verb+$endpos+ are equal, and (by convention) are the end position of the most recently parsed symbol (that is, the symbol that happens to be on top of the automaton's stack when this production is reduced). If the current production has a nonempty right-hand side, then \verb+$startpos+ is the same as \verb+$startpos($1)+ and \verb+$endpos+ is the same as \verb+$endpos($+\nt{n}\verb+)+, where \nt{n} is the length of the right-hand side. 
More generally, if the current production has matched a sentence of length zero, then \verb+$startpos+ and \verb+$endpos+ will be equal, and conversely. % (provided the lexer is reasonable and never produces a token whose start and % end positions are equal). The position \verb+$startpos+ is sometimes ``further towards the left'' than one would like. For example, in the following production: \begin{verbatim} declaration: modifier? variable { $startpos } \end{verbatim} the keyword \verb+$startpos+ represents the start position of the optional modifier \verb+modifier?+. If this modifier turns out to be absent, then its start position is (by definition) the end position of the most recently parsed symbol. This may not be what is desired: perhaps the user would prefer in this case to use the start position of the symbol \verb+variable+. This is achieved by using \ksymbolstartpos instead of \verb+$startpos+. By definition, \ksymbolstartpos is the start position of the leftmost symbol whose start and end positions differ. In this example, the computation of \ksymbolstartpos skips the absent \verb+modifier+, whose start and end positions coincide, and returns the start position of the symbol \verb+variable+ (assuming this symbol has distinct start and end positions). % On pourrait souligner que $symbolstartpos renvoie la $startpos du premier % symbole non vide, et non pas la $symbolstartpos du premier symbole non vide. % Donc ça peut rester un peu contre-intuitif, et ne pas correspondre % exactement à ce que l'on attend. D'ailleurs, le calcul de $symbolstartpos % est préservé par %inline (on obtient cela très facilement en éliminant % $symbolstartpos avant l'inlining) mais ne correspond pas à ce que donnerait % $symbolstartpos après un inlining manuel. Fondamentalement, cette notion de % $symbolstartpos ne tourne pas très rond. There is no keyword \verb+$symbolendpos+. Indeed, the problem with \verb+$startpos+ is due to the asymmetry in the definition of \verb+$startpos+ and \verb+$endpos+ in the case of an empty right-hand side, and does not affect \verb+$endpos+. \newcommand{\fineprint}{\footnote{% The computation of \ksymbolstartpos is optimized by \menhir under two assumptions about the lexer. First, \menhir assumes that the lexer never produces a token whose start and end positions are equal. Second, \menhir assumes that two positions produced by the lexer are equal if and only if they are physically equal. If the lexer violates either of these assumptions, the computation of \ksymbolstartpos could produce a result that differs from \texttt{Parsing.symbol\_start\_pos()}. }} The positions computed by \menhir are exactly the same as those computed by \verb+ocamlyacc+\fineprint. More precisely, \fref{fig:pos:mapping} sums up how to translate a call to the \texttt{Parsing} module, as used in an \ocamlyacc grammar, to a \menhir keyword. We note that \menhir's \verb+$startpos+ does not appear in the right-hand column in \fref{fig:pos:mapping}. In other words, \menhir's \verb+$startpos+ does not correspond exactly to any of the \ocamlyacc function calls. An exact \ocamlyacc equivalent of \verb+$startpos+ is \verb+rhs_start_pos 1+ if the current production has a nonempty right-hand side and \verb+symbol_start_pos()+ if it has an empty right-hand side. Finally, we remark that \menhir's \dinline keyword (\sref{sec:inline}) does not affect the computation of positions. The same positions are computed, regardless of where \dinline keywords are placed. 
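As an illustration, here is a sketch of a production that stores a location in its semantic value; the nonterminal symbols are those of the example above, while the \verb+Located+ constructor is invented for the purpose of this sketch:
\begin{verbatim}
declaration:
  m = modifier? v = variable
    { Located ((m, v), $symbolstartpos, $endpos) }
\end{verbatim}
If the optional modifier is absent, the recorded location begins at the symbol \verb+variable+, as explained above; writing \verb+$startpos+ instead of \verb+$symbolstartpos+ would instead yield the end position of the most recently parsed symbol.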
% ------------------------------------------------------------------------------ \section{Using \menhir as an interpreter} \label{sec:interpret} When \ointerpret is set, \menhir no longer behaves as a compiler. Instead, it acts as an interpreter. That is, it repeatedly: \begin{itemize} \item reads a sentence off the standard input channel; \item parses this sentence, according to the grammar; \item displays an outcome. \end{itemize} This process stops when the end of the input channel is reached. \subsection{Sentences} \label{sec:sentences} The syntax of sentences is as follows: \begin{center} \begin{tabular}{r@{}c@{}l} \nt{sentence} \is \optional{\nt{lid}\,\deuxpoints} \sepspacelist{\nt{uid}} \,\dnewline \end{tabular} \end{center} Less formally, a sentence is a sequence of zero or more terminal symbols (\nt{uid}'s), separated with whitespace, terminated with a newline character, and optionally preceded with a non-terminal start symbol (\nt{lid}). This non-terminal symbol can be omitted if, and only if, the grammar only has one start symbol. For instance, here are four valid sentences for the grammar of arithmetic expressions found in the directory \distrib{demos/calc}: % \begin{verbatim} main: INT PLUS INT EOL INT PLUS INT INT PLUS PLUS INT EOL INT PLUS PLUS \end{verbatim} % In the first sentence, the start symbol \texttt{main} was explicitly specified. In the other sentences, it was omitted, which is permitted, because this grammar has no start symbol other than \texttt{main}. The first sentence is a stream of four terminal symbols, namely \texttt{INT}, \texttt{PLUS}, \texttt{INT}, and \texttt{EOL}. These terminal symbols must be provided under their symbolic names. Writing, say, ``\texttt{12+32\textbackslash n}'' instead of \texttt{INT PLUS INT EOL} is not permitted. \menhir would not be able to make sense of such a concrete notation, since it does not have a lexer for it. % On pourrait documenter le fait qu'une phrase finie est transformée par \menhir % en un flot de tokens potentiellement infinie, avec un suffixe infini EOF ... % Mais c'est un hack, qui pourrait changer à l'avenir. \subsection{Outcomes} \label{sec:outcomes} As soon as \menhir is able to read a complete sentence off the standard input channel (that is, as soon as it finds the newline character that ends the sentence), it parses the sentence according to whichever grammar was specified on the command line, and displays an outcome. An outcome is one of the following: \begin{itemize} \item \texttt{ACCEPT}: a prefix of the sentence was successfully parsed; a parser generated by \menhir would successfully stop and produce a semantic value; \item \texttt{OVERSHOOT}: the end of the sentence was reached before it could be accepted; a parser generated by \menhir would request a non-existent ``next token'' from the lexer, causing it to fail or block; \item \texttt{REJECT}: the sentence was not accepted; a parser generated by \menhir would raise the exception \texttt{Error}. \end{itemize} When \ointerpretshowcst is set, each \texttt{ACCEPT} outcome is followed with a concrete syntax tree. A concrete syntax tree is either a leaf or a node. A leaf is either a terminal symbol or \error. A node is annotated with a non-terminal symbol, and carries a sequence of immediate descendants that correspond to a valid expansion of this non-terminal symbol. 
\menhir's notation for concrete syntax trees is as follows: \begin{center} \begin{tabular}{r@{}c@{}l} \nt{cst} \is \nt{uid} \\ && \error \\ && \texttt{[} \nt{lid}\,\deuxpoints \sepspacelist{\nt{cst}} \texttt{]} \end{tabular} \end{center} % This notation is not quite unambiguous (it is ambiguous if several % productions are identical). For instance, if one wished to parse the example sentences of \sref{sec:sentences} using the grammar of arithmetic expressions in \distrib{demos/calc}, one could invoke \menhir as follows: \begin{verbatim} $ menhir --interpret --interpret-show-cst demos/calc/parser.mly main: INT PLUS INT EOL ACCEPT [main: [expr: [expr: INT] PLUS [expr: INT]] EOL] INT PLUS INT OVERSHOOT INT PLUS PLUS INT EOL REJECT INT PLUS PLUS REJECT \end{verbatim} (Here, \menhir's input---the sentences provided by the user on the standard input channel--- is shown intermixed with \menhir's output---the outcomes printed by \menhir on the standard output channel.) The first sentence is valid, and accepted; a concrete syntax tree is displayed. The second sentence is incomplete, because the grammar specifies that a valid expansion of \texttt{main} ends with the terminal symbol \texttt{EOL}; hence, the outcome is \texttt{OVERSHOOT}. The third sentence is invalid, because of the repeated occurrence of the terminal symbol \texttt{PLUS}; the outcome is \texttt{REJECT}. The fourth sentence, a prefix of the third one, is rejected for the same reason. \subsection{Remarks} Using \menhir as an interpreter offers an easy way of debugging your grammar. For instance, if one wished to check that addition is considered left-associative, as requested by the \dleft directive found in the file \distrib{demos/calc/parser.mly}, one could submit the following sentence: \begin{verbatim} $ ./menhir --interpret --interpret-show-cst ../demos/calc/parser.mly INT PLUS INT PLUS INT EOL ACCEPT [main: [expr: [expr: [expr: INT] PLUS [expr: INT]] PLUS [expr: INT]] EOL ] \end{verbatim} %$ The concrete syntax tree displayed by \menhir is skewed towards the left, as desired. The switches \ointerpret and \otrace can be used in conjunction. When \otrace is set, the interpreter logs its actions to the standard error channel. % ------------------------------------------------------------------------------ \section{Generated API} When \menhir processes a grammar specification, say \texttt{parser.mly}, it produces one \ocaml module, \texttt{Parser}, whose code resides in the file \texttt{parser.ml} and whose signature resides in the file \texttt{parser.mli}. We now review this signature. For simplicity, we assume that the grammar specification has just one start symbol \verb+main+, whose \ocaml type is \verb+thing+. % ------------------------------------------------------------------------------ \subsection{Monolithic API} \label{sec:monolithic} The monolithic API defines the type \verb+token+, the exception \verb+Error+, and the parsing function \verb+main+, named after the start symbol of the grammar. %% type token The type \verb+token+ is an algebraic data type. A value of type \verb+token+ represents a terminal symbol and its semantic value. For instance, if the grammar contains the declarations \verb+%token A+ and \verb+%token<int> B+, then the generated file \texttt{parser.mli} contains the following definition: \begin{verbatim} type token = | A | B of int \end{verbatim} % If \oonlytokens is specified on the command line, the type \verb+token+ is generated, and the rest is omitted. 
On the contrary, if \oexternaltokens is used, the type \verb+token+ is omitted, but the rest (described below) is generated. %% exception Error The exception \verb+Error+ carries no argument. It is raised by the parsing function \verb+main+ (described below) when a syntax error is detected. % \begin{verbatim} exception Error \end{verbatim} %% val main Next comes one parsing function for each start symbol of the grammar. Here, we have assumed that there is one start symbol, named \verb+main+, so the generated file \texttt{parser.mli} contains the following declaration: \begin{verbatim} val main: (Lexing.lexbuf -> token) -> Lexing.lexbuf -> thing \end{verbatim} % On ne montre pas la définition de l'exception Error. This function expects two arguments, namely: a lexer, which typically is produced by \ocamllex and has type \verb+Lexing.lexbuf -> token+; and a lexing buffer, which has type \verb+Lexing.lexbuf+. This API is compatible with \ocamlyacc. (For information on using \menhir without \ocamllex, please consult \sref{sec:qa}.) % This API is ``monolithic'' in the sense that there is just one function, which does everything: it pulls tokens from the lexer, parses, and eventually returns a semantic value (or fails by throwing the exception \texttt{Error}). % ------------------------------------------------------------------------------ \subsection{Incremental API} \label{sec:incremental} If \otable is set, \menhir offers an incremental API in addition to the monolithic API. In this API, control is inverted. The parser does not have access to the lexer. Instead, when the parser needs the next token, it stops and returns its current state to the user. The user is then responsible for obtaining this token (typically by invoking the lexer) and resuming the parser from that state. % The directory \distrib{demos/calc-incremental} contains a demo that illustrates the use of the incremental API. This API is ``incremental'' in the sense that the user has access to a sequence of the intermediate states of the parser. Assuming that semantic values are immutable, a parser state is a persistent data structure: it can be stored and used multiple times, if desired. This enables applications such as ``live parsing'', where a buffer is continuously parsed while it is being edited. The parser can be re-started in the middle of the buffer whenever the user edits a character. Because two successive parser states share most of their data in memory, a list of $n$ successive parser states occupies only $O(n)$ space in memory. % One could point out that semantic actions should be side-effect free. % But that is an absolute requirement. Semantic actions can have side % effects, if the user knows what they are doing. % TEMPORARY actually, live parsing also requires a way of performing % error recovery, up to a complete parse... as in Merlin. % ------------------------------------------------------------------------------ \subsubsection{Starting the parser} In this API, the parser is started by invoking \verb+Incremental.main+. (Recall that we assume that \verb+main+ is the name of the start symbol.) The generated file \texttt{parser.mli} contains the following declaration: \begin{verbatim} module Incremental : sig val main: position -> thing MenhirInterpreter.checkpoint end \end{verbatim} The argument is the initial position. If the lexer is based on an \ocaml lexing buffer, this argument should be \verb+lexbuf.lex_curr_p+. 
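For instance, assuming that \verb+lexbuf+ is such a lexing buffer, the parser could be started as follows (a sketch):
\begin{verbatim}
let checkpoint = Parser.Incremental.main lexbuf.Lexing.lex_curr_p
\end{verbatim}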
In \sref{sec:incremental} and \sref{sec:inspection}, the type \verb+position+ is a synonym for \verb+Lexing.position+. We emphasize that the function \verb+Incremental.main+ does not parse anything. It constructs a checkpoint which serves as a \emph{starting} point. The functions \verb+offer+ and \verb+resume+, described below, are used to drive the parser. % ------------------------------------------------------------------------------ \subsubsection{Driving the parser} \label{sec:incremental:driving} The sub-module \menhirinterpreter is also part of the incremental API. Its declaration, which appears in the generated file \texttt{parser.mli}, is as follows: \begin{verbatim} module MenhirInterpreter : MenhirLib.IncrementalEngine.INCREMENTAL_ENGINE with type token = token \end{verbatim} The signature \verb+INCREMENTAL_ENGINE+, defined in the module \menhirlibincrementalengine, contains many types and functions, which are described in the rest of this section (\sref{sec:incremental:driving}) and in the following sections (\sref{sec:incremental:inspecting}, \sref{sec:incremental:updating}). Please keep in mind that, from the outside, these types and functions should be referred to with an appropriate prefix. For instance, the type \verb+checkpoint+ should be referred to as \verb+MenhirInterpreter.checkpoint+, or \verb+Parser.MenhirInterpreter.checkpoint+, depending on which modules the user chooses to open. %% type token % Passons-le sous silence. %% type 'a env \begin{verbatim} type 'a env \end{verbatim} The abstract type \verb+'a env+ represents the current state of the parser. (That is, it contains the current state and stack of the LR automaton.) Assuming that semantic values are immutable, it is a persistent data structure: it can be stored and used multiple times, if desired. The parameter \verb+'a+ is the type of the semantic value that will eventually be produced if the parser succeeds. %% type production \begin{verbatim} type production \end{verbatim} The abstract type \verb+production+ represents a production of the grammar. % The ``start productions'' (which do not exist in an \mly file, but are constructed by \menhir internally) are \emph{not} part of this type. %% type 'a checkpoint \begin{verbatim} type 'a checkpoint = private | InputNeeded of 'a env | Shifting of 'a env * 'a env * bool | AboutToReduce of 'a env * production | HandlingError of 'a env | Accepted of 'a | Rejected \end{verbatim} The type \verb+'a checkpoint+ represents an intermediate or final state of the parser. An intermediate checkpoint is a suspension: it records the parser's current state, and allows parsing to be resumed. The parameter \verb+'a+ is the type of the semantic value that will eventually be produced if the parser succeeds. \verb+Accepted+ and \verb+Rejected+ are final checkpoints. \verb+Accepted+ carries a semantic value. \verb+InputNeeded+ is an intermediate checkpoint. It means that the parser wishes to read one token before continuing. \verb+Shifting+ is an intermediate checkpoint. It means that the parser is taking a shift transition. It exposes the state of the parser before and after the transition. The Boolean parameter tells whether the parser intends to request a new token after this transition. (It always does, except when it is about to accept.) \verb+AboutToReduce+ is an intermediate checkpoint: it means that the parser is about to perform a reduction step. 
\verb+HandlingError+ is also an intermediate checkpoint: it means that the parser has detected an error and is about to handle it. (Error handling is typically performed in several steps, so the next checkpoint is likely to be \verb+HandlingError+ again.) In these two cases, the parser does not need more input. The parser suspends itself at this point only in order to give the user an opportunity to observe the parser's transitions and possibly handle errors in a different manner, if desired. %% val offer \begin{verbatim} val offer: 'a checkpoint -> token * position * position -> 'a checkpoint \end{verbatim} The function \verb+offer+ allows the user to resume the parser after the parser has suspended itself with a checkpoint of the form \verb+InputNeeded env+. This function expects the previous checkpoint \verb+checkpoint+ as well as a new token (together with the start and end positions of this token). It produces a new checkpoint, which again can be an intermediate checkpoint or a final checkpoint. It does not raise any exception. (The exception \texttt{Error} is used only in the monolithic API.) %% val resume \begin{verbatim} val resume: 'a checkpoint -> 'a checkpoint \end{verbatim} The function \verb+resume+ allows the user to resume the parser after the parser has suspended itself with a checkpoint of the form \verb+AboutToReduce (env, prod)+ or \verb+HandlingError env+. This function expects just the previous checkpoint \verb+checkpoint+. It produces a new checkpoint. It does not raise any exception. The incremental API subsumes the monolithic API. Indeed, \verb+main+ can be (and is in fact) implemented by first using \verb+Incremental.main+, then calling \verb+offer+ and \verb+resume+ in a loop, until a final checkpoint is obtained. %% type supplier \begin{verbatim} type supplier = unit -> token * position * position \end{verbatim} A token supplier is a function of no arguments which delivers a new token (together with its start and end positions) every time it is called. The function \verb+loop+ and its variants, described below, expect a supplier as an argument. %% val lexer_lexbuf_to_supplier \begin{verbatim} val lexer_lexbuf_to_supplier: (Lexing.lexbuf -> token) -> Lexing.lexbuf -> supplier \end{verbatim} The function \verb+lexer_lexbuf_to_supplier+, applied to a lexer and to a lexing buffer, produces a fresh supplier. %% (remark about the loop* functions) The functions \verb+offer+ and \verb+resume+, documented above, are sufficient to write a parser loop. One can imagine many variations of such a loop, which is why we expose \verb+offer+ and \verb+resume+ in the first place. Nevertheless, some variations are so common that it is worth providing them, ready for use. The following functions are implemented on top of \verb+offer+ and \verb+resume+. %% val loop \begin{verbatim} val loop: supplier -> 'a checkpoint -> 'a \end{verbatim} \verb+loop supplier checkpoint+ begins parsing from \verb+checkpoint+, reading tokens from \verb+supplier+. It continues parsing until it reaches a checkpoint of the form \verb+Accepted v+ or \verb+Rejected+. In the former case, it returns \verb+v+. In the latter case, it raises the exception \verb+Error+. (By the way, this is how we implement the monolithic API on top of the incremental API.) \begin{verbatim} val loop_handle: ('a -> 'answer) -> ('a checkpoint -> 'answer) -> supplier -> 'a checkpoint -> 'answer \end{verbatim} \verb+loop_handle succeed fail supplier checkpoint+ begins parsing from \verb+checkpoint+, reading tokens from \verb+supplier+. 
It continues until it reaches a checkpoint of the form \verb+Accepted v+ or \verb+HandlingError _+ (or~\verb+Rejected+, but that should not happen, as \verb+HandlingError _+ will be observed first). In the former case, it calls \verb+succeed v+. In the latter case, it calls \verb+fail+ with this checkpoint. It cannot raise \verb+Error+. This means that \menhir's traditional error-handling procedure (which pops the stack until a state that can act on the \error token is found) does not get a chance to run. Instead, the user can implement her own error handling code, in the \verb+fail+ continuation. %% val loop_handle_undo \begin{verbatim} val loop_handle_undo: ('a -> 'answer) -> ('a checkpoint -> 'a checkpoint -> 'answer) -> supplier -> 'a checkpoint -> 'answer \end{verbatim} \verb+loop_handle_undo+ is analogous to \verb+loop_handle+, but passes a pair of checkpoints (instead of a single checkpoint) to the failure continuation. % The first (and oldest) checkpoint that is passed to the failure continuation is the last \verb+InputNeeded+ checkpoint that was encountered before the error was detected. The second (and newest) checkpoint is where the error was detected. (This is the same checkpoint that \verb+loop_handle+ would pass to its failure continuation.) Going back to the first checkpoint can be thought of as undoing any reductions that were performed after seeing the problematic token. (These reductions must be default reductions or spurious reductions.) This can be useful to someone who wishes to implement an error explanation or error recovery mechanism. \verb+loop_handle_undo+ must be applied to an \verb+InputNeeded+ checkpoint. The initial checkpoint produced by \verb+Incremental.main+ is of this form. %% val shifts \begin{verbatim} val shifts: 'a checkpoint -> 'a env option \end{verbatim} \verb+shifts checkpoint+ assumes that \verb+checkpoint+ has been obtained by submitting a token to the parser. It runs the parser from \verb+checkpoint+, through an arbitrary number of reductions, until the parser either accepts this token (i.e., shifts) or rejects it (i.e., signals an error). If the parser decides to shift, then \verb+Some env+ is returned, where \verb+env+ is the parser's state just before shifting. Otherwise, \verb+None+ is returned. This can be used to test whether the parser is willing to accept a certain token. This function should be used with caution, though, as it causes semantic actions to be executed. It is desirable that all semantic actions be side-effect-free, or that their side-effects be harmless. %% val acceptable \begin{verbatim} val acceptable: 'a checkpoint -> token -> position -> bool \end{verbatim} \verb+acceptable checkpoint token pos+ requires \verb+checkpoint+ to be an \verb+InputNeeded+ checkpoint. It returns \verb+true+ iff the parser is willing to shift this token. % This can be used to test, after an error has been detected, which tokens would have been accepted at this point. To do this, one would typically use \verb+loop_handle_undo+ to get access to the last \verb+InputNeeded+ checkpoint that was encountered before the error was detected, and apply \verb+acceptable+ to that checkpoint. \verb+acceptable+ is implemented using \verb+shifts+, so, like \verb+shifts+, it causes certain semantic actions to be executed. It is desirable that all semantic actions be side-effect-free, or that their side-effects be harmless. 
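To summarize, here is a minimal sketch of a complete parsing function built on top of the incremental API. It assumes a lexer \verb+Lexer.token+ produced by \ocamllex and a start symbol \verb+main+ whose semantic value has type \verb+thing+; the error handling performed in the \verb+fail+ continuation is deliberately simplistic:
\begin{verbatim}
let parse (lexbuf : Lexing.lexbuf) : thing option =
  let module I = Parser.MenhirInterpreter in
  (* Turn the ocamllex-generated lexer into a supplier. *)
  let supplier = I.lexer_lexbuf_to_supplier Lexer.token lexbuf in
  (* Construct the initial checkpoint. *)
  let checkpoint = Parser.Incremental.main lexbuf.Lexing.lex_curr_p in
  (* Success and failure continuations. *)
  let succeed v = Some v in
  let fail _checkpoint = prerr_endline "Syntax error."; None in
  I.loop_handle succeed fail supplier checkpoint
\end{verbatim}
A more ambitious \verb+fail+ continuation could inspect the checkpoint that it receives, for instance via \verb+current_state_number+, so as to produce a precise diagnostic message (\sref{sec:errors:new}).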
% ------------------------------------------------------------------------------ \subsubsection{Inspecting the parser's state} \label{sec:incremental:inspecting} Although the type \verb+env+ is opaque, a parser state can be inspected via a few accessor functions, which are described in this section. The following types and functions are contained in the \verb+MenhirInterpreter+ sub-module. %% type 'a lr1state \begin{verbatim} type 'a lr1state \end{verbatim} The abstract type \verb+'a lr1state+ describes a (non-initial) state of the LR(1) automaton. % If \verb+s+ is such a state, then \verb+s+ should have at least one incoming transition, and all of its incoming transitions carry the same (terminal or non-terminal) symbol, say $A$. We say that $A$ is the \emph{incoming symbol} of the state~\verb+s+. % The index \verb+'a+ is the type of the semantic values associated with $A$. The role played by \verb+'a+ is clarified in the definition of the type \verb+element+, which appears further on. %% val number \begin{verbatim} val number: _ lr1state -> int \end{verbatim} The states of the LR(1) automaton are numbered (from 0 and up). The function \verb+number+ maps a state to its number. %% val production_index %% val find_production \begin{verbatim} val production_index: production -> int val find_production: int -> production \end{verbatim} Productions are numbered. (The set of indices of all productions forms an interval, which does \emph{not} necessarily begin at 0.) % The function \verb+production_index+ converts a production to an integer number, whereas the function \verb+find_production+ carries out the reverse conversion. It is an error to apply \verb+find_production+ to an invalid index. %% type element \begin{verbatim} type element = | Element: 'a lr1state * 'a * position * position -> element \end{verbatim} The type \verb+element+ describes one entry in the stack of the LR(1) automaton. In a stack element of the form \verb+Element (s, v, startp, endp)+, \verb+s+ is a (non-initial) state and \verb+v+ is a semantic value. The value~\verb+v+ is associated with the incoming symbol~$A$ of the state~\verb+s+. In other words, the value \verb+v+ was pushed onto the stack just before the state \verb+s+ was entered. Thus, for some type \verb+'a+, the state~\verb+s+ has type \verb+'a lr1state+ and the value~\verb+v+ has type~\verb+'a+. The positions \verb+startp+ and \verb+endp+ delimit the fragment of the input text that was reduced to the symbol $A$. In order to do anything useful with the value \verb+v+, one must gain information about the type \verb+'a+, by inspection of the state~\verb+s+. So far, the type \verb+'a lr1state+ is abstract, so there is no way of inspecting~\verb+s+. The inspection API (\sref{sec:inspection}) offers further tools for this purpose. %% val top \begin{verbatim} val top: 'a env -> element option \end{verbatim} \verb+top env+ returns the parser's top stack element. The state contained in this stack element is the current state of the automaton. If the stack is empty, \verb+None+ is returned. In that case, the current state of the automaton must be an initial state. %% val pop_many \begin{verbatim} val pop_many: int -> 'a env -> 'a env option \end{verbatim} \verb+pop_many i env+ pops \verb+i+ elements off the automaton's stack. This is done via \verb+i+ successive invocations of \verb+pop+. Thus, \verb+pop_many 1+ is \verb+pop+. The index \verb+i+ must be nonnegative. The time complexity is $O(i)$. 
%% val get \begin{verbatim} val get: int -> 'a env -> element option \end{verbatim} \verb+get i env+ returns the parser's \verb+i+-th stack element. The index \verb+i+ is 0-based: thus, \verb+get 0+ is \verb+top+. If \verb+i+ is greater than or equal to the number of elements in the stack, \verb+None+ is returned. \verb+get+ is implemented using \verb+pop_many+ and \verb+top+: its time complexity is $O(i)$. %% val current_state_number \begin{verbatim} val current_state_number: 'a env -> int \end{verbatim} \verb+current_state_number env+ is the integer number of the automaton's current state. Although this number might conceivably be obtained via the functions~\verb+top+ and \verb+number+, using \verb+current_state_number+ is preferable, because this method works even when the automaton's stack is empty (in which case the current state is an initial state, and \verb+top+ returns \verb+None+). This number can be passed as an argument to a \verb+message+ function generated by \verb+menhir --compile-errors+. %% val equal \begin{verbatim} val equal: 'a env -> 'a env -> bool \end{verbatim} \verb+equal env1 env2+ tells whether the parser configurations \verb+env1+ and \verb+env2+ are equal in the sense that the automaton's current state is the same in \verb+env1+ and \verb+env2+ and the stack is \emph{physically} the same in \verb+env1+ and \verb+env2+. If \verb+equal env1 env2+ is \verb+true+, then the sequence of the stack elements, as observed via \verb+pop+ and \verb+top+, must be the same in \verb+env1+ and \verb+env2+. Also, if \verb+equal env1 env2+ holds, then the checkpoints \verb+input_needed env1+ and \verb+input_needed env2+ must be equivalent. (The function \verb+input_needed+ is documented in \sref{sec:incremental:updating}.) The function \verb+equal+ has time complexity $O(1)$. %% val positions \begin{verbatim} val positions: 'a env -> position * position \end{verbatim} The function \verb+positions+ returns the start and end positions of the current lookahead token. If invoked in an initial state, this function returns a pair of twice the initial position that was passed as an argument to \verb+main+. %% val has_default_reduction %% val state_has_default_reduction \begin{verbatim} val env_has_default_reduction: 'a env -> bool val state_has_default_reduction: _ lr1state -> bool \end{verbatim} When applied to an environment \verb+env+ taken from a checkpoint of the form \verb+AboutToReduce (env, prod)+, the function \verb+env_has_default_reduction+ tells whether the reduction that is about to take place is a default reduction. \verb+state_has_default_reduction s+ tells whether the state \verb+s+ has a default reduction. This includes the case where \verb+s+ is an accepting state. % ------------------------------------------------------------------------------ \subsubsection{Updating the parser's state} \label{sec:incremental:updating} The functions presented in the previous section (\sref{sec:incremental:inspecting}) allow inspecting parser states of type \verb+'a checkpoint+ and \verb+'a env+. However, so far, there are no functions for manufacturing new parser states, except \verb+offer+ and \verb+resume+, which create new checkpoints by feeding tokens, one by one, to the parser. In this section, a small number of functions are provided for manufacturing new parser states of type \verb+'a env+ and \verb+'a checkpoint+. These functions allow going far back into the past and jumping ahead into the future, so to speak. 
In other words, they allow driving the parser in other ways than by feeding tokens into it. The functions \verb+pop+, \verb+force_reduction+ and \verb+feed+ (part of the inspection API; see \sref{sec:inspection}) construct values of type \verb+'a env+. The function \verb+input_needed+ constructs values of type \verb+'a checkpoint+ and thereby allows resuming parsing in normal mode (via \verb+offer+). Together, these functions can be used to implement error handling and error recovery strategies. %% val pop \begin{verbatim} val pop: 'a env -> 'a env option \end{verbatim} \verb+pop env+ returns a new environment, where the parser's top stack cell has been popped off. (If the stack is empty, \verb+None+ is returned.) This amounts to pretending that the (terminal or nonterminal) symbol that corresponds to this stack cell has not been read. %% val force_reduction \begin{verbatim} val force_reduction: production -> 'a env -> 'a env \end{verbatim} \verb+force_reduction prod env+ can be called only if in the state \verb+env+ the parser is capable of reducing the production \verb+prod+. If this condition is satisfied, then this production is reduced, which means that its semantic action is executed (this can have side effects!) and the automaton makes a goto (nonterminal) transition. If this condition is not satisfied, an \verb+Invalid_argument+ exception is raised. %% val input_needed \begin{verbatim} val input_needed: 'a env -> 'a checkpoint \end{verbatim} \verb+input_needed env+ returns \verb+InputNeeded env+. Thus, out of a parser state that might have been obtained via a series of calls to the functions \verb+pop+, \verb+force_reduction+, \verb+feed+, and so on, it produces a checkpoint, which can be used to resume normal parsing, by supplying this checkpoint as an argument to \verb+offer+. This function should be used with some care. It could ``mess up the lookahead'' in the sense that it allows parsing to resume in an arbitrary state \verb+s+ with an arbitrary lookahead symbol \verb+t+, even though \menhir's reachability analysis (which is carried out via the \olisterrors switch) might well think that it is impossible to reach this particular configuration. If one is using \menhir's new error reporting facility (\sref{sec:errors:new}), this could cause the parser to reach an error state for which no error message has been prepared. % ------------------------------------------------------------------------------ \subsection{Inspection API} \label{sec:inspection} If \oinspection is set, \menhir offers an inspection API in addition to the monolithic and incremental APIs. (The reason why this is not done by default is that this requires more tables to be generated, thus making the generated parser larger.) Like the incremental API, the inspection API is found in the sub-module \menhirinterpreter. It offers the following types and functions. %% type _ terminal The type \verb+'a terminal+ is a generalized algebraic data type (GADT). A value of type \verb+'a terminal+ represents a terminal symbol (without a semantic value). The index \verb+'a+ is the type of the semantic values associated with this symbol. For instance, if the grammar contains the declarations \verb+%token A+ and \verb+%token<int> B+, then the generated module \menhirinterpreter contains the following definition: % \begin{verbatim} type _ terminal = | T_A : unit terminal | T_B : int terminal \end{verbatim} % The data constructors are named after the terminal symbols, prefixed with ``\verb+T_+''. 
%% type _ nonterminal The type \verb+'a nonterminal+ is also a GADT. A value of type \verb+'a nonterminal+ represents a nonterminal symbol (without a semantic value). The index \verb+'a+ is the type of the semantic values associated with this symbol. For instance, if \verb+main+ is the only nonterminal symbol, then the generated module \menhirinterpreter contains the following definition: % \begin{verbatim} type _ nonterminal = | N_main : thing nonterminal \end{verbatim} % The data constructors are named after the nonterminal symbols, prefixed with ``\verb+N_+''. %% type 'a symbol The type \verb+'a symbol+ % (an algebraic data type) is the disjoint union of the types \verb+'a terminal+ and \verb+'a nonterminal+. In other words, a value of type \verb+'a symbol+ represents a terminal or nonterminal symbol (without a semantic value). This type is (always) defined as follows: % \begin{verbatim} type 'a symbol = | T : 'a terminal -> 'a symbol | N : 'a nonterminal -> 'a symbol \end{verbatim} %% type xsymbol The type \verb+xsymbol+ is an existentially quantified version of the type \verb+'a symbol+. It is useful in situations where the index \verb+'a+ is not statically known. It is (always) defined as follows: % \begin{verbatim} type xsymbol = | X : 'a symbol -> xsymbol \end{verbatim} %% type item The type \verb+item+ describes an LR(0) item, that is, a pair of a production \verb+prod+ and an index \verb+i+ into the right-hand side of this production. If the length of the right-hand side is \verb+n+, then \verb+i+ is comprised between 0 and \verb+n+, inclusive. \begin{verbatim} type item = production * int \end{verbatim} %% Comparison functions. The following functions implement total orderings on the types \verb+_ terminal+, \verb+_ nonterminal+, \verb+xsymbol+, \verb+production+, and \verb+item+. \begin{verbatim} val compare_terminals: _ terminal -> _ terminal -> int val compare_nonterminals: _ nonterminal -> _ nonterminal -> int val compare_symbols: xsymbol -> xsymbol -> int val compare_productions: production -> production -> int val compare_items: item -> item -> int \end{verbatim} %% val incoming_symbol The function \verb+incoming_symbol+ maps a (non-initial) LR(1) state~\verb+s+ to its incoming symbol, that is, the symbol that the parser must recognize before it enters the state \verb+s+. % \begin{verbatim} val incoming_symbol: 'a lr1state -> 'a symbol \end{verbatim} % This function can be used to gain access to the semantic value \verb+v+ in a stack element \verb+Element (s, v, _, _)+. Indeed, by case analysis on the symbol \verb+incoming_symbol s+, one gains information about the type \verb+'a+, hence one obtains the ability to do something useful with the value~\verb+v+. %% val items The function \verb+items+ maps a (non-initial) LR(1) state~\verb+s+ to its LR(0) \emph{core}, that is, to the underlying set of LR(0) items. This set is represented as a list, whose elements appear in an arbitrary order. This set is \emph{not} closed under $\epsilon$-transitions. % \begin{verbatim} val items: _ lr1state -> item list \end{verbatim} %% val lhs %% val rhs The functions \verb+lhs+ and \verb+rhs+ map a production \verb+prod+ to its left-hand side and right-hand side, respectively. The left-hand side is always a nonterminal symbol, hence always of the form \verb+N _+. The right-hand side is a (possibly empty) sequence of (terminal or nonterminal) symbols. 
% \begin{verbatim} val lhs: production -> xsymbol val rhs: production -> xsymbol list \end{verbatim} % %% val nullable The function \verb+nullable+, applied to a non-terminal symbol, tells whether this symbol is nullable. A nonterminal symbol is nullable if and only if it produces the empty word $\epsilon$. % \begin{verbatim} val nullable: _ nonterminal -> bool \end{verbatim} %% val first %% val xfirst The function call \verb+first nt t+ tells whether the \emph{FIRST} set of the nonterminal symbol \verb+nt+ contains the terminal symbol \verb+t+. That is, it returns \verb+true+ if and only if \verb+nt+ produces a word that begins with \verb+t+. The function \verb+xfirst+ is identical to \verb+first+, except it expects a first argument of type \verb+xsymbol+ instead of \verb+_ terminal+. % \begin{verbatim} val first: _ nonterminal -> _ terminal -> bool val xfirst: xsymbol -> _ terminal -> bool \end{verbatim} %% val foreach_terminal %% val foreach_terminal_but_error The function \verb+foreach_terminal+ enumerates the terminal symbols, including the special symbol \error. The function \verb+foreach_terminal_but_error+ enumerates the terminal symbols, excluding \error. \begin{verbatim} val foreach_terminal: (xsymbol -> 'a -> 'a) -> 'a -> 'a val foreach_terminal_but_error: (xsymbol -> 'a -> 'a) -> 'a -> 'a \end{verbatim} %% val feed \verb+feed symbol startp semv endp env+ causes the parser to consume the (terminal or nonterminal) symbol \verb+symbol+, accompanied with the semantic value \verb+semv+ and with the start and end positions \verb+startp+ and \verb+endp+. Thus, the automaton makes a transition, and reaches a new state. The stack grows by one cell. This operation is permitted only if the current state (as determined by \verb+env+) has an outgoing transition labeled with \verb+symbol+. Otherwise, an \verb+Invalid_argument+ exception is raised. \begin{verbatim} val feed: 'a symbol -> position -> 'a -> position -> 'b env -> 'b env \end{verbatim} % TEMPORARY % document the modules that use the inspection API: Printers % document MenhirLib.General? % The directory \distrib{demos/calc-inspection} contains a demo that illustrates the use of the inspection API. % review it / clean it up! % ------------------------------------------------------------------------------ \section{Error handling: the traditional way} \label{sec:errors} \menhir's traditional error handling mechanism is considered deprecated: although it is still supported for the time being, it might be removed in the future. We recommend setting up an error handling mechanism using the new tools offered by \menhir (\sref{sec:errors:new}). \paragraph{Error handling} \menhir's traditional error handling mechanism is inspired by that of \yacc and \ocamlyacc, but is not identical. A special \error token is made available for use within productions. The LR automaton is constructed exactly as if \error were a regular terminal symbol. However, \error is never produced by the lexical analyzer. Instead, when an error is detected, the current lookahead token is discarded and replaced with the \error token, which becomes the current lookahead token. At this point, the parser enters \emph{error handling} mode. In error handling mode, automaton states are popped off the automaton's stack until a state that can \emph{act} on \error is found. This includes \emph{both} shift \emph{and} reduce actions. (\yacc and \ocamlyacc do not trigger reduce actions on \error. It is somewhat unclear why this is so.)
When a state that can reduce on \error is found, reduction is performed. Since the lookahead token is still \error, the automaton remains in error handling mode. When a state that can shift on \error is found, the \error token is shifted. At this point, the parser returns to normal mode. When no state that can act on \error is found on the automaton's stack, the parser stops and raises the exception \texttt{Error}. This exception carries no information. The position of the error can be obtained by reading the lexical analyzer's environment record. \paragraph{Error recovery} \ocamlyacc offers an error recovery mode, which is entered immediately after an \error token was successfully shifted. In this mode, tokens are repeatedly taken off the input stream and discarded until an acceptable token is found. This feature is no longer offered by \menhir. \paragraph{Error-related keywords} The following keyword is made available to semantic actions. When the \verb+$syntaxerror+ keyword is evaluated, evaluation of the semantic action is aborted, so that the current reduction is abandoned; the current lookahead token is discarded and replaced with the \error token; and error handling mode is entered. Note that there is no mechanism for inserting an \error token \emph{in front of} the current lookahead token, even though this might also be desirable. It is unclear whether this keyword is useful; it might be suppressed in the future. % ------------------------------------------------------------------------------ \section{Error handling: the new way} \label{sec:errors:new} \menhir's incremental API (\sref{sec:incremental}) allows taking control when an error is detected. Indeed, as soon as an invalid token is detected, the parser produces a checkpoint of the form \verb+HandlingError _+. At this point, if one decides to let the parser proceed, by just calling \verb+resume+, then \menhir enters its traditional error handling mode (\sref{sec:errors}). Instead, however, one can decide to take control and perform error handling or error recovery in any way one pleases. One can, for instance, build and display a diagnostic message, based on the automaton's current stack and/or state. Or, one could modify the input stream, by inserting or deleting tokens, so as to suppress the error, and resume normal parsing. In principle, the possibilities are endless. An apparently simple-minded approach to error reporting, proposed by Jeffery~\cite{jeffery-03} and further explored by Pottier~\cite{pottier-reachability-cc-2016}, consists in selecting a diagnostic message (or a template for a diagnostic message) based purely on the current state of the automaton. In this approach, one determines, ahead of time, which are the ``error states'' (that is, the states in which an error can be detected), and one prepares, for each error state, a diagnostic message. Because state numbers are fragile (they change when the grammar evolves), an error state is identified not by its number, but by an input sentence that leads to it: more precisely, by an input sentence which causes an error to be detected in this state. Thus, one maintains a set of pairs of an erroneous input sentence and a diagnostic message. \menhir defines a file format, the \messages file format, for representing this information (\sref{sec:messages:format}), and offers a set of tools for creating, maintaining, and exploiting \messages files (\sref{sec:messages:tools}). 
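To give a concrete idea of the glue code involved, here is a hedged sketch of a \verb+fail+ continuation for \verb+loop_handle+ (\sref{sec:incremental:driving}); the module name \verb+ParserMessages+, under which the compiled \messages file is assumed to be available, is a user-level convention, not something imposed by \menhir:
\begin{verbatim}
let fail checkpoint =
  match checkpoint with
  | Parser.MenhirInterpreter.HandlingError env ->
      (* Find out in which state the error was detected. *)
      let state = Parser.MenhirInterpreter.current_state_number env in
      (* Fetch the corresponding message, with a generic fallback. *)
      let message =
        try ParserMessages.message state
        with Not_found -> "Syntax error.\n"
      in
      prerr_string message;
      exit 1
  | _ ->
      (* loop_handle passes only HandlingError checkpoints to fail. *)
      assert false
\end{verbatim}
The function \verb+message+, of type \verb+int -> string+, is the one produced by \ocompileerrors (\sref{sec:messages:tools}).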
Once one understands these tools, there remains to write a collection of diagnostic messages, a more subtle task than one might think (\sref{sec:errors:diagnostics}), and to glue everything together (\sref{sec:errors:example}). In this approach to error handling, as in any other approach, one must understand exactly when (that is, in which states) errors are detected. This in turn requires understanding how the automaton is constructed. \menhir's construction technique is not Knuth's canonical LR(1) technique~\cite{knuth-lr-65}, which is usually too expensive to be practical. Instead, \menhir \emph{merges} states~\cite{pager-77} and introduces so-called \emph{default reductions}. These techniques \emph{defer} error detection by allowing extra reductions to take place before an error is detected. % Furthermore, \menhir supports \donerrorreduce declarations, % which also introduce extra reductions. The impact of these alterations must be taken into account when writing diagnostic messages (\sref{sec:errors:diagnostics}). In this approach to error handling, the special \error token is not used. It should not appear in the grammar. Similarly, the \verb+$syntaxerror+ keyword should not be used. % ------------------------------------------------------------------------------ \subsection{The \messages file format} \label{sec:messages:format} A \messages file is a text file. Comment lines, which begin with a \verb+#+ character, are ignored everywhere. As is evident in the following description, blank lines are significant: they are used as separators between entries and within an entry. A~\messages file is composed of a list of entries. Two entries are separated by one or more blank lines. Each entry consists of one or more input sentences, followed with one or more blank lines, followed with a message. The syntax of an input sentence is described in \sref{sec:sentences}. A message is arbitrary text, but cannot contain a blank line. We stress that there cannot be a blank line between two sentences (if there is one, \menhir becomes confused and may complain about some word not being ``a known non-terminal symbol''). \begin{figure} \begin{verbatim} grammar: TYPE UID grammar: TYPE OCAMLTYPE UID PREC # A (handwritten) comment. Ill-formed declaration. Examples of well-formed declarations: %type <Syntax.expression> expression %type <int> date time \end{verbatim} \caption{An entry in a \messages file} \label{fig:messages:entry} \end{figure} \begin{figure} \begin{verbatim} grammar: TYPE UID ## ## Ends in an error in state: 1. ## ## declaration -> TYPE . OCAMLTYPE separated_nonempty_list(option(COMMA), ## strict_actual) [ TYPE TOKEN START RIGHT PUBLIC PERCENTPERCENT PARAMETER ## ON_ERROR_REDUCE NONASSOC LEFT INLINE HEADER EOF COLON ] ## ## The known suffix of the stack is as follows: ## TYPE ## grammar: TYPE OCAMLTYPE UID PREC ## ## Ends in an error in state: 5. ## ## strict_actual -> symbol . loption(delimited(LPAREN,separated_nonempty_list ## (COMMA,strict_actual),RPAREN)) [ UID TYPE TOKEN START STAR RIGHT QUESTION ## PUBLIC PLUS PERCENTPERCENT PARAMETER ON_ERROR_REDUCE NONASSOC LID LEFT ## INLINE HEADER EOF COMMA COLON ] ## ## The known suffix of the stack is as follows: ## symbol ## # A (handwritten) comment. Ill-formed declaration. 
Examples of well-formed declarations: %type <Syntax.expression> expression %type <int> date time \end{verbatim} \caption{An entry in a \messages file, decorated with auto-generated comments} \label{fig:messages:entry:decorated} \end{figure} As an example, \fref{fig:messages:entry} shows a valid entry, taken from \menhir's own \messages file. This entry contains two input sentences, which lead to errors in two distinct states. A single message is associated with these two error states. Several commands, described next (\sref{sec:messages:tools}), produce \messages files where each input sentence is followed with an auto-generated comment, marked with \verb+##+. This special comment indicates in which state the error is detected, and is supposed to help the reader understand what it means to be in this state: What has been read so far? What is expected next? As an example, the previous entry, decorated with auto-generated comments, is shown in \fref{fig:messages:entry:decorated}. (We have manually wrapped the lines that did not fit in this document.) An auto-generated comment begins with the number of the error state that is reached via this input sentence. Then, the auto-generated comment shows the LR(1) items that compose this state, in the same format as in an \automaton file. These items offer a description of the past (that is, what has been read so far) and the future (that is, which terminal symbols are allowed next). Finally, the auto-generated comment shows what is known about the stack when the automaton is in this state. (This can be deduced from the LR(1) items, but is more readable if shown separately.) % Plus, there might be cases where the known suffix is longer than what % the LR(1) items suggest. But I have never seen this yet. In a canonical LR(1) automaton, the LR(1) items offer an exact description of the past and future. However, in a noncanonical automaton, which is by default what \menhir produces, the situation is more subtle. The lookahead sets can be over-approximated, so the automaton can perform one or more ``spurious reductions'' before an error is detected. As a result, the LR(1) items in the error state offer a description of the future that may be both incorrect (that is, a terminal symbol that appears in a lookahead set is not necessarily a valid continuation) and incomplete (that is, a terminal symbol that does not appear in any lookahead set may nevertheless be a valid continuation). More details appear further on (\sref{sec:errors:diagnostics}). In order to attract the user's attention to this issue, if an input sentence causes one or more spurious reductions, then the auto-generated comment contains a warning about this fact. This mechanism is not completely foolproof, though, as it may be the case that one particular sentence does not cause any spurious reductions (hence, no warning appears), yet leads to an error state that can be reached via other sentences that do involve spurious reductions. % Not sure what to conclude about this issue... % ------------------------------------------------------------------------------ \subsection{Maintaining \messages files} \label{sec:messages:tools} Ideally, the set of input sentences in a \messages file should be correct (that is, every sentence causes an error on its last token), irredundant (that is, no two sentences lead to the same error state), and complete (that is, every error state is reached by some sentence).
Correctness and irredundancy are checked by the command \ocompileerrors \nt{filename}, where \nt{filename} is the name of a \messages file. This command fails if a sentence does not cause an error at all, or causes an error too early. It also fails if two sentences lead to the same error state. % If the file is correct and irredundant, then (as its name suggests) this command compiles the \messages file down to an \ocaml function, whose code is printed on the standard output channel. This function, named \verb+message+, has type \verb+int -> string+, and maps a state number to a message. It raises the exception \verb+Not_found+ if its argument is not the number of a state for which a message has been defined. Completeness is checked via the commands \olisterrors and \ocompareerrors. The former produces, from scratch, a complete set of input sentences, that is, a set of input sentences that reaches all error states. The latter compares two sets of sentences (more precisely, the two underlying sets of error states) for inclusion. The command \olisterrors first computes all possible ways of causing an error. From this information, it deduces a list of all error states, that is, all states where an error can be detected. For each of these states, it computes a (minimal) input sentence that causes an error in this state. Finally, it prints these sentences, in the \messages file format, on the standard output channel. Each sentence is followed with an auto-generated comment and with a dummy diagnostic message. The user should be warned that this algorithm may require large amounts of time (typically in the tens of seconds, possibly more) and memory (typically in the gigabytes, possibly more). It requires a 64-bit machine. (On a 32-bit machine, it works, but quickly hits a built-in size limit.) At the verbosity level \ologautomaton~\texttt{2}, it displays some progress information and internal statistics on the standard error channel. The command \ocompareerrors \nt{filename1} \ocompareerrors \nt{filename2} compares the \messages files \nt{filename1} and \nt{filename2}. Each file is read and internally translated to a mapping of states to messages. \menhir then checks that the left-hand mapping is a subset of the right-hand mapping. That is, if a state~$s$ is reached by some sentence in \nt{filename1}, then it should also be reached by some sentence in \nt{filename2}. Furthermore, if the message associated with $s$ in \nt{filename1} is not a dummy message, then the same message should be associated with $s$ in \nt{filename2}. To check that the sentences in \nt{filename2} cover all error states, it suffices to (1)~use \olisterrors to produce a complete set of sentences, which one stores in \nt{filename1}, then (2)~use \ocompareerrors to compare \nt{filename1} and \nt{filename2}. In the case of a grammar that evolves fairly often, it can take significant human time and effort to update the \messages file and ensure correctness, irredundancy, and completeness. A way of reducing this effort is to abandon completeness. This implies that the auto-generated \verb+message+ function can raise \verb+Not_found+ and that a generic ``syntax error'' message must be produced in that case. We prefer to discourage this approach, as it implies that the end user is exposed to a mixture of specific and generic syntax error messages, and there is no guarantee that the specific (hand-written) messages will appear in \emph{all} situations where they are expected to appear.
Instead, we recommend waiting for the grammar to become stable and enforcing completeness. The command \oupdateerrors \nt{filename} is used to update the auto-generated comments in the \messages file \nt{filename}. It is typically used after a change in the grammar (or in the command line options that affect the construction of the automaton). A new \messages file is produced on the standard output channel. It is identical to \nt{filename}, except the auto-generated comments, identified by \verb+##+, have been removed and re-generated. The command \oechoerrors \nt{filename} is used to filter out all comments, blank lines, and messages from the \messages file \nt{filename}. The input sentences, and nothing else, are echoed on the standard output channel. As an example application, one could then translate the sentences to concrete syntax and create a collection of source files that trigger every possible syntax error. The command \ointerpreterror is analogous to \ointerpret. It causes \menhir to act as an interpreter. \menhir reads sentences off the standard input channel, parses them, and displays the outcome. This switch can be usefully combined with \otrace. The main difference between \ointerpret and \ointerpreterror is that, when the latter command is used, \menhir expects the input sentence to cause an error on its last token, and displays information about the state in which the error is detected, in the form of a \messages file entry. This can be used to quickly find out exactly what error is caused by one particular input sentence. % ------------------------------------------------------------------------------ \subsection{Writing accurate diagnostic messages} \label{sec:errors:diagnostics} One might think that writing a diagnostic message for each error state is a straightforward (if lengthy) task. In reality, it is not so simple. % Here are a few guidelines. % The reader is referred to Pottier's % paper~\cite{pottier-reachability-cc-2016} for more details. \paragraph{A state, not a sentence} The first thing to keep in mind is that a diagnostic message is associated with a \emph{state}~$s$, as opposed to a sentence. An entry in a \messages file contains a sentence~$w$ that leads to an error in state~$s$. This sentence is just one way of causing an error in state~$s$; there may exist many other sentences that also cause an error in this state. The diagnostic message should not be specific to the sentence~$w$: it should make sense regardless of how the state~$s$ is reached. As a rule of thumb, when writing a diagnostic message, one should (as much as possible) ignore the example sentence~$w$ altogether, and concentrate on the description of the state~$s$, which appears as part of the auto-generated comment. The LR(1) items that compose the state~$s$ offer a description of the past (that is, what has been read so far) and the future (that is, which terminal symbols are allowed next). A diagnostic message should be designed based on this description. \begin{figure} \verbatiminput{declarations.mly} \caption{A grammar where one error state is difficult to explain} \label{fig:declarations} \end{figure} \begin{figure} \begin{verbatim} program: ID COLON ID LPAREN ## ## Ends in an error in state: 8. ## ## typ1 -> typ0 . [ SEMICOLON RPAREN ] ## typ1 -> typ0 .
ARROW typ1 [ SEMICOLON RPAREN ] ## ## The known suffix of the stack is as follows: ## typ0 ## \end{verbatim} \caption{A problematic error state in the grammar of \fref{fig:declarations}, due to over-approximation} \label{fig:declarations:over} \end{figure} \paragraph{The problem of over-approximated lookahead sets} As pointed out earlier (\sref{sec:messages:format}), in a noncanonical automaton, the lookahead sets in the LR(1) items can be both over- and under-approximated. One must be aware of this phenomenon, otherwise one runs the risk of writing a diagnostic message that proposes too many or too few continuations. As an example, let us consider the grammar in \fref{fig:declarations}. According to this grammar, a ``program'' is either a declaration between parentheses or a declaration followed with a semicolon. A ``declaration'' is an identifier, followed with a colon, followed with a type. A ``type'' is an identifier, a type between parentheses, or a function type in the style of \ocaml. The (noncanonical) automaton produced by \menhir for this grammar has 17~states. Using \olisterrors, we find that an error can be detected in 10 of these 17~states. By manual inspection of the auto-generated comments, we find that for 9 out of these 10~states, writing an accurate diagnostic message is easy. However, one problematic state remains, namely state~8, shown in \fref{fig:declarations:over}. In this state, a (level-0) type has just been read. One valid continuation, which corresponds to the second LR(1) item in \fref{fig:declarations:over}, is to continue this type: the terminal symbol \verb+ARROW+, followed with a (level-1) type, is a valid continuation. Now, the question is, what other valid continuations are there? By examining the first LR(1) item in \fref{fig:declarations:over}, it may look as if both \verb+SEMICOLON+ and \verb+RPAREN+ are valid continuations. However, this cannot be the case. A moment's thought reveals that \emph{either} we have seen an opening parenthesis \verb+LPAREN+ at the very beginning of the program, in which case we definitely expect a closing parenthesis \verb+RPAREN+; \emph{or} we have not seen one, in which case we definitely expect a semicolon \verb+SEMICOLON+. It is \emph{never} the case that \emph{both} \verb+SEMICOLON+ and \verb+RPAREN+ are valid continuations! In fact, the lookahead set in the first LR(1) item in \fref{fig:declarations:over} is over-approximated. State~8 in the noncanonical automaton results from merging two states in the canonical automaton. In such a situation, one cannot write an accurate diagnostic message. % by lack of ``static context''. Knowing that the automaton is in state~8 does not give us a precise view of the valid continuations. Some valuable information (that is, whether we have seen an opening parenthesis \verb+LPAREN+ at the very beginning of the program) is buried in the automaton's stack. \begin{figure} \verbatiminput{declarations-phantom.mly} \caption{Splitting the problematic state of \fref{fig:declarations:over} via selective duplication} \label{fig:declarations:phantom} \end{figure} \begin{figure} \verbatiminput{declarations-onerrorreduce.mly} \caption{Avoiding the problematic state of \fref{fig:declarations:over} via reductions on error} \label{fig:declarations:onerrorreduce} \end{figure} \begin{figure} \begin{verbatim} program: ID COLON ID LPAREN ## ## Ends in an error in state: 15. ## ## program -> declaration . 
SEMICOLON [ # ] ## ## The known suffix of the stack is as follows: ## declaration ## ## WARNING: This example involves spurious reductions. ## This implies that, although the LR(1) items shown above provide an ## accurate view of the past (what has been recognized so far), they ## may provide an INCOMPLETE view of the future (what was expected next). ## In state 8, spurious reduction of production typ1 -> typ0 ## In state 11, spurious reduction of production declaration -> ID COLON typ1 ## \end{verbatim} \caption{A problematic error state in the grammar of \fref{fig:declarations:onerrorreduce}, due to under-approximation} \label{fig:declarations:under} \end{figure} How can one work around this problem? Let us suggest three options. \paragraph{Blind duplication of states} One option would be to build a canonical automaton by using the % (undocumented!) \ocanonical switch. In this example, one would obtain a 27-state automaton, where the problem has disappeared. However, this option is rarely viable, as it duplicates many states without good reason. \paragraph{Selective duplication of states} A second option is to manually cause just enough duplication to remove the problematic over-approximation. In our example, we wish to distinguish two kinds of types and declarations, namely those that must be followed with a closing parenthesis, and those that must be followed with a semicolon. We create such a distinction by parameterizing \verb+typ1+ and \verb+declaration+ with a phantom parameter. The modified grammar is shown in \fref{fig:declarations:phantom}. The phantom parameter does not affect the language that is accepted: for instance, the nonterminal symbols \texttt{declaration(SEMICOLON)} and \texttt{declaration(RPAREN)} generate the same language as \texttt{declaration} in the grammar of \fref{fig:declarations}. Yet, by giving distinct names to these two symbols, we force the construction of an automaton where more states are distinguished. In this example, \menhir produces a 23-state automaton. Using \olisterrors, we find that an error can be detected in 11 of these 23~states, and by manual inspection of the auto-generated comments, we find that for each of these 11~states, writing an accurate diagnostic message is easy. In summary, we have selectively duplicated just enough states so as to split the problematic error state into two non-problematic error states. % Je me demande s'il n'y a pas un lien avec la traduction de LR(k+1) vers LR(k)... % On voit que le FOLLOW est intégré au symbole nonterminal. \paragraph{Reductions on error} A third and last option is to introduce an \donerrorreduce declaration (\sref{sec:onerrorreduce}) so as to prevent the detection of an error in the problematic state~8. We see in \fref{fig:declarations:over} that, in state~8, the production $\texttt{typ1} \rightarrow \texttt{typ0}$ is ready to be reduced. If we could force this reduction to take place, then the automaton would move to some other state where it would be clear which of \verb+SEMICOLON+ and \verb+RPAREN+ is expected. We achieve this by marking \verb+typ1+ as ``reducible on error''. The modified grammar is shown in \fref{fig:declarations:onerrorreduce}. For this grammar, \menhir produces a 17-state automaton. (This is the exact same automaton as for the grammar of \fref{fig:declarations}, except 2 of the 17 states have received extra reduction actions.) Using \olisterrors, we find that an error can be detected in 9 of these~17 states. 
The problematic state, namely state~8, is no longer an error state! The problem has vanished. \paragraph{The problem of under-approximated lookahead sets} The third option seems by far the simplest of all, and is recommended in many situations. However, it comes with a caveat. There may now exist states whose lookahead sets are under-approximated, in a certain sense. Because of this, there is a danger of writing an incomplete diagnostic message, one that does not list all valid continuations. To see this, let us look again at the sentence \texttt{ID COLON ID LPAREN}. In the grammar and automaton of \fref{fig:declarations}, this sentence takes us to the problematic state~8, shown in \fref{fig:declarations:over}. In the grammar and automaton of \fref{fig:declarations:onerrorreduce}, because more reduction actions are carried out before the error is detected, this sentence takes us to state~15, shown in \fref{fig:declarations:under}. When writing a diagnostic message for state~15, one might be tempted to write: ``Up to this point, a declaration has been recognized. At this point, a semicolon is expected''. Indeed, by examining the sole LR(1) item in state~15, it looks as if \verb+SEMICOLON+ is the only permitted continuation. However, this is not the case. Another valid continuation is \verb+ARROW+: indeed, the sentence \texttt{ID COLON ID ARROW ID SEMICOLON} forms a valid program. In fact, if the first token following \texttt{ID COLON ID} is \texttt{ARROW}, then in state~8 this token is shifted, so the two reductions that take us from state~8 through state~11 to state~15 never take place. This is why, even though \texttt{ARROW} does not appear in state~15 as a valid continuation, it nevertheless is a valid continuation of \texttt{ID COLON ID}. The warning produced by \menhir, shown in \fref{fig:declarations:under}, is supposed to attract attention to this issue. Another way to explain this issue is to point out that, by declaring \verb+%on_error_reduce typ1+, we make a choice. When the parser reads a type and finds an invalid token, it decides that this type is finished, even though, in reality, this type could be continued with \verb+ARROW+ \ldots. This in turn causes the parser to perform another reduction and consider the current declaration finished, even though, in reality, this declaration could be continued with \verb+ARROW+ \ldots. In summary, when writing a diagnostic message for state~15, one should take into account the fact that this state can be reached via spurious reductions and (therefore) \verb+SEMICOLON+ may not be the only permitted continuation. One way of doing this, without explicitly listing all permitted continuations, is to write: ``Up to this point, a declaration has been recognized. If this declaration is complete, then at this point, a semicolon is expected''. % ------------------------------------------------------------------------------ \subsection{A working example} \label{sec:errors:example} The CompCert verified compiler offers a real-world example of this approach to error handling. The ``pre-parser'' is where syntax errors are detected: see \compcertgithubfile{cparser/pre\_parser.mly}. % (The pre-parser is also in charge of distinguishing type names versus variable % names, but that is an independent issue.) A database of erroneous input sentences and (templates for) diagnostic messages is stored in \compcertgithubfile{cparser/handcrafted.messages}. It is compiled, using \ocompileerrors, to an \ocaml file named \texttt{cparser/pre\_parser\_messages.ml}. 
The function \verb+Pre_parser_messages.message+, which maps a state number to (a template for) a diagnostic message, is called from \compcertgithubfile{cparser/ErrorReports.ml}, where we construct and display a full-fledged diagnostic message.
In CompCert, we allow a template for a diagnostic message to contain the special form \verb+$i+, where \verb+i+ is an integer constant, understood as an index into the parser's stack. The code in \compcertgithubfile{cparser/ErrorReports.ml} automatically replaces this special form with the fragment of the source text that corresponds to this stack entry. This mechanism is not built into \menhir; it is implemented in CompCert using \menhir's incremental API.
% ------------------------------------------------------------------------------
\section{Coq back-end}
\label{sec:coq}
\menhir is able to generate a parser whose correctness can be formally verified using the Coq proof assistant~\cite{jourdan-leroy-pottier-12}. This feature is used to construct the parser of the CompCert verified compiler~\cite{compcert}.
Setting the \ocoq switch on the command line enables the Coq back-end. When this switch is set, \menhir expects an input file whose name ends in \vy and generates a Coq file whose name ends in \texttt{.v}.
Like a \mly file, a \vy file is a grammar specification, with embedded semantic actions. The only difference is that the semantic actions in a \vy file are expressed in Coq instead of \ocaml. A \vy file otherwise uses the same syntax as a \mly file. CompCert's \compcertgithubfile{cparser/Parser.vy} serves as an example.
Several restrictions are imposed when \menhir is used in \ocoq mode:
%
\begin{itemize}
\item The error handling mechanism (\sref{sec:errors}) is absent. The \verb+$syntaxerror+ keyword and the \error token are not supported.
\item Location information is not propagated. The \verb+$start*+ and \verb+$end*+ keywords (\fref{fig:pos}) are not supported.
\item \dparameter (\sref{sec:parameter}) is not supported.
\item \dinline (\sref{sec:inline}) is not supported.
\item The standard library (\sref{sec:library}) is not supported, of course, because its semantic actions are expressed in \ocaml. If desired, the user can define an analogous library, whose semantic actions are expressed in Coq.
\item Because Coq's type inference algorithm is rather unpredictable, the Coq type of every nonterminal symbol must be provided via a \dtype or \dstart declaration (\sref{sec:type}, \sref{sec:start}).
\item Unless the proof of completeness has been deactivated using \ocoqnocomplete, the grammar must not have a conflict (not even a benign one, in the sense of \sref{sec:conflicts:benign}). That is, the grammar must be LR(1). Conflict resolution via priority and associativity declarations (\sref{sec:assoc}) is not supported. The reason is that there is no simple formal specification of how conflict resolution should work.
\end{itemize}
The generated file contains several modules:
\begin{itemize}
\item The module \verb+Gram+ defines the terminal and non-terminal symbols, the grammar, and the semantic actions.
\item The module \verb+Aut+ contains the automaton generated by \menhir, together with a certificate that is checked by Coq while establishing the soundness and completeness of the parser.
\end{itemize}
The type~\verb+terminal+ of the terminal symbols is an inductive type, with one constructor for each terminal symbol. A terminal symbol named \verb+Foo+ in the \verb+.vy+ file is named \verb+Foo't+ in Coq.
A~terminal symbol per se does not carry a semantic value. We also define the type \verb+token+ of tokens, that is, dependent pairs of a terminal symbol and a semantic value of an appropriate type for this symbol. We model the lexer as an object of type \verb+Streams.Stream token+, that is, an infinite stream of tokens.
% TEMPORARY documenter que du coup, après extraction, la seule façon pour un
% lexer OCaml de produire des tokens, c'est d'utiliser Obj.magic
% cf. la fonction compute_token_stream dans le Lexer.mll de Compcert:
% Cons (Coq_existT (t, Obj.magic v), Lazy.from_fun compute_token_stream)
The type~\verb+nonterminal+ of the non-terminal symbols is an inductive type, with one constructor for each non-terminal symbol. A non-terminal symbol named \verb+Bar+ in the \verb+.vy+ file is named \verb+Bar'nt+ in Coq.
The proof of termination of an LR(1) parser in the case of invalid input seems far from obvious. We did not find such a proof in the literature. In an application such as CompCert~\cite{compcert}, this question is not considered crucial. For this reason, we did not formally establish the termination of the parser. Instead, in order to satisfy Coq's termination requirements, we use the ``fuel'' technique: the parser takes an additional parameter \verb+log_fuel+ of type \verb+nat+ such that $2^{\verb+log_fuel+}$ is the maximum number of steps the parser is allowed to perform. In practice, one can use a value of, e.g., 40 or 50 to make sure the parser will never run out of fuel in a reasonable time.
Parsing can have three different outcomes, represented by the type \verb+parse_result+.
% (This definition is implicitly parameterized over the initial state~\verb+init+. We omit the details here.)
%
\begin{verbatim}
Inductive parse_result :=
  | Fail_pr: parse_result
  | Timeout_pr: parse_result
  | Parsed_pr:
      symbol_semantic_type (NT (start_nt init)) -> Stream token -> parse_result.
\end{verbatim}
The outcome \verb+Fail_pr+ means that parsing has failed because of a syntax error. (If the completeness of the parser with respect to the grammar has been proved, this implies that the input is invalid.) The outcome \verb+Timeout_pr+ means that the fuel has been exhausted. Of course, this cannot happen if the parser was given an infinite amount of fuel, as suggested above. The outcome \verb+Parsed_pr+ means that the parser has succeeded in parsing a prefix of the input stream. It carries the semantic value that has been constructed for this prefix, as well as the remainder of the input stream. For each entry point \verb+entry+ of the grammar, \menhir generates a parsing function \verb+entry+, whose type is \verb+nat -> Stream token -> parse_result+.
% jh: Je suis un peu embêté, parce que init est
% en réalité de type initstate, mais je n'ai pas envie d'en parler
% dans la doc. Tout ce qui importe, c'est que le premier paramètre de
% Parsed_pr a un type compatible avec le type que l'utilisateur a
% donné.
Two theorems are provided, named \verb+entry_point_correct+ and \verb+entry_point_complete+. The correctness theorem states that, if a word (a prefix of the input stream) is accepted, then this word is valid (with respect to the grammar) and the semantic value that is constructed by the parser is valid as well (with respect to the grammar). The completeness theorem states that if a word (a prefix of the input stream) is valid (with respect to the grammar), then (given sufficient fuel) it is accepted by the parser.
These results imply that the grammar is unambiguous: for every input, there is at most one valid interpretation. This is proved by another generated theorem, named \verb+Parser.unambiguous+. % jh: Pas besoin de prouver la terminaison pour avoir la non-ambiguïté, car % les cas de non-terminaison ne concernent que les entrées invalides. % fp: bien vu! % fp: ce serait intéressant d'avoir un certificat comme quoi la grammaire est % bien LR(1), mais peut-être qu'on s'en fout. C'est bien de savoir qu'elle % est non-ambiguë. % jh: Je ne sais pas ce que c'est qu'un certificat comme quoi la grammaire % est LR(1), en pratique... % fp: Ce serait une preuve d'un théorème, exprimé uniquement en termes de % la grammaire, comme quoi la grammaire est LR(1). Il y a une définition % de cette propriété dans le textbook de Aho et Ullman, si je me rappelle % bien. Mais peu importe. % fp: On pourrait aussi souhaiter un théorème comme quoi le parser ne lit % pas le stream trop loin... % jh: pour vraiment prouver cela, il faudrait inverser le % controle. Sinon, comme résultat un peu moins fort, dans la version % actuelle, on renvoie le stream restant, et on prouve qu'il % correspond bien à la fin du Stream. The parsers produced by \menhir's Coq back-end must be linked with a Coq library. This library can be installed via the command \verb+opam install coq-menhirlib+.% % \footnote{This assumes that you have installed \texttt{opam}, the OCaml package manager, and that you have run the command \texttt{opam repo add coq-released https://coq.inria.fr/opam/released}.} % The Coq sources of this library can be found in the \texttt{coq-menhirlib} directory of the Menhir repository. The CompCert verified compiler~\cite{compcert,compcert-github} can be used as an example if one wishes to use \menhir to generate a formally verified parser as part of some other project. See in particular the directory \compcertgithubfile{cparser}. % ------------------------------------------------------------------------------ \section{Building grammarware on top of \menhir} \label{sec:grammarware} It is possible to build a variety of grammar-processing tools, also known as ``grammarware''~\cite{klint-laemmel-verhoef-05}, on top of \menhir's front-end. Indeed, \menhir offers a facility for dumping a \cmly file, which contains a (binary-form) representation of the grammar and automaton, as well as a library, \menhirsdk, for (programmatically) reading and exploiting a \cmly file. These facilities are described in \sref{sec:sdk}. % Furthermore, \menhir allows decorating a grammar with ``attributes'', which are ignored by \menhir's back-ends, yet are written to the \cmly file, thus can be exploited by other tools, via \menhirsdk. % Attributes are described in \sref{sec:attributes}. \subsection{\menhir's SDK} \label{sec:sdk} The command line option \ocmly causes \menhir to produce a \cmly file in addition to its normal operation. This file contains a (binary-form) representation of the grammar and automaton. This is the grammar that is obtained after the following steps have been carried out: \begin{itemize} \item joining multiple \mly files, if necessary; % in fact, always (due to standard.mly) \item eliminating anonymous rules; \item expanding away parameterized nonterminal symbols; \item removing unreachable nonterminal symbols; \item performing \ocaml type inference, if the \oinfer switch is used; \item inlining away nonterminal symbols that are decorated with \dinline. 
\end{itemize} The library \menhirsdk offers an API for reading a \cmly file. The functor \repo{src/cmly_read.mli}{\texttt{MenhirSdk.Cmly\_read.Read}} reads such a file and produces a module whose signature is \repo{src/cmly_api.ml}{\texttt{MenhirSdk.Cmly\_api.GRAMMAR}}. This API is not explained in this document; for details, the reader is expected to follow the above links. % TEMPORARY mention the demo generate-printers % as an example of both the SDK and attributes % (possibly make it an independent package) \subsection{Attributes} \label{sec:attributes} Attributes are decorations that can be placed in \mly files. They are ignored by \menhir's back-ends, but are written to \cmly files, thus can be exploited by other tools, via \menhirsdk. An attribute consists of a name and a payload. An attribute name is an \ocaml identifier, such as \texttt{cost}, or a list of \ocaml identifiers, separated with dots, such as \texttt{my.name}. An attribute payload is an \ocaml expression of arbitrary type, such as \texttt{1} or \verb+"&&"+ or \verb+print_int+. Following the syntax of \ocaml's attributes, an attribute's name and payload are separated with one or more spaces, and are delimited by \verb+[@+ and \verb+]+. Thus, \verb+[@cost 1]+ and \verb+[@printer print_int]+ are examples of attributes. An attribute can be attached at one of four levels: % grammar-level attributes, %[@foo ...] % terminal attribute, %token BAR [@foo ...] % nonterminal attribute, bar [@foo ...]: ... % producer attribute, e = expr [@foo ...] \begin{enumerate} \item An attribute can be attached with the grammar. Such an attribute must be preceded with a \verb+%+ sign and must appear in the declarations section (\sref{sec:decls}). For example, the following is a valid declaration: \begin{verbatim} %[@trace true] \end{verbatim} \item An attribute can be attached with a terminal symbol. Such an attribute must follow the declaration of this symbol. For example, the following is a valid declaration of the terminal symbol \verb+INT+: \begin{verbatim} %token<int> INT [@cost 0] [@printer print_int] \end{verbatim} \item An attribute can be attached with a nonterminal symbol. Such an attribute must appear inside the rule that defines this symbol, immediately after the name of this symbol. For instance, the following is a valid definition of the nonterminal symbol \verb+expr+: \begin{verbatim} expr [@default EConst 0]: i = INT { EConst i } | e1 = expr PLUS e2 = expr { EAdd (e1, e2) } \end{verbatim} An attribute can be attached with a parameterized nonterminal symbol: \begin{verbatim} option [@default None] (X): { None } | x = X { Some x } \end{verbatim} An attribute cannot be attached with a nonterminal symbol that is decorated with the \dinline keyword. \item An attribute can be attached with a producer (\sref{sec:producers}), that is, with an occurrence of a terminal or nonterminal symbol in the right-hand side of a production. Such an attribute must appear immediately after the producer. For instance, in the following rule, an attribute is attached with the producer \verb+expr*+: \begin{verbatim} exprs: LPAREN es = expr* [@list true] RPAREN { es } \end{verbatim} \end{enumerate} % %attribute declarations: As a convenience, it is possible to attach many attributes with many (terminal and nonterminal) symbols in one go, via an \dattribute declaration, which must be placed in the declarations section (\sref{sec:decls}). 
For instance, the following declaration attaches both of the attributes \verb+[@cost 0]+ and \verb+[@precious false]+ with each of the symbols \verb+INT+ and \verb+id+: \begin{verbatim} %attribute INT id [@cost 0] [@precious false] \end{verbatim} An \dattribute declaration can be considered syntactic sugar: it is desugared away in terms of the four forms of attributes presented earlier. (The command line switch \oonlypreprocess can be used to see how it is desugared.) % Interaction of %attribute declarations and parameterized nonterminals: If an attribute is attached with a parameterized nonterminal symbol, then, when this symbol is expanded away, the attribute is transmitted to every instance. For instance, in an earlier example, the attribute \verb+[@default None]+ was attached with the parameterized symbol \verb+option+. Then, every instance of \verb+option+, such as \verb+option(expr)+, \verb+option(COMMA)+, and so on, inherits this attribute. To attach an attribute with one specific instance only, one can use an \dattribute declaration. For instance, the declaration \verb+%attribute option(expr) [@cost 10]+ attaches an attribute with the nonterminal symbol \verb+option(expr)+, but not with the symbol \verb+option(COMMA)+. % ------------------------------------------------------------------------------ \section{Interaction with build systems} \label{sec:build} This section explains some details of the compilation workflow, including \ocaml type inference and its repercussions on dependency analysis (\sref{sec:build:infer}) and compilation flags (\sref{sec:build:flags}). % This material should be of interest only to authors of build systems who wish to build support for \menhir into their system. % Ordinary users should skip this section and use a build system that knows about \menhir, such as \ocamlbuild or \dune. \subsection{\ocaml type inference and dependency analysis} \label{sec:build:infer} In an ideal world, the semantic actions in a \mly file should be well-typed according to the \ocaml type discipline, and their types should be known to \menhir, which may need this knowledge. (When \oinspection is set, \menhir needs to know the \ocaml type of every nonterminal symbol.) % To address this problem, three approaches exist: \begin{itemize} \item Ignore the problem and let \menhir run without \ocaml type information (\sref{sec:build:infer:none}). \item Let \menhir obtain \ocaml type information by invoking the \ocaml compiler (\sref{sec:build:infer:direct}). \item Let \menhir request and receive \ocaml type information without invoking the \ocaml compiler (\sref{sec:build:infer:indirect}). \end{itemize} \subsubsection{Running without \ocaml type information} \label{sec:build:infer:none} The simplest thing to do is to run \menhir \emph{without} any of the flags described in the following (\sref{sec:build:infer:direct}, \sref{sec:build:infer:indirect}). % Then, the semantic actions are \emph{not} type-checked, and their \ocaml type is \emph{not} inferred. % (This is analogous to using \ocamlyacc.) % The drawbacks of this approach are as follows: \begin{itemize} \item A type error in a semantic action is detected only when the \ml file produced by \menhir is type-checked. The location of the type error, as reported by the \ocaml compiler, can be suboptimal. % I think that the type error should be reported inside a semantic % action (we produce # directives for this purpose). Yet I am not % certain that this will be the case. 
% Plus, the type error could be
% reported inside Menhir's standard library, whereas when --infer is
% used, we place the standard library first, so as to ensure that no
% type error is found inside it. (See [infer.ml].)
\item Unless a \dtype declaration for every nonterminal symbol is given, the inspection API cannot be generated, that is, \oinspection must be turned off.
\end{itemize}
\subsubsection{Obtaining \ocaml type information by calling the \ocaml compiler}
\label{sec:build:infer:direct}
The second approach is to let \menhir invoke the \ocaml compiler so as to type-check the semantic actions and infer their types. This is done by invoking \menhir with the \oinfer switch, as follows.
\docswitch{\oinfer} This switch causes the semantic actions to be checked for type consistency \emph{before} the parser is generated. To do so, \menhir generates a mock \ml file, which contains just the semantic actions, and invokes the \ocaml compiler, under the form \verb+ocamlc -i+, so as to type-check this file and infer the types of the semantic actions. \menhir then reads this information and produces real \ml and \mli files.
% There is a slight catch with \oinfer. The types inferred by \ocamlc are valid
% in the toplevel context, but can change meaning when inserted into a local
% context.
\docswitch{\oocamlc \nt{command}} This switch controls how \ocamlc is invoked. It allows setting both the name of the executable and the command line options that are passed to it.
\docskip
One difficulty with this approach is that the \ocaml compiler usually needs to consult a few \texttt{.cm[iox]} files. Indeed, if the \mly file contains a reference to an external \ocaml module, say \texttt{A}, then the \ocaml compiler typically needs to read one or more files named \texttt{A.cm[iox]}. This implies that these files must have been created first. But how is one supposed to know, exactly, which files should be created first? One must scan the \mly file so as to find out which external modules it depends upon. In other words, a dependency analysis is required. This analysis can be carried out by invoking \menhir with the \odepend switch, as follows.
\docswitch{\odepend} This switch causes \menhir to generate dependency information for use in conjunction with \make. When invoked in this mode, \menhir does not generate a parser. Instead, it examines the grammar specification and prints a list of prerequisites for the targets \nt{basename}\texttt{.cm[iox]}, \nt{basename}\texttt{.ml}, and \nt{basename}\texttt{.mli}. This list is intended to be textually included within a \Makefile.
%
% It is important to note that \nt{basename}\texttt{.ml} and
% \nt{basename}\texttt{.mli} can have \texttt{.cm[iox]} prerequisites. This is
% because, when the \oinfer switch is used, \menhir infers types by invoking
% \ocamlc, and \ocamlc itself requires the \ocaml modules that the grammar
% specification depends upon to have been compiled first.
%
To produce this list, \menhir generates a mock \ml file, which contains just the semantic actions, invokes \ocamldep, and postprocesses its output.
\docswitch{\orawdepend} This switch is analogous to \odepend. However, in this case, \ocamldep's output is \emph{not} postprocessed by \menhir: it is echoed without change. This switch is not suitable for direct use with \make; it is intended for use with \omake or \ocamlbuild, which perform their own postprocessing.
\docswitch{\oocamldep \nt{command}} This switch controls how \ocamldep is invoked.
It allows setting both the name of the executable and the command line options that are passed to it.
\subsubsection{Obtaining \ocaml type information without calling the \ocaml compiler}
\label{sec:build:infer:indirect}
The third approach is to let \menhir request and receive \ocaml type information \emph{without} allowing \menhir to invoke the \ocaml compiler. There is nothing magic about this: to achieve this, \menhir must be invoked twice, and the \ocaml compiler must be invoked (by the user, or by the build system) in between. This is done as follows.
\docswitch{\oinferwrite \nt{mockfilename}} When invoked in this mode, \menhir does not generate a parser. Instead, it generates a mock \ml file, named \nt{mockfilename}, which contains just the semantic actions. Then, it stops.
\docskip
It is then up to the user (or to the build system) to invoke \verb+ocamlc -i+ so as to type-check the mock \ml file and infer its signature. The output of this command should be redirected to some file \nt{sigfilename}. Then, \menhir can be invoked again, as follows.
\docswitch{\oinferread \nt{sigfilename}} When invoked in this mode, \menhir assumes that the file \nt{sigfilename} contains the result of running \verb+ocamlc -i+ on the file \nt{mockfilename}. It reads and parses this file, so as to obtain the \ocaml type of every semantic action, then proceeds normally to generate a parser.
\docskip
This protocol was introduced on 2018/05/23; earlier versions of \menhir do not support it. Its existence can be tested as follows:
\docswitch{\oinferprotocolsupported} When invoked with this switch, \menhir immediately terminates with exit code 0. An earlier version of \menhir, which does not support this protocol, would display a help message and terminate with a nonzero exit code.
\subsection{Compilation flags}
\label{sec:build:flags}
The following switches allow querying \menhir so as to find out which compilation flags should be passed to the \ocaml compiler and linker.
\docswitch{\osuggestcomp} This switch causes \menhir to print a set of suggested compilation flags, and exit. These flags are intended to be passed to the \ocaml compilers (\ocamlc or \ocamlopt) when compiling and linking the parser generated by \menhir. What are these flags? In the absence of the \otable switch, they are empty. When \otable is set, these flags ensure that \menhirlib is visible to the \ocaml compiler. If the support library \menhirlib was installed via \ocamlfind, a \texttt{-package} directive is issued; otherwise, a \texttt{-I} directive is used.
% The file \distrib{demos/obsolete/Makefile.shared} shows how to exploit
% the \texttt{--suggest-*} switches.
\docswitch{\osuggestlinkb} This switch causes \menhir to print a set of suggested link flags, and exit. These flags are intended to be passed to \texttt{ocamlc} when producing a bytecode executable. What are these flags? In the absence of the \otable switch, they are empty. When \otable is set, these flags ensure that \menhirlib is linked in. If the support library \menhirlib was installed via \ocamlfind, a \texttt{-linkpkg} directive is issued; otherwise, the object file \texttt{menhirLib.cmo} is named.
% The file \distrib{demos/obsolete/Makefile.shared} shows how to exploit
% the \texttt{--suggest-*} switches.
\docswitch{\osuggestlinko} This switch causes \menhir to print a set of suggested link flags, and exit. These flags are intended to be passed to \texttt{ocamlopt} when producing a native code executable. What are these flags?
In the absence of the \otable switch, they are empty. When \otable is set, these flags ensure that \menhirlib is linked in. If the support library \menhirlib was installed via \ocamlfind, a \texttt{-linkpkg} directive is issued; otherwise, the object file \texttt{menhirLib.cmx} is named. % The file \distrib{demos/obsolete/Makefile.shared} shows how to exploit % the \texttt{--suggest-*} switches. \docswitch{\osuggestmenhirlib} This switch causes \menhir to print (the absolute path of) the directory where \menhirlib was installed. If \menhirlib was installed via \ocamlfind, this is equivalent to calling \texttt{ocamlfind query menhirLib}. \docswitch{\osuggestocamlfind} This switch causes \menhir to print a Boolean value (i.e., either \texttt{true} or \texttt{false}), which indicates whether \menhirlib was installed via \ocamlfind. % ------------------------------------------------------------------------------ \section{Comparison with \ocamlyacc} % TEMPORARY idéalement, il faudrait documenter la différence de comportement % sur les réductions par défaut (sur des symboles autres que #). Roughly speaking, Menhir is 90\% compatible with \ocamlyacc. Legacy \ocamlyacc grammar specifications are accepted and compiled by Menhir. The resulting parsers run and produce correct parse trees. However, parsers that explicitly invoke functions in the module \texttt{Parsing} behave slightly incorrectly. For instance, the functions that provide access to positions return a dummy position when invoked by a Menhir parser. Porting a grammar specification from ocamlyacc to Menhir requires replacing all calls to \texttt{Parsing} with new Menhir-specific keywords (\sref{sec:positions}). Here is an incomplete list of the differences between \ocamlyacc and \menhir. The list is roughly sorted by decreasing order of importance. \begin{itemize} \item \menhir allows the definition of a nonterminal symbol to be parameterized (\sref{sec:templates}). A formal parameter can be instantiated with a terminal symbol, a nonterminal symbol, or an anonymous rule (\sref{sec:actual}). A library of standard parameterized definitions (\sref{sec:library}), including options, sequences, and lists, is bundled with Menhir. EBNF syntax is supported: the modifiers \dquestion, \dplus, and \dstar are sugar for options, nonempty lists, and arbitrary lists (\fref{fig:sugar}). \item \ocamlyacc only accepts LALR(1) grammars. \menhir accepts LR(1) grammars, thus avoiding certain artificial conflicts. \item \menhir's \dinline keyword (\sref{sec:inline}) helps avoid or resolve some LR(1) conflicts without artificial modification of the grammar. \item \menhir explains conflicts (\sref{sec:conflicts}) in terms of the grammar, not just in terms of the automaton. \menhir's explanations are believed to be understandable by mere humans. \item \menhir offers an incremental API (in \otable mode only) (\sref{sec:incremental}). This means that the state of the parser can be saved at any point (at no cost) and that parsing can later be resumed from a saved state. \item \menhir offers a set of tools for building a (complete, irredundant) set of invalid input sentences, mapping each such sentence to a (hand-written) error message, and maintaining this set as the grammar evolves (\sref{sec:errors:new}). \item In \ocoq mode, \menhir produces a parser whose correctness and completeness with respect to the grammar can be checked by Coq (\sref{sec:coq}). \item \menhir offers an interpreter (\sref{sec:interpret}) that helps debug grammars interactively. 
\item \menhir allows grammar specifications to be split over multiple files (\sref{sec:split}). It also allows several grammars to share a single set of tokens. \item \menhir produces reentrant parsers. \item \menhir is able to produce parsers that are parameterized by \ocaml modules. \item \ocamlyacc requires semantic values to be referred to via keywords: \verb+$1+, \verb+$2+, and so on. \menhir allows semantic values to be explicitly named. \item \menhir warns about end-of-stream conflicts (\sref{sec:eos}), whereas \ocamlyacc does not. \menhir warns about productions that are never reduced, whereas, at least in some cases, \ocamlyacc does not. \item \menhir offers an option to typecheck semantic actions \emph{before} a parser is generated: see \oinfer. \item \ocamlyacc produces tables that are interpreted by a piece of C code, requiring semantic actions to be encapsulated as \ocaml closures and invoked by C code. \menhir offers a choice between producing tables and producing code. In either case, no C code is involved. \item \menhir makes \ocaml's standard library module \texttt{Parsing} entirely obsolete. Access to locations is now via keywords (\sref{sec:positions}). Uses of \verb+raise Parse_error+ within semantic actions are deprecated. The function \verb+parse_error+ is deprecated. They are replaced with keywords (\sref{sec:errors}). \item \menhir's error handling mechanism (\sref{sec:errors}) is inspired by \ocamlyacc's, but is not guaranteed to be fully compatible. Error recovery, also known as re-synchronization, is not supported by \menhir. \item The way in which severe conflicts (\sref{sec:conflicts}) are resolved is not guaranteed to be fully compatible with \ocamlyacc. \item \menhir warns about unused \dtoken, \dnonassoc, \dleft, and \dright declarations. It also warns about \dprec annotations that do not help resolve a conflict. \item \menhir accepts \ocaml-style comments. \item \menhir allows \dstart and \dtype declarations to be condensed. \item \menhir allows two (or more) productions to share a single semantic action. \item \menhir produces better error messages when a semantic action contains ill-balanced parentheses. % \item \ocamlyacc allows nonterminal start symbols to start with an uppercase % letter, and produces invalid \ocaml code in that case. \menhir disallows this. \item \ocamlyacc ignores semicolons and commas everywhere. \menhir regards semicolons and commas as significant, and allows them, or requires them, in certain well-defined places. % \item \ocamlyacc ignores multiple definitions of a token, even when two of them are at % different types. \menhir rejects this. \item \ocamlyacc allows \dtype declarations to refer to terminal or non-terminal symbols, whereas \menhir requires them to refer to non-terminal symbols. Types can be assigned to terminal symbols with a \dtoken declaration. \end{itemize} % ------------------------------------------------------------------------------ \section{Questions and Answers} \label{sec:qa} $\mathstrut$ % Ensure correct indentation of the first question. Ugly. \vspace{-\baselineskip} \question{Is \menhir faster than \ocamlyacc? What is the speed difference between \texttt{menhir} and \texttt{menhir -{}-table}?} A (not quite scientific) benchmark suggests that the parsers produced by \ocamlyacc and \texttt{menhir -{}-table} have comparable speed, whereas those produced by \texttt{menhir} are between 2 and 5 times faster. This benchmark excludes the time spent in the lexer and in the semantic actions. 
\question{How do I write \Makefile rules for \menhir?}
This can be a bit tricky. % understatement
If you must do this, see \sref{sec:build}.
% and look at \distrib{demos/obsolete/Makefile.shared}.
It is recommended instead to use a build system with built-in support for \menhir, such as \ocamlbuild or \dune.
\question{How do I use \menhir with \ocamlbuild?}
Pass \verb+-use-ocamlfind -use-menhir+ to \ocamlbuild.
% (Assuming that \menhir was installed via ocamlfind.)
To pass options to \menhir, pass \verb+-menhir "menhir <options>"+ to \ocamlbuild. To use \menhir's table-based back-end, pass \verb+-menhir "menhir --table"+ to \ocamlbuild, and either pass \verb+-package menhirLib+ to \ocamlbuild or add the tag \verb+package(menhirLib)+ in the \verb+_tags+ file. To combine multiple \mly files, say \verb+a.mly+ and \verb+b.mly+, into a single parser, say \verb+parser.{ml,mli}+, create a file named \verb+parser.mlypack+ that contains the module names \verb+A B+. See the \distrib{demos} directory for examples.
% Advanced scenario: to use --only-tokens and -external-tokens,
% use .mlypack + _tags + myocamlbuild.ml. Not explained here,
% but \distrib{demos/calc-two} contains an example.
\question{How do I use \menhir with \dune?}
Please use \dune version 1.4.0 or newer, as it has appropriate built-in rules for Menhir parsers. In the simplest scenario, where the parser resides in a single source file \texttt{parser.mly}, the \texttt{dune} file should contain a ``stanza'' along the following lines:
\begin{verbatim}
(menhir (
  (modules (parser))
  (flags ("--explain" "--dump"))
  (infer true)
))
\end{verbatim}
The \oinfer switch has special status and should not be used directly; instead, write \texttt{(infer true)} or \texttt{(infer false)}, as done above. (The default is \texttt{true}.) Ordinary command line switches, like \oexplain and \odump, are passed as part of the \texttt{flags} line, as done above.
% The directory \distrib{demos/calc-dune}
% (and others like it) offers an example.
% For more details, see \href{https://jbuilder.readthedocs.io/en/latest/menhir.html}{\dune's documentation}.
% It may be necessary to specify which version of the Menhir build rules
% one wishes to use. This is done by writing, e.g.
% \begin{verbatim}
% (using menhir 2.0)
% \end{verbatim}
% at the top level of the \texttt{dune-project} file.
% However, my understanding is that this is usually not necessary.
% \dune will automatically add this line for us
% when a project is first compiled.
\question{\menhir reports \emph{more} shift/reduce conflicts than \ocamlyacc! How come?}
\ocamlyacc sometimes merges two states of the automaton that \menhir considers distinct. This happens when the grammar is not LALR(1). If these two states happen to contain a shift/reduce conflict, then \menhir reports two conflicts, while \ocamlyacc only reports one. Of course, the two conflicts are very similar, so fixing one will usually fix the other as well.
\question{I do not use \ocamllex. Is there an API that does not involve lexing buffers?}
Like \ocamlyacc, \menhir produces parsers whose monolithic API (\sref{sec:monolithic}) is intended for use with \ocamllex. However, it is possible to convert them, after the fact, to a simpler, revised API. In the revised API, there are no lexing buffers, and a lexer is just a function from unit to tokens. Converters are provided by the library module \menhirlibconvert. This can be useful, for instance, for users of \texttt{ulex}, the Unicode lexer generator.
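Concretely, a monolithic entry point can be converted along the following lines. This is only a sketch: \verb+Parser.main+ and the result type \verb+Ast.program+ are placeholders for the user's own entry point and semantic value type, and it assumes that the converter module exposes \verb+Simplified.traditional2revised+, as in recent versions of \menhirlib.
\begin{verbatim}
(* The monolithic entry point has type
     (Lexing.lexbuf -> Parser.token) -> Lexing.lexbuf -> Ast.program.
   The converted parser expects a lexer of type
     unit -> Parser.token * Lexing.position * Lexing.position. *)
let revised_main
  : (unit -> Parser.token * Lexing.position * Lexing.position) -> Ast.program =
  MenhirLib.Convert.Simplified.traditional2revised Parser.main
\end{verbatim}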
Also, please note that \menhir's incremental API (\sref{sec:incremental}) does not mention the type \verb+Lexing.lexbuf+. In this API, the parser expects to be supplied with triples of a token and start/end positions of type \verb+Lexing.position+. \question{I need both \dinline and non-\dinline versions of a non-terminal symbol. Is this possible?} Define an \dinline version first, then use it to define a non-\dinline version, like this: \begin{verbatim} %inline ioption(X): (* nothing *) { None } | x = X { Some x } option(X): o = ioption(X) { o } \end{verbatim} This can work even in the presence of recursion, as illustrated by the following definition of (reversed, left-recursive, possibly empty) lists: \begin{verbatim} %inline irevlist(X): (* nothing *) { [] } | xs = revlist(X) x = X { x :: xs } revlist(X): xs = irevlist(X) { xs } \end{verbatim} The definition of \verb+irevlist+ is expanded into the definition of \verb+revlist+, so in the end, \verb+revlist+ receives its normal, recursive definition. One can then view \verb+irevlist+ as a variant of \verb+revlist+ that is inlined one level deep. % Intentionally do not call this "list", because people may copy-paste this % definition, and will end up unintentionally redefining the meaning of *. \question{Can I ship a generated parser while avoiding a dependency on \menhirlib?} Yes. One option is to use the code-based back-end (that is, to not use \otable). In this case, the generated parser is self-contained. Another option is to use the table-based back-end (that is, use \otable) and include a copy of the files \verb+menhirLib.{ml,mli}+ together with the generated parser. The command \texttt{menhir \osuggestmenhirlib} will tell you where to find these source files. \question{Why is \texttt{\$startpos} off towards the left? It seems to include some leading whitespace.} Indeed, as of 2015/11/04, the computation of positions has changed so as to match \ocamlyacc's behavior. As a result, \texttt{\$startpos} can now appear to be too far off to the left. This is explained in \sref{sec:positions}. In short, the solution is to use \verb+$symbolstartpos+ instead. \question{Can I pretty-print a grammar in ASCII, HTML, or \LaTeX{} format?} Yes. Have a look at \texttt{obelisk} \cite{obelisk}. \question{Does \menhir support mid-rule actions?} Yes. See \nt{midrule} and its explanation in \sref{sec:library}. % ------------------------------------------------------------------------------ \section{Technical background} After experimenting with Knuth's canonical LR(1) technique~\cite{knuth-lr-65}, we found that it \emph{really} is not practical, even on today's computers. For this reason, \menhir implements a slightly modified version of Pager's algorithm~\cite{pager-77}, which merges states on the fly if it can be proved that no reduce/reduce conflicts will arise as a consequence of this decision. This is how \menhir avoids the so-called \emph{mysterious} conflicts created by LALR(1) parser generators~\cite[section 5.7]{bison}. \menhir's algorithm for explaining conflicts is inspired by DeRemer and Pennello's~\cite{deremer-pennello-82} and adapted for use with Pager's construction technique. By default, \menhir produces code, as opposed to tables. This approach has been explored before~\cite{bhamidipaty-proebsting-98,horspool-faster-90}. \menhir performs some static analysis of the automaton in order to produce more compact code. 
When asked to produce tables, \menhir performs compression via first-fit row displacement, as described by Tarjan and Yao~\cite{tarjan-yao-79}. Double displacement is not used. The action table is made sparse by factoring out an error matrix, as suggested by Dencker, Dürre, and Heuft~\cite{dencker-84}. The type-theoretic tricks that triggered our interest in LR parsers~\cite{pottier-regis-gianas-typed-lr} are not implemented in \menhir. In the beginning, we did not implement them because the \ocaml compiler did not at the time offer generalized algebraic data types (GADTs). Today, \ocaml has GADTs, but, as the saying goes, ``if it ain't broken, don't fix it''. The main ideas behind the Coq back-end are described in a paper by Jourdan, Pottier and Leroy~\cite{jourdan-leroy-pottier-12}. The C11 parser in the CompCert compiler~\cite{compcert} is constructed by Menhir and verified by Coq, following this technique. How to construct a correct C11 parser using Menhir is described by Jourdan and Pottier~\cite{jourdan-pottier-17}. The approach to error reports presented in \sref{sec:errors:new} was proposed by Jeffery~\cite{jeffery-03} and further explored by Pottier~\cite{pottier-reachability-cc-2016}. % ------------------------------------------------------------------------------ \section{Acknowledgements} \menhir's interpreter (\ointerpret) and table-based back-end (\otable) were implemented by Guillaume Bau, Raja Boujbel, and François Pottier. The project was generously funded by Jane Street Capital, LLC through the ``OCaml Summer Project'' initiative. Frédéric Bour provided motivation and an initial implementation for the incremental API, for the inspection API, for attributes, and for \menhirsdk. \href{https://github.com/ocaml/merlin}{Merlin}, an emacs mode for \ocaml, contains an impressive incremental, syntax-error-tolerant \ocaml parser, which is based on \menhir and has been a driving force for \menhir's APIs. Jacques-Henri Jourdan designed and implemented the Coq back-end and did the Coq proofs for it. Gabriel Scherer provided motivation for investigating Jeffery's technique. % ------------------------------------------------------------------------------ % Bibliography. \bibliographystyle{plain} \bibliography{local} \end{document} % LocalWords: Yann Régis Gianas Regis inria Menhir filename mly basename Coq % LocalWords: coq vy tt Coq's iox Menhir's nonterminal graphviz nullable calc % LocalWords: inline postprocessed postprocessing ocamlc bytecode linkpkg cmo % LocalWords: menhirLib ocamlopt cmx qa ocamlrun runtime uid productiongroups % LocalWords: prec Actuals parameterization Parameterizing ds actuals plist xs % LocalWords: loption LPAREN RPAREN Inlining inlined inlining lp ioption bool % LocalWords: boption sep nonassociative multi basicshiftreduce lookahead decl % LocalWords: UIDENT LIDENT decls tycon expr exprs basiceos basiceosdump lex % LocalWords: curr Lexing lexbuf pos cnum startpos endpos startofs endofs LALR % LocalWords: syntaxerror whitespace EOL cst API lexing MenhirInterpreter pc % LocalWords: InputNeeded HandlingError env CompCert Aut se nat init cparser % LocalWords: validator subdirectory EBNF reentrant eos typecheck menhir ulex % LocalWords: DeRemer Pennello's Tarjan Yao Dencker Dürre Heuft Bau Raja LLC % LocalWords: Acknowledgements Boujbel Frédéric Bour
{ "alphanum_fraction": 0.7466949739, "avg_line_length": 44.7109641638, "ext": "tex", "hexsha": "0f74363cb543daf164d0a77d29b8709e1cfda211", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "f3ca3947cc437bbee66490c814b83d34c7ee50b8", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "sibis/pesy", "max_forks_repo_path": "vendor/menhir/doc/manual.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "f3ca3947cc437bbee66490c814b83d34c7ee50b8", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "sibis/pesy", "max_issues_repo_path": "vendor/menhir/doc/manual.tex", "max_line_length": 128, "max_stars_count": null, "max_stars_repo_head_hexsha": "f3ca3947cc437bbee66490c814b83d34c7ee50b8", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "sibis/pesy", "max_stars_repo_path": "vendor/menhir/doc/manual.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 54396, "size": 209605 }
{ "alphanum_fraction": 0.7941176471, "avg_line_length": 8.5, "ext": "tex", "hexsha": "4d5ee224e8438df8eee026fb4f93d30fb982bc23", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "adamdboult/nodeHomePage", "max_forks_repo_path": "src/pug/theory/engineering/engineeringElectrical/06-01-Variable_resistors.tex", "max_issues_count": 6, "max_issues_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93", "max_issues_repo_issues_event_max_datetime": "2022-01-01T22:16:09.000Z", "max_issues_repo_issues_event_min_datetime": "2021-03-03T12:36:56.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "adamdboult/nodeHomePage", "max_issues_repo_path": "src/pug/theory/engineering/engineeringElectrical/06-01-Variable_resistors.tex", "max_line_length": 31, "max_stars_count": null, "max_stars_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "adamdboult/nodeHomePage", "max_stars_repo_path": "src/pug/theory/engineering/engineeringElectrical/06-01-Variable_resistors.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 8, "size": 34 }
\section{Principle of Strong Induction}
Here is the Principle of Strong Induction:
\begin{theorem}
Suppose $P(n)$ is a proposition, and that the following statements hold:
\begin{itemize}
\item $P(0)$ is true,
\item If $P(i)$ is true for all $0 \leq i \leq n$, then we can prove that $P(n + 1)$ is true.
\end{itemize}
Then $P(n)$ holds for all $n \in \mathbb{N}$.
\end{theorem}
\begin{proof}
In fact, this theorem follows directly from the Principle of Weak Induction. To see this clearly, define the proposition $Q(n)$ to be equivalent to the claim that $P(i)$ is true for all $0 \leq i \leq n$. Then by the hypotheses:
\begin{itemize}
\item $Q(0)$ is true,
\item If $Q(n)$ is true, then by the second hypothesis $P(n + 1)$ is true. Combined with $Q(n)$ itself, this means that $P(i)$ is true for all $0 \leq i \leq n + 1$, which is exactly the statement that $Q(n + 1)$ is true.
\end{itemize}
So by the Principle of Weak Induction $Q(n)$ is true for all natural numbers $n$, which implies that $P(n)$ is true for all natural numbers $n$ as well.
\end{proof}
%note: define inductive hypothesis in weak_ind.tex
The Principle of Strong Induction appears to be ``stronger'' than the Principle of Weak Induction because of the nature of its induction hypothesis, which assumes the truth of $P(i)$ for all $i$ up to a given value $n$, rather than for $n$ alone. We may apply this principle to prove statements where, in order to complete the inductive step, we need to assume more than just the immediately preceding case. A classic example is the proof that every integer greater than $1$ can be written as a product of primes: to handle a composite number $n + 1 = ab$, we need the inductive hypothesis for the factors $a$ and $b$, not just for $n$.
\section{Exercises}
\begin{enumerate}
\item \input{Ch5/2_ex/problem_1}
\item \input{Ch5/2_ex/problem_2}
\item \input{Ch5/2_ex/problem_3}
\item \input{Ch5/2_ex/problem_4}
\item \input{Ch5/2_ex/problem_5}
\end{enumerate}
{ "alphanum_fraction": 0.7160424818, "avg_line_length": 52.6176470588, "ext": "tex", "hexsha": "84134b7b78048e405b6430d8150a1f7c6f66a862", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "f18413d1eb0ed598b325e5cd8052fcc571337926", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "jonlin1000/discr_math", "max_forks_repo_path": "Ch5/strong_ind.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "f18413d1eb0ed598b325e5cd8052fcc571337926", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "jonlin1000/discr_math", "max_issues_repo_path": "Ch5/strong_ind.tex", "max_line_length": 458, "max_stars_count": 3, "max_stars_repo_head_hexsha": "f18413d1eb0ed598b325e5cd8052fcc571337926", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "jonlin1000/discr_math", "max_stars_repo_path": "Ch5/strong_ind.tex", "max_stars_repo_stars_event_max_datetime": "2020-11-14T02:26:40.000Z", "max_stars_repo_stars_event_min_datetime": "2020-04-22T03:31:37.000Z", "num_tokens": 526, "size": 1789 }
% !TeX program = pdfLaTeX \documentclass[smallextended]{svjour3} % onecolumn (second format) %\documentclass[twocolumn]{svjour3} % twocolumn % \smartqed % flush right qed marks, e.g. at end of proof % \usepackage{amsmath} \usepackage{graphicx} \usepackage[utf8]{inputenc} \usepackage[hyphens]{url} % not crucial - just used below for the URL \usepackage{hyperref} \providecommand{\tightlist}{% \setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}} % % \usepackage{mathptmx} % use Times fonts if available on your TeX system % % insert here the call for the packages your document requires %\usepackage{latexsym} % etc. % % please place your own definitions here and don't use \def but % \newcommand{}{} % % Insert the name of "your journal" with % \journalname{myjournal} % %% load any required packages here \usepackage{float} \floatplacement{figure}{tb} \begin{document} \title{The dynamics of risk perception in a Mediterranean agroecosystem } \titlerunning{The dynamics of risk perception in a Mediterranean agroecosystem} \author{ Nicolas Gauthier \and } \authorrunning{ Gauthier } \institute{ Nicolas Gauthier \at Laboratory of Tree-Ring Research \& School of Geography and Development, University of Arizona \\ \email{\href{mailto:[email protected]}{\nolinkurl{[email protected]}}} % \\ % \emph{Present address:} of F. Author % if needed \and } \date{Received: date / Accepted: date} % The correct dates will be entered by the editor \maketitle \begin{abstract} Small-scale agriculturalists in the Mediterranean Basin rely on multiple strategies including diversification, intensification, and storage to maintain a stable food supply in the face of environmental uncertainty. Each of these strategies requires farmers to make specific resource allocation decisions in response to environmental risks and is thus sensitive to variability in both the spatiotemporal pattern of risk and the ability of farmers to perceive that pattern. In this chapter, I present a simple agent-based model of a Mediterranean agroecosystem. By driving the model with realistic environmental dynamics derived from simulations of mid-Holocene Mediterranean climate, and by allowing the psychology of risk perception to vary among individual farmers, I explore the hidden vulnerabilities of traditional risk-management strategies to periods of rapid climate change. I show that even when farmers are able to manage risk ``optimally'' in light of past experience, unanticipated changes in the spatiotemporal pattern of rainfall can still lead to major food shortfalls. \\ \keywords{ Bayesian agents \and game against nature \and crop diversification \and drought risk \and rapid climate change \and } \end{abstract} \def\spacingset#1{\renewcommand{\baselinestretch}% {#1}\small\normalsize} \spacingset{1} \hypertarget{intro}{% \section{Introduction}\label{intro}} The distinctive climate and ecology of the Mediterranean basin afforded both challenges and opportunities to the earliest farming communities. Water was the primary limiting resource for these traditional agroecosystems. Agricultural droughts where growing season precipitation is low enough to cause crop failures are a constant threat. Precipitation is highly variable in space and time and droughts are difficult to predict with certainty. How were Neolithic farmers able to adapt to and even thrive in such an uncertain environment? Over the past 10,000 years, small-scale subsistence farmers have relied on a suite of strategies to maintain stable food supplies given uncertain rainfall. 
These strategies include practices like crop diversification, storage, mobility, and exchange (Halstead and O'Shea 1989). Crop diversification in particular is an excellent example of a widespread and effective risk management strategy that is well suited to Mediterranean agroecosystems. In the Mediterranean, land-use strategies involving a diversified portfolio of wheat and barley have been employed by the earliest sedentary farmers and continue to be used to this day (Gould 1963; Araus, Slafer, and Romagosa 1999; Abbo, Lev-Yadun, and Gopher 2010; Weiss and Zohary 2011; Marston 2011). Relying on a mix of food types with different climatic tolerances is an efficient way to maintain a robust food supply (Helmers, Yamoah, and Varvel 2001; Anderies 2006). Wheat is high yielding but drought sensitive, while barley is lower yielding but drought tolerant. Planting a mix of high-yield, high-risk and low-yield, low-risk crops, either in the same plot or in a combination of plots, is an effective means of diversifying the annual supply of staple food crops (Paut, Sabatier, and Tchamitchian 2019). By dynamically adjusting the ratio of wheat to barley in their fields, farmers can adapt to a variety of local climate conditions with different drought risks.

Risk-management strategies like crop diversification require farmers to make specific resource allocation decisions in response to specific environmental risks. As such, they are sensitive to variability in both the spatiotemporal patterns of risk and the ability of individual decision-makers to perceive and act on those patterns. Here, I focus on two main questions:

\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\item
  How likely were droughts to occur each year in the eastern Mediterranean, and how did these risks change over the Holocene?
\item
  How would Neolithic farmers have perceived these changing risks, and what were the consequences for their collective ability to manage them?
\end{enumerate}

To address these questions, I first use results from a long-term paleoclimate simulation to estimate the changing risk of agricultural droughts in the eastern Mediterranean over a 4,000-year period in the early to middle Holocene. Then, I use a simulated population of ``belief-based'' Bayesian agents to explore how well individual farmers would have been able to perceive these long-term changes in drought risks given their finite life experiences and limited capacity to process information. This computational approach allows for a more nuanced understanding of the vulnerability of risk-management strategies to unpredictable climatic variability.

\hypertarget{sec:1}{%
\section{Decision-making in a game against nature}\label{sec:1}}

The basic decision-making problem facing a farmer seeking to diversify their crops can be thought of as a ``game against nature'' (Milnor 1952; Gould 1963; Agrawal and Heady 1968; Cassidy, Field, and Kirby 1971; Luce and Raiffa 1989). In the context of decision theory, the ``game'' is the farmer's decision of which crops to plant and in what proportions, given uncertainty in the weather (or the ``state of nature'') in a given year. Assume that farmers are working with a simplified representation of reality and intuitively solve an easier problem when faced with a complex real-world situation (Simon 1990). That is, rather than a continuum of possible states of nature, farmers only care about two categories -- years when it is too dry to plant wheat and years when it is not.
\hypertarget{the-payoff-matrix}{%
\subsection{The payoff matrix}\label{the-payoff-matrix}}

This simplified decision problem can take the form of a payoff matrix in which the rows represent the moves of the farmer (wheat or barley) and the columns are the moves of nature (dry or normal, Table 1). The matrix is populated with realistic estimates of crop yields derived from isotopic analyses of grain residues at Neolithic sites (Araus, Slafer, and Romagosa 1999). However, the absolute yields are less important for the decision-making problem than the relative values in each row and column. Given this payoff matrix, what strategies might a farmer use to ``win'' this game and maximize their yields?

\begin{table}
\centering
\caption{Estimates of yield volume (t/ha) for prehistoric wheat and barley varieties derived from (Araus, Slafer, and Romagosa 1999). The absolute values here are less important for decision-making than the relative values across each row and column.}
\begin{tabular}{|l|l|l|}
\hline
& Dry Year & Normal Year \\ \hline
Barley Yield & 0.93 & 1.18 \\ \hline
Wheat Yield & 0 & 1.60 \\ \hline
\end{tabular}
\end{table}

There are several decision criteria a farmer might use in this situation (Milnor 1952; Agrawal and Heady 1968). A risk-neutral farmer seeks only to maximize yields in a normal year by planting wheat and hoping for the best, as wheat is the highest yielding crop overall. But this strategy risks starvation in drought years. Instead, it is often rational to assume ``nature's'' moves are decided by a sentient being bent on one's ruin, and to play strategically as if the worst-case scenario will occur (Gould 1963; Beckenkamp 2008). In the particular form of the wheat-barley game presented in Table 1, a risk-sensitive farmer would assume that droughts are inevitable and plant barley to guarantee a minimum acceptable harvest even in the worst-case scenario. This cautious strategy is beneficial in situations of complete uncertainty, but farmers miss out on the high yields they would have received from planting wheat if a drought ultimately does not occur.

\hypertarget{subjective-expected-yields}{%
\subsection{Subjective expected yields}\label{subjective-expected-yields}}

Playing this ``game'' over many years allows farmers to learn how probable each state of nature is to occur and to adjust their choices accordingly. This strategy is known as ``fictitious play'' against nature. If the empirical frequency distribution of wet and dry years is known, the farmer can multiply the crop yields in Table 1 by the probability of each state of nature occurring and plant the crop with the highest \emph{expected} yield (Upton 1987). If the probability of a dry year is denoted \(\theta\), then the expected barley yield is \(\theta \times 0.93 + (1 - \theta) \times 1.18\) and the expected wheat yield is \(\theta \times 0 + (1 - \theta) \times 1.60\). Wheat is favored if \(\theta\) is low and droughts are unlikely, barley is favored if \(\theta\) is high and droughts are more likely, and the value of \(\theta\) at which expected barley and wheat yields are the same is known as the point of indifference (Figure \ref{fig:yields}). If a farmer is unsure whether \(\theta\) is above or below the point of indifference, they can plant a mix of crops that maximizes expected yields given their uncertainty in \(\theta\) (Luce and Raiffa 1989). Thus, the exact risk of a drought occurring in any given year and farmers' perceptions of that risk both have major implications for the decision-making process.
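For the payoff estimates in Table 1, the point of indifference can be written out explicitly. Equating the two expected yields gives
\[
0.93\,\theta + 1.18\,(1-\theta) \;=\; 0 \cdot \theta + 1.60\,(1-\theta)
\quad\Longrightarrow\quad
\theta \;=\; \frac{1.60 - 1.18}{(1.60 - 1.18) + 0.93} \approx 0.31,
\]
so that, with these particular yield estimates, planting barley maximizes the subjective expected yield once the perceived probability of a dry year exceeds roughly 0.31, while planting wheat does so below that value.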
\begin{figure}
\centering
\includegraphics{manuscript_files/figure-latex/yields-1.pdf}
\caption{\label{fig:yields}Expected wheat and barley yields under increasing drought risk. The point of indifference is highlighted, at which point planting either wheat or barley results in the same expected yield.}
\end{figure}

\hypertarget{sec:2}{%
\section{Early to mid-Holocene drought risks}\label{sec:2}}

In order to estimate drought risks in the past, present-day weather observations alone are insufficient. Precipitation varies not only from year to year, but also on centennial to millennial timescales that are unresolved in the contemporary observational record. Climate dynamics are non-linear, non-stationary, and non-ergodic, which means sudden, unpredictable variability is the norm rather than the exception. Estimates of past climate variability derived from paleoclimate simulations provide a richer representation of not only the first-order statistics of the climate system (e.g.~the mean and variance of precipitation) but also the higher-order patterns such as the serial persistence of wet and dry years. They present a more realistic picture of the inherent year-to-year uncertainty in the climate system and pose a sharper challenge to simple risk-managing strategies that assume climatic risks are fixed.

\hypertarget{paleoclimate-simulation}{%
\subsection{Paleoclimate simulation}\label{paleoclimate-simulation}}

Estimates of changing Holocene precipitation variability were derived from the TraCE-21ka paleoclimate simulation (He 2011). TraCE-21ka is a state-of-the-art simulation that uses a coupled atmosphere-ocean general circulation model to recreate the transient response of the global climate system to changes in the Earth's orbit and greenhouse gas concentrations from the Last Glacial Maximum to the present. The simulation generates physically consistent spatiotemporal climate dynamics, driven by current best estimates of external climate drivers (e.g.~Earth's orbit, greenhouse gases, glacial meltwater flux). The model simulates these dynamics on a six-hourly timescale, and model outputs are archived at a monthly resolution. For this analysis, monthly TraCE-21ka precipitation outputs were extracted from the 3.75° grid cell covering Central Anatolia. This location was selected to capture climate variability typical for major Neolithic settlements in the region, such as Çatalhöyük and Aşıklı Höyük, and for the eastern Mediterranean more broadly.

\hypertarget{estimating-drought-risks}{%
\subsection{Estimating drought risks}\label{estimating-drought-risks}}

Using the climate model output, I classified each model year as either a dry year or a normal year. A dry year was any year where less than 300 mm of rain fell during the wet season (October-May), the threshold below which wheat crops will generally fail (Wilkinson 1997), and a normal year was defined as any year above this threshold. Given the modeled patterns of normal and dry years, the ``objective'' climatic drought risk \(\hat\theta\) for any particular year was defined as the proportion of the previous 50 years that were dry years:

\begin{equation}
\hat\theta = \frac{1}{50}\sum_{n=t-50}^{t-1} \mathbf{1}\left[ P_n < 300 \right],
\end{equation}

where \(\mathbf{1}[\cdot]\) equals one when the condition in brackets holds and zero otherwise, and \(P_n\) is the growing season precipitation accumulation in millimeters for year \(n\). The simulated risks of crop failure due to drought ranged between 10\% and 46\% during the period from 9.5ka to 5.5ka, with a median risk of 24\% (Figure \ref{fig:risk}).
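A minimal Python sketch of this rolling-window calculation is given below. It assumes the monthly model output has already been aggregated into wet-season (October--May) totals for each year, and it uses a synthetic precipitation series purely for illustration rather than the actual TraCE-21ka output.

\begin{verbatim}
import numpy as np

def drought_risk(wet_season_precip, threshold=300.0, window=50):
    """Share of the previous `window` years with wet-season
    precipitation totals below `threshold` (in mm)."""
    dry = np.asarray(wet_season_precip) < threshold
    risk = np.full(len(dry), np.nan)       # undefined for the first years
    for t in range(window, len(dry)):
        risk[t] = dry[t - window:t].mean()
    return risk

# Illustration with a synthetic 4,000-year precipitation series:
rng = np.random.default_rng(1)
precip = rng.normal(350.0, 80.0, size=4000)
theta_hat = drought_risk(precip)
\end{verbatim}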
On average, a Neolithic farmer in Central Anatolia could expect their wheat crops to fail two or three times a decade, punctuated by even drier periods in which wheat crops could be expected to fail roughly every other year. The simulation also reveals a long-term trend of decreasing drought risk, with the higher risks of the early Holocene giving way to lower risks in the middle Holocene.

\begin{figure}
\centering
\includegraphics{manuscript_files/figure-latex/risk-1.pdf}
\caption{\label{fig:risk}Annual risk of wheat crop failure due to drought aggregated by fifty-year period. The dashed line indicates the level of risk beyond which one would plant barley over wheat to maximize subjective expected yields (after Figure 1, given the payoffs in Table 1).}
\end{figure}

The TraCE-21ka simulation confirms that drought risk in the eastern Mediterranean was non-stationary and, in fact, quite volatile during periods of climatic disruption in the early Holocene. This volatility would have had severe consequences for early farming communities whose risk-managing practices depended so heavily on accurately perceived local climatic risks.

\hypertarget{sec:3}{%
\section{Modeling risk perception}\label{sec:3}}

In order to properly manage drought risk, a farmer must first be able to perceive that risk. Yet a farmer's perception of risk reflects more than just the objective, empirical risk observable in the world around them (Tucker 2007; Tucker et al. 2013; Findlater, Satterfield, and Kandlikar 2019). Individual risk perception is inherently subjective, influenced by a person's past experience of dry and wet years as filtered through memory, and can reflect varying levels of uncertainty. Likewise, the distribution of individuals' perceived risks within a population influences the collective perception of drought risks and the potential aggregate societal-level response to those risks (Moussaïd 2013; Amaral and Javarone 2020). But how best to model risk perception at the individual level?

\hypertarget{prior-beliefs-and-bayesian-agents}{%
\subsection{Prior beliefs and Bayesian agents}\label{prior-beliefs-and-bayesian-agents}}

The human brain does not record every bit of perceived information in memory; rather, it stores a ``compact encoding'' of that information which it uses for future decision-making (Gallistel et al. 2014). Integrating the tools of agent-based modeling and Bayesian probability provides an elegant means of representing this process. A Bayesian agent is one whose subjective beliefs can be represented as a probability distribution over possible states of nature (Cushman and Macindoe 2009; Pope and Gimblett 2015). This approach is computationally efficient for both modelers and decision-makers. Even if real-world decision-makers are not Bayesian in a literal, mathematical sense, the basic algorithmic problems faced by the brain, and the solutions it has evolved to them, reflect the same constraints on information processing in the minds of decision-makers (Bonawitz et al. 2011; Sanborn and Chater 2016; Khalvati et al. 2019).

We can examine the dynamics of risk perception using a population of Bayesian agents that develop beliefs about the risk of drought through their personal experience of the weather. These perceptions impact their decision-making by altering their subjective expected crop yields from planting different crop mixes.
Here, occurrence of a drought in any given year is treated as a draw from a Bernoulli distribution (i.e.~a coin flip) with parameter \(\theta\) representing the drought probability. The beta distribution is a natural choice for representing knowledge about probabilities because it is constrained to the interval between 0 and 1. Hence, an individual agent's prior belief about the plausible value of \(\theta\) can be represented as

\begin{equation}
\theta \sim \textrm{Beta}(\alpha, \,\beta),
\end{equation}

a beta distribution with the parameters \(\alpha\) and \(\beta\) corresponding to the number of dry and normal years previously experienced by that agent. Varying these two parameters thus allows for the representation of a variety of different personal experiences of drought risk. For example, if an agent recalls having lived through 5 dry years and 25 normal years, their prior belief about the chance of a drought occurring in the following year would be represented as a Beta(5, 25) distribution with mean value \(5/30 \approx 0.17\), equivalent to the empirical drought risk (\(\hat\theta\)) for that period.

Using probabilities to represent agents' beliefs also allows for estimation of the uncertainty in those beliefs (Figure \ref{fig:prior-prob}). A simple Bayesian agent becomes more certain in their beliefs with age. For example, an agent who experienced 5 dry years out of 10 and one who experienced 25 dry years out of 50 would both agree that, on average, droughts occur 50\% of the time. Yet the latter agent would be much more certain in this belief because it is drawn from a larger range of experience (i.e.~a larger sample size). We can thus represent the exact information content of each individual agent's subjective experience of droughts using the spread of this prior belief. How, then, should an agent update its beliefs in light of new experience?

\begin{figure}
\centering
\includegraphics{manuscript_files/figure-latex/prior-prob-1.pdf}
\caption{\label{fig:prior-prob}Development of an individual's perceived drought risk with time, assuming a fixed drought risk of 0.5. Beliefs are represented as beta distributions, and the increased certainty with age reflects the varying effective sample size of the beta prior.}
\end{figure}

\hypertarget{bayesian-updating-and-the-weight-of-past-experience}{%
\subsection{Bayesian updating and the weight of past experience}\label{bayesian-updating-and-the-weight-of-past-experience}}

A Bayesian agent must be able to update its beliefs as it moves through time and observes each successive year's weather. It does so by comparing the information in this year's observation with the cumulative weight of its past experience. The agent combines its prior beliefs about drought risk with the likelihood of having observed a drought in the current year in order to generate a posterior distribution representing its updated beliefs about the world. Crucially, the strength of one's prior beliefs determines how much weight is given to new information (Figure \ref{fig:posterior}). For example, assume two agents -- one aged 15 and the other aged 50 -- who have only ever experienced a normal climate where droughts happen on average two out of every ten years. The mean value of \(\theta\) for both agents would thus be 20\%, but the degree of certainty varies because the older agent bases its inference on many more years of experience than the younger agent.
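Concretely, because the beta prior is conjugate to the Bernoulli likelihood, this updating step has a simple closed form. If an agent currently holds the belief \(\theta \sim \textrm{Beta}(\alpha, \beta)\) and then observes a year with outcome \(d\) (\(d = 1\) for a dry year and \(d = 0\) otherwise), its updated belief is
\[
\theta \mid d \;\sim\; \textrm{Beta}(\alpha + d, \; \beta + 1 - d),
\]
so each additional year simply adds one count to the appropriate parameter, and the relative weight of a single new observation shrinks as the effective sample size \(\alpha + \beta\) grows.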
Now, assume the climate suddenly changes such that the drought risk is doubled to 40\% for the next 25 years, not an uncommon occurrence in the simulation of early to mid-Holocene climate. Because the prior beliefs of the agents were so different, their subjective beliefs after the dry period are also different even though both experienced the same climate. For young agents with weak priors, the information of each new year can thus strongly influence their beliefs. But older agents -- having more experience and stronger priors -- will be less likely to update their beliefs when comparing the information from a single year's weather with previous decades' worth of accumulated experience.

Although neither agent may perceive the ``true'' climatic drought risk exactly, both nevertheless hold perfectly rational beliefs about the world; they differ only in their prior subjective beliefs. Their relative conservatism or flexibility is not a bias, but rather a different perspective on an inherently uncertain world. As was the case in the game of fictitious play discussed in Section \ref{sec:1}, a degree of conservatism is warranted when the environment is stable and risks do not vary. But when the environment is volatile, the ability to change one's mind is crucial (Gallistel et al. 2014). Being too inflexible in one's priors can lead to decisions that are too optimistic when conditions really have changed for the worse; conversely, it can mean failing to take advantage of genuinely good conditions when risks have eased.

\begin{figure}
\centering
\includegraphics{manuscript_files/figure-latex/posterior-1.pdf}
\caption{\label{fig:posterior}Change in perceived drought probability in an older (50) and younger (15) agent before and after a 25-year dry period.}
\end{figure}

\hypertarget{sec:4}{%
\section{Risk management and the dynamics of risk perception}\label{sec:4}}

The previous example established some basic intuitions for how agents with varying subjective beliefs can perceive risk differently. But it represents an idealized situation where agents update their beliefs retrospectively after a many-year dry period. Farmers, on the other hand, must update their planting decisions each year and continuously monitor the weather around them. The transient, year-to-year changes in perceived risks can thus have major consequences for how a population responds \emph{during} a dry period. How do Bayesian learners perform in such an uncertain, unpredictable time?

To explore these dynamics further, we can simulate random sequences of wet and dry years and see how a population of Bayesian agents responds to this sequence in time. A 75-year period of low (20\%) drought risk is punctuated by an abrupt 25-year dry period during which drought risk doubles. Here, observations are made about the world sequentially, and agents must continuously update their beliefs. As in the previous section, an agent's prior belief about drought risk is updated in light of new experience to generate a posterior perception of risk. Now, however, agents update their beliefs every year and thus only a year's worth of new information is incorporated into the agents' priors at each time step. This learning process is iterative, as an agent's posterior distribution in one year becomes their prior for the following year and the updating process repeats itself.
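A minimal Python sketch of this yearly updating loop is given below. The population size, the agents' assumed ages, the flat Beta(1, 1) starting priors, the single shared weather sequence, the length of the recovery period after the dry spell and the absence of births and deaths are all simplifying assumptions of the sketch rather than settings of the full simulation.

\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)

# Objective drought risk: 75 low-risk years, a 25-year dry period with
# doubled risk, then a return to low risk.
risk = np.concatenate([np.full(75, 0.2), np.full(25, 0.4), np.full(50, 0.2)])

# Agents differ only in how much prior experience ('age') they carry.
n_agents = 100
ages = rng.integers(10, 60, n_agents)        # years already experienced
prior_dry = rng.binomial(ages, 0.2)          # dry years they remember
alpha = 1.0 + prior_dry                      # Beta parameters: dry counts
beta = 1.0 + (ages - prior_dry)              # and normal-year counts

perceived = np.zeros((len(risk), n_agents))
for t, theta in enumerate(risk):
    dry = rng.random() < theta               # one shared weather outcome
    alpha += dry                             # conjugate (online) update
    beta += 1 - dry
    perceived[t] = alpha / (alpha + beta)    # posterior mean drought risk
\end{verbatim}

Because each agent's effective sample size \(\alpha + \beta\) grows with age, a single new year shifts the posterior mean only slightly, which is what produces the sluggish population-level response described below.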
Formally, this iterative process is known as ``online'' learning from Bernoulli observations, where ``online'' refers to the sequential, year-to-year updating (Bissiri and Walker 2010).

The results of this simulation reveal the influence of the subjective experiences of individual decision-makers on the population-level perception of risk (Figure \ref{fig:dynamics}a). In this learning environment, isolating signal from noise becomes critical for accurately perceiving evolving drought risks. All agents begin the simulation with diffuse subjective beliefs about drought risk, but as they begin to learn, their collective subjective belief approaches the objective risk level. The agents are slow to update their beliefs during the 25-year dry period because drought years, while increasingly frequent, are still few enough not to outweigh their prior beliefs. Some agents may perceive gradual changes, but the population as a whole does not perceive the change in climate until more than a decade after it has begun. The agents are even slower to realize when the dry period is over. Indeed, the dry period made a strong enough imprint on the population's collective memory that they perceive droughts to be much more probable than they actually are for decades after conditions have ameliorated once and for all.

These biased perceptions have consequences for the population's collective ability to manage risk, because uncertainty about whether drought risks are above or below the indifference point informs the mix of crops to plant (Figure \ref{fig:dynamics}b). Once again, the skewed perceptions of increased drought risks last long after the dry period ends, reflected in a much higher ratio of barley to wheat -- a more cautious crop mix than is rational given the objective risk of drought in the environment.

\begin{figure}
\centering
\includegraphics{manuscript_files/figure-latex/dynamics-1.pdf}
\caption{\label{fig:dynamics}A) Perceived drought risks in a population of Bayesian agents before and after a drought (grey band). The dashed red line represents the level of risk at which an agent is indifferent between planting wheat and barley. B) Crop mix over time, calculated based on the degree of dispersion above and below the indifference level in A.}
\end{figure}

\hypertarget{conclusion}{%
\section{Conclusion}\label{conclusion}}

In this chapter, I explored the consequences of individual heterogeneity in risk perception on the risk-management practices of a simulated Neolithic farming community. I used a long-term paleoclimate simulation over the eastern Mediterranean to estimate the changing risks of agricultural drought over a 4,000-year time period spanning the early to mid-Holocene. Over this time, wheat crops would have been expected to fail from drought between once every ten years and nearly once every two years. Changes in risk of such magnitude would have severely impacted Neolithic agroecosystems in the long run but would have been difficult for any individual farmer to perceive in the short run. To explore these dynamics, I simulated a population of ``belief-based'' Bayesian agents who use their subjective perception of annual drought risks to decide what mix of crops will best manage those risks. During periods of climatic stability, allowing past experiences to influence decision making helps farmers minimize the impacts of \emph{predictable} drought.
But past experiences are less informative during periods of rapid climate change, and even farmers who manage risk ``optimally'' in light of their prior beliefs can experience food shortfalls. Cognitive diversity and life experience can be as or more important than the exact mix of crops planted for a population's long-term survival under extreme uncertainty. These dynamics have implications for understanding risk management and food production in the Neolithic. Two points arise from this model that clarify our understanding of the earliest farming communities in the Mediterranean and can inform future simulation work: \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \item \textbf{Risk perception is difficult.} The climate system is inherently chaotic. Annual forecasts are fundamentally uncertain, even in the era of modern supercomputers and numerical weather prediction. For prehistoric farmers, this uncertainty would have been an existential challenge. With the end of the Last Glacial Maximum and the advent of the Holocene, precipitation became increasingly volatile on multiple time scales. This uncertainty in the objective drought risk is compounded by uncertainty in individual farmers' subjective perception of drought risk. Any individual would have experienced only a brief snapshot of this complex period. The rhythms of human lifespans are out of sync with the centennial to millennial scale oscillations in the climate system, so even the predictive value of one's own experience is itself unpredictable. This fundamental uncertainty would have influenced far more than the choice of which type of crops to plant and would have pervaded all kinds of decision-making under risk. \item \textbf{Individual risk perception has consequences for collective risk management.} Individuals of different ages may perceive the same dry period differently depending on their prior life experience. Younger individuals are more likely to perceive a run of dry years as a trend, rather than a temporary deviation from the norm, and older individuals are likely to do the opposite. By extension, the age structure of a population will influence how quickly it is able to perceive and adapt to a changing climate. For example, a young, fast growing population will have a different collective memory of a past drought event than an older population. Likewise, famines, warfare, epidemics, and other crises that afflict specific age classes will alter the time horizon of that population's collective memory. Individual heterogeneity in risk perceptions can thus play a key role in broader social responses to climatic risks. \end{enumerate} Relating collective knowledge to individual perception and cognition is essential for understanding human behavior in complex social-ecological systems (Beratan 2007; Pope and Gimblett 2015). Although social learning and cumulative cultural evolution have not been the focus of this chapter, these findings provide insight into the individual learning dynamics that underlay those broader social processes. This chapter has focused primarily on the physical and cognitive dimensions of risk and risk perception. The social context of risk and risk perception can be equally consequential (Rogers 1997). The balance of individual learning with social transmission determines the collective perception of risks and its impact on collective memory (Moussaïd 2013; Candia et al. 2019). 
Our finite lifespans ultimately limit the skill of individual learning over the long term, and cumulative cultural evolution is necessary for continued survival.

\setlength{\parindent}{-0.5in}
\setlength{\leftskip}{0.5in}
\setlength{\parskip}{8pt}

\hypertarget{references}{%
\section{References}\label{references}}

\noindent

\hypertarget{refs}{}
\leavevmode\hypertarget{ref-Abbo2010}{}%
Abbo, Shahal, Simcha Lev-Yadun, and Avi Gopher. 2010. ``Yield stability: an agronomic perspective on the origin of Near Eastern agriculture.'' \emph{Vegetation History and Archaeobotany} 19 (2): 143--50. \url{https://doi.org/10.1007/s00334-009-0233-7}.

\leavevmode\hypertarget{ref-Agrawal1968}{}%
Agrawal, R. C., and E. O. Heady. 1968. ``Applications of Game Theory Models in Agriculture.'' \emph{Journal of Agricultural Economics} 19 (2): 207--18. \url{https://doi.org/10.1111/j.1477-9552.1968.tb01326.x}.

\leavevmode\hypertarget{ref-Amaral2020}{}%
Amaral, Marco A., and Marco A. Javarone. 2020. ``Heterogeneity in evolutionary games: an analysis of the risk perception.'' \emph{arXiv Preprint}, 1--7. \url{http://arxiv.org/abs/2002.06645}.

\leavevmode\hypertarget{ref-Anderies2006}{}%
Anderies, John Martin. 2006. ``Robustness, institutions, and large-scale change in social-ecological systems: the Hohokam of the Phoenix Basin.'' \emph{Journal of Institutional Economics} 2 (02): 133. \url{https://doi.org/10.1017/S1744137406000312}.

\leavevmode\hypertarget{ref-Slafer1999}{}%
Araus, J. L., G. A. Slafer, and I. Romagosa. 1999. ``Durum wheat and barley yields in antiquity estimated from 13C discrimination of archaeological grains: a case study from the Western Mediterranean Basin.'' \emph{Australian Journal of Plant Physiology} 26 (4): 345. \url{https://doi.org/10.1071/PP98141}.

\leavevmode\hypertarget{ref-Beckenkamp2008}{}%
Beckenkamp, Martin. 2008. ``Playing Strategically against Nature? Decisions Viewed from a Game-Theoretic Frame.'' Bonn. \url{https://doi.org/10.2139/ssrn.1275287}.

\leavevmode\hypertarget{ref-Beratan2007}{}%
Beratan, Kathi K. 2007. ``A cognition-based view of decision processes in complex social-ecological systems.'' \emph{Ecology and Society} 12 (1). \url{https://doi.org/10.5751/es-02103-120127}.

\leavevmode\hypertarget{ref-Bissiri2010}{}%
Bissiri, Pier Giovanni, and Stephen G. Walker. 2010. ``On Bayesian learning from Bernoulli observations.'' \emph{Journal of Statistical Planning and Inference} 140 (11): 3520--30. \url{https://doi.org/10.1016/j.jspi.2010.05.023}.

\leavevmode\hypertarget{ref-Bonawitz2011}{}%
Bonawitz, Elizabeth, Stephanie Denison, Annie Chen, Alison Gopnik, and Thomas L Griffiths. 2011. ``A simple sequential algorithm for approximating Bayesian inference.'' In \emph{Proceedings of the 33rd Annual Conference of the Cognitive Science Society}, 2463--8. \url{http://palm.mindmodeling.org/cogsci2011/papers/0582/paper0582.pdf}.

\leavevmode\hypertarget{ref-Candia2019}{}%
Candia, Cristian, C. Jara-Figueroa, Carlos Rodriguez-Sickert, Albert László Barabási, and César A. Hidalgo. 2019. ``The universal decay of collective memory and attention.'' \emph{Nature Human Behaviour} 3 (1): 82--91. \url{https://doi.org/10.1038/s41562-018-0474-5}.

\leavevmode\hypertarget{ref-Cassidy1971}{}%
Cassidy, R. G., C. A. Field, and M. J. L. Kirby. 1971. ``Random payoff games with partial information: One person games against nature.'' \emph{Revue Française d'informatique et de Recherche Opérationnelle} 5 (3): 3--17.
\leavevmode\hypertarget{ref-Cushman2009}{}% Cushman, Fiery, and O Macindoe. 2009. ``The coevolution of punishment and prosociality among learning agents.'' \emph{Proceedings of the Annual Conference of the Cognitive Science Society} 31 (31): 1774--9. \url{http://141.14.165.6/CogSci09/papers/374/paper374.pdf}. \leavevmode\hypertarget{ref-Findlater2019}{}% Findlater, Kieran M., Terre Satterfield, and Milind Kandlikar. 2019. ``Farmers' Risk-Based Decision Making Under Pervasive Uncertainty: Cognitive Thresholds and Hazy Hedging.'' \emph{Risk Analysis}. \url{https://doi.org/10.1111/risa.13290}. \leavevmode\hypertarget{ref-Gallistel2014}{}% Gallistel, C. R., Monika Krishan, Ye Liu, Reilly Miller, and Peter E. Latham. 2014. ``The perception of probability.'' \emph{Psychological Review} 121 (1): 96--123. \url{https://doi.org/10.1037/a0035232}. \leavevmode\hypertarget{ref-GOULD1963a}{}% Gould, Peter R. 1963. ``Man Against His Environment: A Game Theoretic Framework.'' \emph{Annals of the Association of American Geographers} 53 (3): 290--97. \url{https://doi.org/10.1111/j.1467-8306.1963.tb00450.x}. \leavevmode\hypertarget{ref-Halstead1989}{}% Halstead, Paul, and John O'Shea. 1989. ``Introduction: cultural responses to risk and uncertainty.'' In \emph{Bad Year Economics: Cultural Responses to Risk and Uncertainty}, edited by Paul Halstead and John O'Shea, 1--7. Cambridge: Cambridge University Press. \url{https://doi.org/10.2307/281441}. \leavevmode\hypertarget{ref-He2011}{}% He, Feng. 2011. ``Simulating Transient Climate Evolution of the Last Deglatiation with CCSM3.'' PhD thesis, Madison. \leavevmode\hypertarget{ref-Helmers2001}{}% Helmers, G. A., C. F. Yamoah, and G. E. Varvel. 2001. ``Separating the impacts of crop diversification and rotations on risk.'' \emph{Agronomy Journal} 93 (6): 1337--40. \url{https://doi.org/10.2134/agronj2001.1337}. \leavevmode\hypertarget{ref-Khalvati2019}{}% Khalvati, Koosha, Seongmin A. Park, Saghar Mirbagheri, Remi Philippe, Mariateresa Sestito, Jean-Claude Dreher, and Rajesh P. N. Rao. 2019. ``Modeling Other Minds: Bayesian Inference Explains Human Choices in Group Decision Making.'' \emph{Science Advances} in press (November): aax8783. \url{https://doi.org/10.1101/419515}. \leavevmode\hypertarget{ref-Luce1989}{}% Luce, R Duncan, and Howard Raiffa. 1989. \emph{Games and decisions: Introduction and critical survey}. Courier Corporation. \leavevmode\hypertarget{ref-Marston2011}{}% Marston, John M. 2011. ``Archaeological markers of agricultural risk management.'' \emph{Journal of Anthropological Archaeology} 30 (2): 190--205. \url{https://doi.org/10.1016/j.jaa.2011.01.002}. \leavevmode\hypertarget{ref-Milnor1952}{}% Milnor, John. 1952. ``Games against nature.'' The RAND Corporation. \leavevmode\hypertarget{ref-Moussaid2013}{}% Moussaïd, Mehdi. 2013. ``Opinion formation and the collective dynamics of risk perception.'' \emph{PLoS ONE} 8 (12): 1--8. \url{https://doi.org/10.1371/journal.pone.0084592}. \leavevmode\hypertarget{ref-Paut2019}{}% Paut, Raphaël, Rodolphe Sabatier, and Marc Tchamitchian. 2019. ``Reducing risk through crop diversification: An application of portfolio theory to diversified horticultural systems.'' \emph{Agricultural Systems} 168: 123--30. \url{https://doi.org/10.1016/j.agsy.2018.11.002}. \leavevmode\hypertarget{ref-Pope2015}{}% Pope, Aloah J., and Randy Gimblett. 2015. ``Linking Bayesian and agent-based models to simulate complex social-ecological systems in semi-arid regions.'' \emph{Frontiers in Environmental Science} 3 (AUG): 1--9. 
\url{https://doi.org/10.3389/fenvs.2015.00055}. \leavevmode\hypertarget{ref-Rogers1997}{}% Rogers, George O. 1997. ``The dynamics of risk perception: How does perceived risk respond to risk events?'' \emph{Risk Analysis} 17 (6): 745--57. \url{https://doi.org/10.1111/j.1539-6924.1997.tb01280.x}. \leavevmode\hypertarget{ref-Sanborn2016}{}% Sanborn, Adam N., and Nick Chater. 2016. ``Bayesian Brains without Probabilities.'' \emph{Trends in Cognitive Sciences} 20 (12): 883--93. \url{https://doi.org/10.1016/j.tics.2016.10.003}. \leavevmode\hypertarget{ref-Simon1990}{}% Simon, Herbert A. 1990. ``Bounded Rationality.'' In \emph{Utility and Probability}, 15--18. London: Palgrave Macmillan UK. \url{https://doi.org/10.1007/978-1-349-20568-4_5}. \leavevmode\hypertarget{ref-Tucker2007}{}% Tucker, Bram. 2007. ``Perception of Interannual Covariation and Strategies for Risk Reduction among Mikea of Madagascar: Individual and Social Learning.'' \emph{Human Nature} 18 (2): 162--80. \url{https://doi.org/10.1007/s12110-007-9007-z}. \leavevmode\hypertarget{ref-Tucker2013}{}% Tucker, Bram, Jaovola Tombo, Tsiazonera, Patricia Hajasoa, Charlotte Nagnisaha, Vorisoa Rene Lahitoka, and Christian Zahatsy. 2013. ``Beyond Mean and Variance: Economic Risk Versus Perceived Risk of Farming, Foraging, and Fishing Activities in Southwestern Madagascar.'' \emph{Human Ecology} 41 (3): 393--407. \url{https://doi.org/10.1007/s10745-013-9563-2}. \leavevmode\hypertarget{ref-Upton1987}{}% Upton, Martin. 1987. \emph{African farm management}. CUP Archive. \leavevmode\hypertarget{ref-Weiss2011}{}% Weiss, Ehud, and Daniel Zohary. 2011. ``The Neolithic Southwest Asian founder crops their biology and archaeobotany.'' \emph{Current Anthropology} 52 (SUPPL. 4). \url{https://doi.org/10.1086/658367}. \leavevmode\hypertarget{ref-Wilkinson1997}{}% Wilkinson, Tony J. 1997. ``Environmental Fluctuations, Agricultural Production and Collapse: A View from Bronze Age Upper Mesopotamia.'' In \emph{Third Millenium Bc Climate Change and Old World Collapse}, edited by Hasan Nüzhet Dalfes, G. Kukla, and Harvey Weiss, 66--106. Springer. \url{http://books.google.com/books?id=LHIRAQAAIAAJ}. \bibliographystyle{Formato\_bibliografia\_Springer} \bibliography{bibliography.bib} \end{document}
{ "alphanum_fraction": 0.7940252344, "avg_line_length": 113.7932960894, "ext": "tex", "hexsha": "9b3616e0d0978f64e92333b1e2f3f3b2405612a0", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "e4bd7e6efad30610a5635831c29141640b448bb6", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "nick-gauthier/risk-perception", "max_forks_repo_path": "manuscript/manuscript.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "e4bd7e6efad30610a5635831c29141640b448bb6", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "nick-gauthier/risk-perception", "max_issues_repo_path": "manuscript/manuscript.tex", "max_line_length": 1417, "max_stars_count": null, "max_stars_repo_head_hexsha": "e4bd7e6efad30610a5635831c29141640b448bb6", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "nick-gauthier/risk-perception", "max_stars_repo_path": "manuscript/manuscript.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 9782, "size": 40738 }
%% %% Automatically generated file from DocOnce source %% (https://github.com/hplgit/doconce/) %% %% %-------------------- begin preamble ---------------------- \documentclass[% oneside, % oneside: electronic viewing, twoside: printing final, % draft: marks overfull hboxes, figures with paths 10pt]{article} \listfiles % print all files needed to compile this document \usepackage{relsize,makeidx,color,setspace,amsmath,amsfonts,amssymb} \usepackage[table]{xcolor} \usepackage{bm,ltablex,microtype} \usepackage[pdftex]{graphicx} \usepackage[T1]{fontenc} %\usepackage[latin1]{inputenc} \usepackage{ucs} \usepackage[utf8x]{inputenc} \usepackage{lmodern} % Latin Modern fonts derived from Computer Modern % Hyperlinks in PDF: \definecolor{linkcolor}{rgb}{0,0,0.4} \usepackage{hyperref} \hypersetup{ breaklinks=true, colorlinks=true, linkcolor=linkcolor, urlcolor=linkcolor, citecolor=black, filecolor=black, %filecolor=blue, pdfmenubar=true, pdftoolbar=true, bookmarksdepth=3 % Uncomment (and tweak) for PDF bookmarks with more levels than the TOC } %\hyperbaseurl{} % hyperlinks are relative to this root \setcounter{tocdepth}{2} % levels in table of contents % --- fancyhdr package for fancy headers --- \usepackage{fancyhdr} \fancyhf{} % sets both header and footer to nothing \renewcommand{\headrulewidth}{0pt} \fancyfoot[LE,RO]{\thepage} % Ensure copyright on titlepage (article style) and chapter pages (book style) \fancypagestyle{plain}{ \fancyhf{} \fancyfoot[C]{{\footnotesize \copyright\ 1999-2021, "Computational Physics II FYS4411/FYS9411":"http://www.uio.no/studier/emner/matnat/fys/FYS4411/index-eng.html". Released under CC Attribution-NonCommercial 4.0 license}} % \renewcommand{\footrulewidth}{0mm} \renewcommand{\headrulewidth}{0mm} } % Ensure copyright on titlepages with \thispagestyle{empty} \fancypagestyle{empty}{ \fancyhf{} \fancyfoot[C]{{\footnotesize \copyright\ 1999-2021, "Computational Physics II FYS4411/FYS9411":"http://www.uio.no/studier/emner/matnat/fys/FYS4411/index-eng.html". Released under CC Attribution-NonCommercial 4.0 license}} \renewcommand{\footrulewidth}{0mm} \renewcommand{\headrulewidth}{0mm} } \pagestyle{fancy} % prevent orhpans and widows \clubpenalty = 10000 \widowpenalty = 10000 % --- end of standard preamble for documents --- % insert custom LaTeX commands... \raggedbottom \makeindex \usepackage[totoc]{idxlayout} % for index in the toc \usepackage[nottoc]{tocbibind} % for references/bibliography in the toc %-------------------- end preamble ---------------------- \begin{document} % matching end for #ifdef PREAMBLE \newcommand{\exercisesection}[1]{\subsection*{#1}} % ------------------- main content ---------------------- % ----------------- title ------------------------- \thispagestyle{empty} \begin{center} {\LARGE\bf \begin{spacing}{1.25} Project 2, Variational Monte Carlo studies of electronic systems. 
Deadline June 1, Spring 2021
\end{spacing}
}
\end{center}

% ----------------- author(s) -------------------------

\begin{center}
{\bf \href{{http://www.uio.no/studier/emner/matnat/fys/FYS4411/index-eng.html}}{Computational Physics II FYS4411/FYS9411}}
\end{center}

\begin{center}
% List of all institutions:
\centerline{{\small Department of Physics, University of Oslo, Norway}}
\end{center}

% ----------------- end author(s) -------------------------

% --- begin date ---
\begin{center}
Apr 7, 2021
\end{center}
% --- end date ---

\vspace{1cm}


\subsection*{Introduction}
The aim of this project is to use the Variational Monte Carlo (VMC) method to evaluate the ground state energy, onebody densities, expectation values of the kinetic and potential energies and single-particle densities of quantum dots with $N=2$, $N=6$, $N=12$ and $N=20$ electrons. These are so-called closed-shell systems.

\subsection*{Theoretical background and description of the physical system}
We consider a system of electrons confined in a pure two-dimensional isotropic harmonic oscillator potential, with an idealized total Hamiltonian given by
\begin{equation}
\label{eq:finalH}
\hat{H}=\sum_{i=1}^{N} \left( -\frac{1}{2} \nabla_i^2 + \frac{1}{2} \omega^2r_i^2 \right)+\sum_{i<j}\frac{1}{r_{ij}},
\end{equation}
where natural units ($\hbar=c=e=m_e=1$) are used and all energies are in so-called atomic units a.u. We will study systems of many electrons $N$ as functions of the oscillator frequency $\omega$ using the above Hamiltonian. The Hamiltonian includes a standard harmonic oscillator part
\begin{equation*}
\hat{H}_0=\sum_{i=1}^{N} \left( -\frac{1}{2} \nabla_i^2 + \frac{1}{2} \omega^2r_i^2 \right),
\end{equation*}
and the repulsive interaction between two electrons given by
\begin{equation*}
\hat{H}_1=\sum_{i<j}\frac{1}{r_{ij}},
\end{equation*}
with the distance between electrons given by $r_{ij}=\vert \bm{r}_i-\bm{r}_j\vert$. We define the modulus of the positions of the electrons (for a given electron $i$) as $r_i = \sqrt{r_{i_x}^2+r_{i_y}^2}$.

\paragraph{Project 2 a):}
In exercises a-f we will deal only with a system of two electrons in a quantum dot with a frequency of $\hbar\omega = 1$. The reason for this is that we have exact closed form expressions for the ground state energy from Taut's work for selected values of $\omega$, see M. Taut, Phys. Rev. A \textbf{48}, 3561 (1993). The energy is given by $3$ a.u. (atomic units) when the interaction between the electrons is included. If only the harmonic oscillator part of the Hamiltonian is included, the so-called unperturbed part,
\begin{equation*} \hat{H}_0=\sum_{i=1}^{N} \left( -\frac{1}{2} \nabla_i^2 + \frac{1}{2} \omega^2r_i^2 \right),\end{equation*}
the energy is $2$ a.u.
The wave function for one electron in an oscillator potential in two dimensions is
\begin{equation*}
\phi_{n_x,n_y}(x,y) = A H_{n_x}(\sqrt{\omega}x)H_{n_y}(\sqrt{\omega}y)\exp{(-\omega(x^2+y^2)/2)}.
\end{equation*}
The functions $H_{n_x}(\sqrt{\omega}x)$ are so-called Hermite polynomials, discussed in connection with project 1, while $A$ is a normalization constant. For the lowest-lying state we have $n_x=n_y=0$ and an energy $\epsilon_{n_x,n_y}=\omega(n_x+n_y+1) = \omega$. Convince yourself that the lowest-lying energy for the two-electron system is simply $2\omega$.
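If you choose Python as your programming language, these single-particle functions can be sketched along the following lines; the normalization constant $A$ is deliberately left out, since only ratios of wave-function values enter the Metropolis algorithm, and the function names are suggestions only. The same routine is useful later when setting up the Slater determinant in exercise g).

\begin{verbatim}
import numpy as np
from numpy.polynomial.hermite import hermval

def hermite(n, x):
    """Physicists' Hermite polynomial H_n(x)."""
    coeff = np.zeros(n + 1)
    coeff[n] = 1.0
    return hermval(x, coeff)

def phi(nx, ny, x, y, omega=1.0):
    """Unnormalized single-particle orbital phi_{nx,ny}(x, y)."""
    sq = np.sqrt(omega)
    return (hermite(nx, sq * x) * hermite(ny, sq * y)
            * np.exp(-0.5 * omega * (x * x + y * y)))
\end{verbatim}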
The unperturbed wave function for the ground state of the two-electron system is given by
\begin{equation*}
\Phi(\bm{r}_1,\bm{r}_2) = C\exp{\left(-\omega(r_1^2+r_2^2)/2\right)},
\end{equation*}
with $C$ being a normalization constant and $r_i = \sqrt{r_{i_x}^2+r_{i_y}^2}$. Note that the vector $\bm{r}_i$ refers to the $x$ and $y$ position for a given particle. What is the total spin of this wave function? Find arguments for why the ground state should have this specific total spin.

\paragraph{Project 2 b):}
We want to perform a Variational Monte Carlo calculation of the ground state of two electrons in a quantum dot well with different oscillator energies, assuming total spin $S=0$, using the Hamiltonian of Eq.~(\ref{eq:finalH}). Our trial wave function has the following form
\begin{equation}
\psi_{T}(\bm{r}_1,\bm{r}_2) =
C\exp{\left(-\alpha\omega(r_1^2+r_2^2)/2\right)}
\exp{\left(\frac{ar_{12}}{(1+\beta r_{12})}\right)},
\label{eq:trial}
\end{equation}
where $a$ is equal to one when the two electrons have anti-parallel spins and $1/3$ when the spins are parallel. Finally, $\alpha$ and $\beta$ are our variational parameters.
Note well the dependence on $\alpha$ for the single-particle part of the trial function. It is important to remember this when you use higher-order Hermite polynomials.

Find the analytical expressions for the local energy.

\paragraph{Project 2 c):}
Your task is to perform a Variational Monte Carlo calculation using the Metropolis algorithm to compute the integral
\begin{equation}
\langle E \rangle =
\frac{\int d\bm{r}_1d\bm{r}_2\psi^{\ast}_T(\bm{r}_1,\bm{r}_2)\hat{H}(\bm{r}_1,\bm{r}_2)\psi_T(\bm{r}_1,\bm{r}_2)}
{\int d\bm{r}_1d\bm{r}_2\psi^{\ast}_T(\bm{r}_1,\bm{r}_2)\psi_T(\bm{r}_1,\bm{r}_2)}.
\end{equation}
Compute the expectation value of the energy using both the analytical expression for the local energy and a numerical evaluation (numerical differentiation) of the kinetic energy. Compare the time usage of the two approaches. Perform these calculations without importance sampling and also without the Jastrow factor. For the calculations without the Jastrow factor and the repulsive Coulomb potential, your energy should equal 2.0 a.u. and your variance should be exactly equal to zero.

\paragraph{Project 2 d):}
Add now importance sampling and repeat the calculations from the previous exercise but use only the analytical expression for the local energy. Perform also a blocking analysis in order to obtain the optimal standard deviation. Compare your results with those obtained without importance sampling and comment on your results.

\paragraph{Project 2 e):}
Using either the steepest descent method or the conjugate gradient method, find the optimal variational parameters and perform your Monte Carlo calculations using these. In addition, you should parallelize your program using MPI and set it up to run on Smaug.

\paragraph{Project 2 f):}
Finally, we will now analyze and interpret our results for the two-electron systems. Find the energy minimum and discuss your results compared with the analytical solution from Taut's work, see reference [1] below. Compute also the mean distance $r_{12}=\vert \bm{r}_1-\bm{r}_2\vert$ (with $r_i = \sqrt{r_{i_x}^2+r_{i_y}^2}$) between the two electrons for the optimal set of the variational parameters. With the optimal parameters for the ground state wave function, compute the onebody density. Discuss your results and compare them with those obtained with pure harmonic oscillator wave functions.
Run Monte Carlo calculations without the Jastrow factor as well and compute the same quantities. How important are the correlations induced by the Jastrow factor?

Compute also the expectation value of the kinetic energy and potential energy using $\omega=0.01$, $\omega=0.05$, $\omega=0.1$, $\omega=0.5$ and $\omega=1.0$. Comment on your results. Hint: think of the virial theorem.

\paragraph{Project 2 g):}
The previous exercises have prepared you for extending your calculational machinery to other systems. Here we will focus on quantum dots with $N=6$ and $N=12$ electrons.

The new item you need to pay attention to is the calculation of the Slater determinant. This is an additional complication to your VMC calculations. If we stick to harmonic-oscillator-like wave functions, the trial wave function for, say, an $N=6$ electron quantum dot can be written as
\begin{equation}
\psi_{T}(\bm{r}_1,\bm{r}_2,\dots, \bm{r}_6) =
Det\left(\phi_{1}(\bm{r}_1),\phi_{2}(\bm{r}_2),
\dots,\phi_{6}(\bm{r}_6)\right)
\prod_{i<j}^{6}\exp{\left(\frac{a r_{ij}}{(1+\beta r_{ij})}\right)},
\end{equation}
where $Det$ is a Slater determinant and the single-particle wave functions are the harmonic oscillator wave functions with $n_x+n_y \leq 1$. Similarly, for the $N=12$ quantum dot, the trial wave function can take the form
\begin{equation}
\psi_{T}(\bm{r}_1,\bm{r}_2, \dots,\bm{r}_{12}) =
Det\left(\phi_{1}(\bm{r}_1),\phi_{2}(\bm{r}_2),
\dots,\phi_{12}(\bm{r}_{12})\right)
\prod_{i<j}^{12}\exp{\left(\frac{ar_{ij}}{(1+\beta r_{ij})}\right)}.
\end{equation}
In this case you need to include the single-particle wave functions with $n_x+n_y=2$ as well. Observe that $r_i = \sqrt{r_{i_x}^2+r_{i_y}^2}$. Use the Hermite polynomials defined in project 1. Reference [5] gives benchmark results for closed-shell systems up to $N=20$.

Write a function which sets up the Slater determinant. Find the Hermite polynomials which are needed for $n_x=0,1,2$ and obviously $n_y$ as well. Compare the results you obtain with those from project 1. Compute the ground state energies of quantum dots for $N=6$ and $N=12$ electrons, following the same setup as in the previous exercises for $\omega=0.01$, $\omega=0.05$, $\omega=0.1$, $\omega=0.5$, and $\omega=1.0$.

The calculations should include parallelization, blocking, importance sampling and energy minimization using the conjugate gradient approach or similar approaches. To test your Slater determinant code, you should reproduce the unperturbed single-particle energies when the electron-electron repulsion is switched off. Convince yourself that the unperturbed ground state energy for $N=6$ is $10\omega$ and for $N=12$ it is $28\omega$. What is the expected total spin of the ground states?

\paragraph{Project 2 h):}
With the optimal parameters for the ground state wave function, compute again the onebody density. Discuss your results and compare them with those obtained with pure harmonic oscillator wave functions.

Run Monte Carlo calculations without the Jastrow factor as well and compute the same quantities. How important are the correlations induced by the Jastrow factor?

Compute also the expectation value of the kinetic energy and potential energy using $\omega=0.01$, $\omega=0.05$, $\omega=0.1$, $\omega=0.5$, and $\omega=1.0$. Comment on your results.

\paragraph{Project 2 i):}
The last exercise is a performance analysis of your code(s) for the case of $N=6$ electrons. Make a performance analysis by timing your serial code with and without vectorization.
Perform several runs with the same number of Monte Carlo cycles and compute an average timing analysis with and without vectorization. Comment on your results. Use at least $10^6$ Monte Carlo samples.

Thereafter, compare your serial code(s) with the speedup you get by parallelizing your code, running either OpenMP or MPI or both. Do you get a near $100\%$ speedup with the parallel version? Comment again on your results and perform the timing benchmarks several times in order to extract an average performance time.

\paragraph{Literature.}
\begin{enumerate}
\item M. Taut, Phys. Rev. A \textbf{48}, 3561--3566 (1993).

\item B. L. Hammond, W. A. Lester and P. J. Reynolds, \emph{Monte Carlo methods in Ab Initio Quantum Chemistry}, World Scientific, Singapore, 1994, chapters 2--5 and appendix B.

\item B. H. Bransden and C. J. Joachain, \emph{Physics of Atoms and Molecules}, Longman, 1986. Chapters 6, 7 and 9.

\item A. K. Rajagopal and J. C. Kimball, see Phys. Rev. B \textbf{15}, 2819 (1977).

\item M. L. Pedersen, G. Hagen, M. Hjorth-Jensen, S. Kvaal, and F. Pederiva, Phys. Rev. B \textbf{84}, 115302 (2011).
\end{enumerate}

\noindent
\subsection*{Introduction to numerical projects}

Here follows a brief recipe and recommendation on how to write a report for each project.

\begin{itemize}
\item Give a short description of the nature of the problem and the numerical methods you have used.

\item Describe the algorithm you have used and/or developed. Here you may find it convenient to use pseudocoding. In many cases you can describe the algorithm in the program itself.

\item Include the source code of your program. Comment your program properly.

\item If possible, try to find analytic solutions or known limits in order to test your program when developing the code.

\item Include your results either in figure form or in a table. Remember to label your results. All tables and figures should have relevant captions and labels on the axes.

\item Try to evaluate the reliability and numerical stability/precision of your results. If possible, include a qualitative and/or quantitative discussion of the numerical stability, any loss of precision, etc.

\item Try to give an interpretation of your results in your answers to the problems.

\item Critique: if possible include your comments and reflections about the exercise, whether you felt you learnt something, ideas for improvements and other thoughts you had when solving the exercise. We wish to keep this course at the interactive level and your comments can help us improve it.

\item Try to establish a practice where you log your work at the computer lab. You may find such a logbook very handy at later stages in your work, especially when you don't properly remember what a previous test version of your program did. Here you could also record the time spent on solving the exercise, various algorithms you may have tested or other topics which you feel worthy of mentioning.
\end{itemize}

\noindent
\subsection*{Format for electronic delivery of report and programs}

The preferred format for the report is a PDF file. You can also use DOC or PostScript formats, or hand in your report as an IPython notebook file. As programming language we prefer that you choose between C/C++, Fortran2008 or Python. The following prescription should be followed when preparing the report:

\begin{itemize}
\item Use Canvas to hand in your projects, log in at \href{{http://canvas.uio.no}}{\nolinkurl{http://canvas.uio.no}} with your normal UiO username and password.

\item Upload \textbf{only} the report file!
For the source code file(s) you have developed, please provide us with a link to your GitHub domain. The report file should include all of your discussions and a list of the codes you have developed. The full version of the codes should be in your GitHub repository.
\item In your GitHub repository, please include a folder which contains selected results. These can be in the form of output from your code for a selected set of runs and input parameters.
\item Still in your GitHub repository, make a folder where you place your codes.
\item In this and all later projects, you should include tests (for example unit tests) of your code(s).
\item Comments from us on your projects, approval or not, corrections to be made, etc., can be found under your Devilry domain and are only visible to you and the teachers of the course.
\end{itemize}

\noindent
Finally, we encourage you to work in pairs; optimal working groups consist of 2--3 students. You can then hand in a common report.

% ------------------- end of main content ---------------

\end{document}
{ "alphanum_fraction": 0.7355766592, "avg_line_length": 47.0723514212, "ext": "tex", "hexsha": "490a167878e284204af60fd05c3fc500532b3c9e", "lang": "TeX", "max_forks_count": 54, "max_forks_repo_forks_event_max_datetime": "2022-03-07T10:44:14.000Z", "max_forks_repo_forks_event_min_datetime": "2015-02-09T10:02:00.000Z", "max_forks_repo_head_hexsha": "9cf10ffb2557cc73c4e6bab060d53690ee39426f", "max_forks_repo_licenses": [ "CC0-1.0" ], "max_forks_repo_name": "Schoyen/ComputationalPhysics2", "max_forks_repo_path": "doc/Projects/2021/Project2/Project2VMC/pdf/Project2VMC.tex", "max_issues_count": 3, "max_issues_repo_head_hexsha": "9cf10ffb2557cc73c4e6bab060d53690ee39426f", "max_issues_repo_issues_event_max_datetime": "2020-02-08T13:15:42.000Z", "max_issues_repo_issues_event_min_datetime": "2020-01-18T10:43:38.000Z", "max_issues_repo_licenses": [ "CC0-1.0" ], "max_issues_repo_name": "Schoyen/ComputationalPhysics2", "max_issues_repo_path": "doc/Projects/2021/Project2/Project2VMC/pdf/Project2VMC.tex", "max_line_length": 405, "max_stars_count": 87, "max_stars_repo_head_hexsha": "9cf10ffb2557cc73c4e6bab060d53690ee39426f", "max_stars_repo_licenses": [ "CC0-1.0" ], "max_stars_repo_name": "Schoyen/ComputationalPhysics2", "max_stars_repo_path": "doc/Projects/2021/Project2/Project2VMC/pdf/Project2VMC.tex", "max_stars_repo_stars_event_max_datetime": "2022-03-28T07:11:53.000Z", "max_stars_repo_stars_event_min_datetime": "2015-01-21T08:29:56.000Z", "num_tokens": 4961, "size": 18217 }
\chapter{Preface to the Second Edition, 2013} `Forest Path' was first printed in 1999 and originally planned as the first edition of a Wat Pah Nanachat newsletter. Over-enthusiasm and considerable proliferation resulted in a one-off book publication which more or less coincided with the monastery's twenty-fifth anniversary. Since then we have been surprised by the number of requests to reprint this collection of little essays, talks and anecdotes about life in Wat Pah Nanachat as it was at that time. Apart from the formal Dhamma talks the book contains, we were hesitant at first to reprint its other contents, the `old stories' and personal accounts by then younger authors. But it was those other parts which in fact added much of the book's authentic flavour and made so many people find it beneficial and joyful to read. Hence it appears that our hesitation over reprinting this book was due to a concern that the snapshots of life in the monastery offered by those individuals at that time had become dated. In fact, though, on recently going through the various contributions again, we found that many of those snapshots could still be written today -- there would be a new cast of players, but the atmosphere experienced then, and communicated so vividly in the old `outdated' accounts, is still very similar. So we are happy to realize that when we take the wholehearted present-day Dhamma approach of genuinely experiencing what is happening at a particular \emph{saṃsāric} moment in life, we also embark on something timeless. Beyond the specific details, all our little hopes and sorrows around daily life in the monastery and the higher values and principles of our life -- the Buddha's core teachings -- become apparent in these snapshots. Better still, the details of an individual's unique experiences are in fact exemplary: they are transferable and thus pass the test of time. Thinking in these terms gives the monks and novices at Wat Pah Nanachat extra encouragement, as in many ways it makes the limitations and the suffering inevitably entailed by each one's specific experience worthwhile. Once we became aware of all the `good examples' already to be found in the original `Forest Path', the option of rewriting some of the material in a 2012 setting as a `Forest Path II' suddenly seemed pointless. With \emph{saṃsāra} essentially repeating the same old drama endlessly anyway, we thought that simply revisiting the old setting once more with a simple reissue of the original would be a much more effective and honest choice than reworking the text and suggesting it was `new'. It is also true tradition to go back a little into the past, with the hope of taking the opportunity to realize some timeless truths. So the material in this new edition of `Forest Path' is an almost exact reprint of the old 1999 version, although with editorial assistance we have taken the opportunity to correct some of the punctuation and grammar. We have also created a few links to the present-day situation (which seems fair enough, considering that a big impersonal monastic community only becomes a reality when it is embodied in some specific individuals). 
For us now, the present dwellers in the monastery, who in most cases did not arrive there in time to meet the great example Luang Por Chah in person, it seems most affirming that the principles of monastic life as his disciples still permeate the scenes that each new generation of monks, novices, \emph{anāgārikas} and visitors has been continuously experiencing at Wat Pah Nanachat during the past thirty-five years. So please come and see our monastery for yourself, and get your free real-time update on our community by practising the present-moment Dhamma with us. In the meantime, we hope you will enjoy viewing these historical snapshots again. \bigskip {\par\raggedleft Yours in the Dhamma,\\ On behalf of the Sangha of Wat Pah Nanachat, Kevali Bhikkhu\\ Abbot, 2013 \par} \vfill \section{Note} Please be aware that the authors' monastic titles when the original essays were written have been kept (e.g. `Sāmaṇera' or `Tan'). You will see in the short summaries added after each essay, which explain what has happened since then, that almost all of them are now senior monks whom we would usually honour by calling them `Ajahns', teachers.
{ "alphanum_fraction": 0.8029499885, "avg_line_length": 49.8735632184, "ext": "tex", "hexsha": "75fabbcd4fe80378edd319238271f0007d329879", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "8c70e360908fe326f497be340e6e720bce3150b4", "max_forks_repo_licenses": [ "CC-BY-3.0" ], "max_forks_repo_name": "profound-labs/forest-path", "max_forks_repo_path": "manuscript/tex/preface-second-edition-2013.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "8c70e360908fe326f497be340e6e720bce3150b4", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "CC-BY-3.0" ], "max_issues_repo_name": "profound-labs/forest-path", "max_issues_repo_path": "manuscript/tex/preface-second-edition-2013.tex", "max_line_length": 72, "max_stars_count": 1, "max_stars_repo_head_hexsha": "8c70e360908fe326f497be340e6e720bce3150b4", "max_stars_repo_licenses": [ "CC-BY-3.0" ], "max_stars_repo_name": "profound-labs/forest-path", "max_stars_repo_path": "manuscript/tex/preface-second-edition-2013.tex", "max_stars_repo_stars_event_max_datetime": "2017-05-14T17:09:16.000Z", "max_stars_repo_stars_event_min_datetime": "2017-05-14T17:09:16.000Z", "num_tokens": 1007, "size": 4339 }
\documentclass[conference]{IEEEtran}
\IEEEoverridecommandlockouts
% The preceding line is only needed to identify funding in the first footnote. If that is unneeded, please comment it out.
\usepackage{cite}
\usepackage[portuges,brazil,english]{babel}
\usepackage{amsmath,amssymb,amsfonts}
\usepackage{algorithmic}
\usepackage{graphicx}
\usepackage{textcomp}
\def\BibTeX{{\rm B\kern-.05em{\sc i\kern-.025em b}\kern-.08em T\kern-.1667em\lower.7ex\hbox{E}\kern-.125emX}}
\begin{document}

\title{MC886 - Machine Learning Assignment \#1 - Linear Regression}

\author{\IEEEauthorblockN{Fábio Tanniguchi}
\IEEEauthorblockA{ RA \\ email address}
\and
\IEEEauthorblockN{Felipe Caminada}
\IEEEauthorblockA{ RA 140604 \\ [email protected]}}

\maketitle

\section{Introduction}
The motivation behind this assignment is to reinforce the contents studied in class about Linear Regression and the basics of Machine Learning. The task at hand is to find a Linear Regression-based model to predict the release year of songs ranging from 1922 to 2011.

\section{Activities}
The state-of-the-art research (about prior work for solving the same problem).

\section{Proposed Solutions}
The proposed solutions for the selected problem.

\section{Experiments and Discussion}
The experiments carried out and the obtained results.

\section{Conclusions and Future Work}
The main conclusions of the work as well as some future directions for other people interested in continuing this work.

\begin{thebibliography}{00}
\bibitem{b1} Christopher M. Bishop. ``Pattern Recognition and Machine Learning''. Springer-Verlag New York, Inc., Secaucus, NJ, USA, 2006.
\end{thebibliography}

\end{document}
{ "alphanum_fraction": 0.7876631079, "avg_line_length": 30.6545454545, "ext": "tex", "hexsha": "1c769fee55433a65d00377bc509917a7fd30b2d3", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "1f65a72e32fe2bf013d7f1659a45cd30a99a8e54", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "fabiotanniguchi/MC886", "max_forks_repo_path": "report/report-model/report-model.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "1f65a72e32fe2bf013d7f1659a45cd30a99a8e54", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "fabiotanniguchi/MC886", "max_issues_repo_path": "report/report-model/report-model.tex", "max_line_length": 143, "max_stars_count": null, "max_stars_repo_head_hexsha": "1f65a72e32fe2bf013d7f1659a45cd30a99a8e54", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "fabiotanniguchi/MC886", "max_stars_repo_path": "report/report-model/report-model.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 448, "size": 1686 }
%------------------------- % Resume in Latex % Author : Pradeep Dantuluri % License : MIT %------------------------ \documentclass[letterpaper,11pt]{article} \usepackage{latexsym} \usepackage[empty]{fullpage} \usepackage{titlesec} \usepackage{marvosym} \usepackage[usenames,dvipsnames]{color} \usepackage{verbatim} \usepackage{enumitem} \usepackage[hidelinks]{hyperref} \usepackage{fancyhdr} \usepackage[english]{babel} \pagestyle{fancy} \fancyhf{} % clear all header and footer fields \fancyfoot{} \renewcommand{\headrulewidth}{0pt} \renewcommand{\footrulewidth}{0pt} % Adjust margins \addtolength{\oddsidemargin}{-0.5in} \addtolength{\evensidemargin}{-0.5in} \addtolength{\textwidth}{1in} \addtolength{\topmargin}{-.5in} \addtolength{\rightmargin}{-.5in} \addtolength{\textheight}{1in} \urlstyle{same} \raggedbottom \raggedright \setlength{\tabcolsep}{0in} % Sections formatting \titleformat{\section}{ \vspace{-4pt}\scshape\raggedright\large }{}{0em}{}[\color{black}\titlerule \vspace{-5pt}] %------------------------- % Custom commands \newcommand{\resumeItem}[2]{ \item\small{ \textbf{#1}{: #2 \vspace{-2pt}} } } \newcommand{\resumeSubheading}[4]{ \vspace{-1pt}\item \begin{tabular*}{0.97\textwidth}[t]{l@{\extracolsep{\fill}}r} \textbf{#1} & #2 \\ \textit{\small#3} & \textit{\small #4} \\ \end{tabular*}\vspace{-5pt} } \newcommand{\pubItem}[3]{ \vspace{-1pt}\item \begin{tabular*}{0.97\textwidth}[t]{l@{\extracolsep{\fill}}r} \textbf{#1} & \\ \textit{\small#2} & \\ \small#3 & \\ \end{tabular*}\vspace{-5pt} } \newcommand{\resumeSubItem}[2]{\resumeItem{#1}{#2}\vspace{-4pt}} \renewcommand{\labelitemii}{$\circ$} \newcommand{\resumeSubHeadingListStart}{\begin{itemize}[leftmargin=*]} \newcommand{\resumeSubHeadingListEnd}{\end{itemize}} \newcommand{\resumeItemListStart}{\begin{itemize}} \newcommand{\resumeItemListEnd}{\end{itemize}\vspace{-5pt}} %------------------------------------------- %%%%%% CV STARTS HERE %%%%%%%%%%%%%%%%%%%%%%%%%%%% \begin{document} %----------HEADING----------------- \begin{tabular*}{\textwidth}{l@{\extracolsep{\fill}}r} \textbf{\href{https://pradeep122.github.io}{\Large Pradeep Dantuluri}} & Mobile : +91 897-817-3535 \\ Email : \href{mailto:[email protected]}{[email protected]} & Skype : pradeep011 \\ \end{tabular*} \vspace{7pt} Senior Developer/Architect with 10+ years of experience in establishing, managing and leading technical teams. %--------PROGRAMMING SKILLS------------ \vspace{-14pt} \section{} \resumeSubHeadingListStart \item{ \textbf{Expertise}{: Functional Programming, Micro Services, DevOps, Project Management, Team Building } } \item{ \textbf{Areas of Interest}{: Distributed Systems, High Scalability, Security } } \item{ \textbf{Technologies}{: Java, Scala, RxJava, Play, AWS, Google Cloud, Docker, Kubernetes, Prometheus, Vault, Keycloak, MySQL, MongoDB, ElasticSearch, Redis, RabbitMQ, Android, Javascript, NodeJS, Angular } } \resumeSubHeadingListEnd %-----------EXPERIENCE----------------- \section{Experience} \resumeSubHeadingListStart \resumeSubheading {Yantranet}{Hyderabad, India} {Lead Engineer \& Architect}{April 2012 - Present} \resumeItemListStart \item{Yantranet is a fully bootstrapped company, and have been cash-flow positive since day one. We are a part of \href{http://10000startups.com/our-startups}{NASSCOM 10K incubator} and \href{http://xlr8ap.com}{XLR8 AP accelerator}. Our business is based out of CT, USA, with clients all over the country. 
We have a development center in Hyderabad, India}
\item{As an early hire, I am part of the core Management team, responsible for the back-end stack, hiring, and ensuring high availability at a 24/7 business}
\item{I am the Architect for all consumer-facing products at Yantranet such as YantraPlatform, YantraSign and ScreenVive. I led the team through all stages of the development cycle: from feature planning and design to implementation and support}
\item{I am currently the Lead Engineer for back-end services and responsible for software development and system reliability }
\item{I was the Lead Engineer for our Android and Web Apps before hiring teams and handing over the software development responsibilities. }
\item{Introduced Functional and Reactive Programming paradigms to the team and helped migrate to RxJava and RxJs}
\item{Led the migration to a Micro Service Architecture and Kubernetes Deployments within the company, which vastly improved our deployment cycles}
\item{Migrated from a bespoke auth server to RBAC-based Keycloak for fine-grained authentication and authorization, including support for OAuth2.0 and OpenID Connect for 3rd party authentication}
\item{I led a team to set up \& maintain a comprehensive DevOps pipeline using Bitbucket, Jenkins, Docker, Kubernetes, Vault, Prometheus, Grafana}
\resumeItemListEnd

\textbf{Products}
\resumeItemListStart
\resumeItem{YantraPlatform}
{A bespoke IoT platform designed for media-intensive applications such as Digital Signage and Kiosks. Allows for monitoring and pixel-level management of thousands of screens spread across the world. \href{https://www.yantranet.com}{https://www.yantranet.com}}
\resumeItem{YantraSign}
{A Digital Signage solution that leverages the YantraPlatform to deliver seamless control over content delivery across a network of screens. \href{https://yantrasign.com}{https://yantrasign.com}}
\resumeItem{ScreenVive}
{Content as a Service for your Digital Screens. Access a variety of configurable content feeds, such as news, entertainment and trivia, to be used on your screens. \href{https://screenvive.com}{https://screenvive.com}}
\resumeItemListEnd

\resumeSubheading
{DERI, NUI Galway}{Galway, Ireland}
{Research Associate}{August 2008 - September 2011}
\resumeItemListStart
\resumeItem{Digi.me}
{An EU-funded research project to develop an Intelligent Trust-enhancing Store of a User's personal information sphere in digital and social environments.}
\resumeItem{Nepomuk}
{A Social Semantic Desktop built on top of KDE which enables a deeper inter-connection of data from various applications using RDF and other Semantic Web Technologies }
\resumeItem{Publications}
{I co-authored 4 research papers and a journal article in the field of NLP and the Semantic Web}
\resumeItem{Technologies Used}
{Java, Python, GWT, RDF, OWL, Research, Academic Writing }
\resumeItemListEnd

\resumeSubheading
{University of Trento}{Trento, Italy}
{Research Intern}{December 2007 - June 2008}
\resumeItemListStart
\resumeItem{Context based Word Similarity using Wikipedia}
{An EASTWeb-funded research project exploring the use of Wikipedia for Semantic Analysis.
I developed a Term Similarity module using page links, page content and category links within Wikipedia}
\resumeItem{Technologies Used}
{Java, MySQL, XML, J2EE }
\resumeItemListEnd
\resumeSubHeadingListEnd

%-----------PROJECTS-----------------
\section{Side Projects}
\resumeSubHeadingListStart
\resumeSubheading
{Tickscore}{Hyderabad, India}
{Technical Co-Founder}{January 2017 -- April 2019}
\resumeItemListStart
\item {An online service helping students prepare for the Civil Services Examination in India. Students could practise either online or using an Android App, against a well-curated set of questions and get detailed analytics on their progress.}
\item { I designed and developed the backend services.}
\resumeItem{Technologies Used}
{Javascript, NodeJS, ReactJS, ReactNative }
\resumeItemListEnd
\resumeSubheading
{InMins}{Hyderabad, India}
{Technical Co-Founder}{January 2011 -- April 2012}
\resumeItemListStart
\item{A food delivery app created long before mobile apps became prevalent. It was a web-based service, which catered to foodies in Hyderabad, India.}
\item{I designed and developed the customer-facing web app and an administration console for managing daily operations. }
\resumeItem{Technologies Used}
{Java, Spring, Javascript, Bootstrap, Backbone, JQuery }
\resumeItemListEnd
\resumeSubHeadingListEnd

%-----------EDUCATION-----------------
\section{Education}
\resumeSubHeadingListStart
\resumeSubheading
{Indian Institute of Information Technology (IIIT) }{Allahabad, India}
{Bachelor of Technology in Information Technology; GPA: 7.98/10.0 }{ 2004 -- 2008}
\resumeItemListStart
\resumeItem{Universal Digital Library (UDL)}
{Involved with research and development of tools and technologies required for Indian digital libraries in collaboration with CMU, USA and IISc, India}
\resumeItem{Technology Development in Indian Languages (TDIL) }
{Worked towards development of software modules for various NLP tools tailored for the Indian Languages, particularly involved with POS Tagging (Parts of Speech) and NER (Named Entity Recognition)}
\resumeItemListEnd
\resumeSubHeadingListEnd

%-----------Publications-----------------
\section{Publications}
\resumeSubHeadingListStart
\pubItem
{Engineering a Controlled Natural Language into Semantic MediaWiki}
{Controlled Natural Language Extended Workshop 2011}
{ Pradeep Dantuluri, Brian Davis, Pierre Ludwick, Siegfried Handschuh}
\pubItem
{Towards Controlled Natural Language for Semantic Annotation }
{International Journal on Semantic Web and Information Systems 2010 }
{ Pradeep Dantuluri, Brian Davis, Siegfried Handschuh, Hamish Cunningham}
\pubItem
{A Use Case for Controlled Languages in Ontology-based Knowledge Management }
{ International Language Resources and Evaluation, 2010 }
{ Pradeep Dantuluri, Brian Davis, Siegfried Handschuh}
\pubItem
{On Designing Controlled Natural Languages for Semantic Annotation }
{Workshop on Controlled Natural Language for Semantic Annotation, ESWC 2009 }
{ Pradeep Dantuluri, Brian Davis, Siegfried Handschuh, Hamish Cunningham}
\pubItem
{HMM-based Language-independent POS Tagger }
{IICAI 2007}
{Pradeep Dantuluri, Rakesh Masabattula, Ratna Sanyal}
\resumeSubHeadingListEnd

%--------PROGRAMMING SKILLS------------
% \section{Programming Skills}
%  \resumeSubHeadingListStart
%    \item{
%     \textbf{Languages}{: Scala, Java, Javascript, Python}
%     \hfill
%     \textbf{Technologies}{: AWS, Play, React, Kafka, GCE}
%    }
%  \resumeSubHeadingListEnd

%-------------------------------------------
\end{document}
{ "alphanum_fraction": 0.7026359143, "avg_line_length": 44.962962963, "ext": "tex", "hexsha": "0a4722af2d3c987e1dc9bc1655141137c652431f", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "9c3995c286c296c65cfce14c72ce1ecaac3a8f9d", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "pradeep122/resume-latex", "max_forks_repo_path": "pradeep_dantuluri_resume.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "9c3995c286c296c65cfce14c72ce1ecaac3a8f9d", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "pradeep122/resume-latex", "max_issues_repo_path": "pradeep_dantuluri_resume.tex", "max_line_length": 363, "max_stars_count": null, "max_stars_repo_head_hexsha": "9c3995c286c296c65cfce14c72ce1ecaac3a8f9d", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "pradeep122/resume-latex", "max_stars_repo_path": "pradeep_dantuluri_resume.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 2805, "size": 10926 }
\documentclass[amsmath,amssymb,aps,pra,reprint,groupedaddress,showpacs]{revtex4-1}
\usepackage{multirow}
\usepackage{verbatim}
\usepackage{color,graphicx}
\input{../Common}

\begin{document}

\title{Fibonacci Sequence Alternative Representation}
\author{Lucchi Manuele}
\email[]{[email protected]}
\affiliation{IT Department Students, Universita' degli Studi di Milano, Citta' degli Studi, Milano, Italia}
\date{\today}

\begin{abstract}
In this research we introduce a new representation of the Fibonacci Sequence starting from its generative function, the Binet Formula. The result is an equation that can be parallelized in a multithreaded system and that only uses positive integers.
\end{abstract}

\maketitle

\section{Introduction}
The Fibonacci Sequence [1] is a simple but important sequence of natural numbers that can be found in many places in nature; for example, the number of petals in many flowers follows it. Each term of the sequence is the sum of the two previous terms:
$$ F_{n} = F_{n-1} + F_{n-2} $$
In 1843 Binet introduced its generative function, the Binet Formula [2] (even though it was already known a century earlier to Euler, Daniel Bernoulli and de Moivre), which allows one to calculate the $n$-th Fibonacci number without using a recursive approach:
$$f(n) = \frac{1}{\sqrt{5}} \left[ \left( \frac{1+\sqrt{5}}{2} \right) ^n - \left( \frac{1-\sqrt{5}}{2} \right)^n \right] $$
However, this representation uses irrational numbers like $\sqrt{5}$, which causes a certain level of imprecision in a result that should be a positive integer. Our approach consists of using the Newton Binomial [3] to eliminate all the irrational numbers and then optimizing the result.

\section{Transformation}
The first step is to factor out the $\frac{1}{2^n}$:
$$f(n) = \frac{1}{2^n\sqrt{5}} \left[ \left( 1+\sqrt{5} \right) ^n - \left( 1-\sqrt{5} \right)^n \right]$$
Now we can notice that $(1 + \sqrt{5})^n$ and $(1 - \sqrt{5})^n$ are both of the form $(a + b)^n$, so they are suitable for the Newton Binomial transformation and we can write them as \\
$ A_n:= \sum_{k=0}^n \binom{n}{k} \left(\sqrt{5} \right) ^k $ and $ B_n:= \sum_{k=0}^n \binom{n}{k} \left( -\sqrt{5} \right) ^k $ both with $k \in N$.
If we unroll the sums we will find some similarities between $A$ and $B$:
\begin{gather*}
A = a\sqrt{5} + b5 + c\sqrt{5^3} + d5^2 + \dots + z\sqrt{5}^k \\
B = -a\sqrt{5} + b5 - c\sqrt{5^3} + d5^2 + \dots + z \left( -\sqrt{5} \right) ^k
\end{gather*}
with $a, b, c, d, \dots, z \in N$. \\
Since $f(n)=\frac{1}{2^n\sqrt{5}} \left[ A - B \right]$, we have
\begin{align*}
A - B &= a\sqrt{5} + b5 + c\sqrt{5^3} + d5^2 + \dots + z\sqrt{5}^k \\
&- \left( -a\sqrt{5} + b5 - c\sqrt{5^3} + d5^2 + \dots + z \left( -\sqrt{5} \right)^k \right) \\
&= a\sqrt{5} + b5 + c\sqrt{5^3} + d5^2 + \dots + z\sqrt{5}^k + a\sqrt{5} - b5 \\
&+ c\sqrt{5^3} - d5^2 + \dots - z \left( -\sqrt{5} \right) ^k
\end{align*}
Now we can see that all the terms in which $\sqrt{5}$ appears with an even exponent (that is, the integer powers of $5$) cancel out.
If we define $C:= A - B$ we get
\begin{align*}
C_n &= \sum_{k=0}^n \binom{n}{k} \left(\sqrt{5} \right) ^k - \sum_{k=0}^n \binom{n}{k} \left(-\sqrt{5} \right) ^k\\
&= \sum_{k=0}^n \binom{n}{k} \left[ \left(\sqrt{5} \right) ^k - \left(-\sqrt{5} \right) ^k \right]
\end{align*}
We can now split the sum again, this time separating the odd and even values of $k$:
\begin{align*}
C_n &= \sum_{k=0}^{n/2} \binom{n}{2k} \left[ \left(\sqrt{5} \right) ^{2k} - \left(-\sqrt{5} \right) ^{2k} \right]\\
&+ \sum_{k=0}^{n/2} \binom{n}{2k + 1} \left[ \left(\sqrt{5} \right) ^{2k + 1} - \left(-\sqrt{5} \right) ^{2k +1} \right]
\end{align*}
Now, if we analyze the sums one by one, we can observe that the first one is identically zero for every $n \in{N}$, since $\left(\sqrt{5}\right)^{2k} = \left(-\sqrt{5}\right)^{2k}$ for every $k$:
$$ \sum_{k=0}^{\frac{n}{2}} \binom{n}{2k} \left[ \left(\sqrt{5} \right) ^{2k} - \left(-\sqrt{5} \right) ^{2k} \right] = 0 $$
while the second one will become
\begin{align*}
C &= \sum_{k=0}^{\frac{n}{2}} \binom{n}{2k + 1} \left[ \left(\sqrt{5} \right) ^{2k + 1} - \left(-\sqrt{5} \right) ^{2k + 1} \right]\\
&= 2 \sum_{k=0}^\frac{n}{2} \binom{n}{2k + 1} \left(\sqrt{5} \right) ^{2k + 1}
\end{align*}
Since $2k + 1$ is always odd, we know that
\begin{align*}
&\left(-\sqrt{5} \right) ^{2k + 1} = - \left( \sqrt{5} \right) ^{2k + 1} \implies \\
&\left( \sqrt{5} \right) ^{2k + 1} - \left(-\sqrt{5} \right) ^{2k + 1} = 2\left(\sqrt{5} \right) ^{2k + 1}
\end{align*}
In the end, what remains is our alternative representation $S_n$ (equal to $f(n)$):
\begin{align*}
S_n &= \frac{2}{2^n\sqrt{5}} \sum_{k=0}^\frac{n}{2} \binom{n}{2k + 1} \left( \sqrt{5} \right) ^{2k +1} \\
&=\frac{1}{2^{n-1}} \sum_{k=0}^\frac{n}{2} \binom{n}{2k + 1}5^{k}
\end{align*}
This is a first version of the alternative formula; it already supports a certain level of parallelization and, since every Fibonacci number is an integer, it does not need to handle real numbers like the Binet formula does.

\begin{thebibliography}{24}
\bibitem{fibonaccigeneral}
{OEIS}, \textit{Sequence A000045 (Fibonacci Numbers)}
\bibitem{binetgeneral}
{DA VEDERE}, \textit{DA VEDERE}
\end{thebibliography}

\end{document}
{ "alphanum_fraction": 0.6449785072, "avg_line_length": 47.8317757009, "ext": "tex", "hexsha": "16394f7a00a3394aa8dee3ba1d5dfa11d033ff01", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "e3c683dddccefc8876de0c7bb30a3375d1f94929", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "manuelelucchi/Fibonacci", "max_forks_repo_path": "alternative_representation/fibonacci_alternative_representation.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "e3c683dddccefc8876de0c7bb30a3375d1f94929", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "manuelelucchi/Fibonacci", "max_issues_repo_path": "alternative_representation/fibonacci_alternative_representation.tex", "max_line_length": 213, "max_stars_count": 1, "max_stars_repo_head_hexsha": "e3c683dddccefc8876de0c7bb30a3375d1f94929", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "manuelelucchi/Fibonacci", "max_stars_repo_path": "alternative_representation/fibonacci_alternative_representation.tex", "max_stars_repo_stars_event_max_datetime": "2021-02-06T23:49:23.000Z", "max_stars_repo_stars_event_min_datetime": "2021-02-06T23:49:23.000Z", "num_tokens": 1958, "size": 5118 }
\documentclass{article} \newsavebox{\oldepsilon} \savebox{\oldepsilon}{\ensuremath{\epsilon}} \usepackage[minionint,mathlf,textlf]{MinionPro} % To gussy up a bit \renewcommand*{\epsilon}{\usebox{\oldepsilon}} \usepackage[margin=1in]{geometry} \usepackage{graphicx} % For .eps inclusion %\usepackage{indentfirst} % Controls indentation \usepackage[compact]{titlesec} % For regulating spacing before section titles \usepackage{adjustbox} % For vertically-aligned side-by-side minipages \usepackage{array, amsmath, mhchem} \usepackage[hidelinks]{hyperref} \usepackage{courier, subcaption} \usepackage{multirow, enumerate} \usepackage[autolinebreaks,framed,numbered]{mcode} \usepackage{float} \restylefloat{table} \pagenumbering{gobble} \setlength\parindent{0 cm} \renewcommand{\arraystretch}{1.2} \begin{document} \large MCB 135 Problem Set 8 \hfill Due Monday, April 13, 2015 at 2:30 PM \section*{Problem 1: Fluorescence Recovery After Photobleaching (40 points)} A protein's localization can be used to regulate its activity. Fluorescence Recovery After Photobleaching (FRAP) is one method to investigate whether a protein is diffusing freely or physically confined. The coding sequence of a fluorescent protein is appended to the open reading frame of the protein of interest. A small region of a cell expressing this construct is then photobleached so that all proteins in that region permanently lose fluorescence. Diffusion of nearby fluorescent proteins into the region gradually restores fluorescence. This problem will guide you through a calculation of the expected spatiotemporal profile of fluorescence recovery for diffusion in one dimension, which can be compared to experimental data to estimate the protein's diffusion coefficient. \begin{enumerate}[a)] \item Consider the initial concentration profile: \[ c(x,t=0) = \left\{ \begin{array}{lr} 0 & : x < 0\\ a & : x \geq 0 \end{array} \right. \] Using the fact that the ``impulse response function" for 1-D diffusion from a point source is $h(x,t)=e^{-x^2/4Dt}/\sqrt{4\pi D t}$, show via convolution that: \[ c(x,t) = \frac{a}{2} \left[ 1 + \textrm{erf} \left( \frac{x}{\sqrt{4Dt}} \right)\right], \hspace{3 cm} \textrm{ where }\textrm{erf} \left( z \right) \triangleq \frac{2}{\sqrt{\pi}} \int_0^z e^{-u^2} \, du \] \item A region along the axis of a rod-shaped cell is photobleached so that the initial concentration profile is: \[ c(x,t=0) = \left\{ \begin{array}{lr} 0 & : -L < x < L\\ a & : \textrm{otherwise} \end{array} \right. \] Find an expression for $c(x,t)$ in terms of the error function erf(). (Hint: this can be done without taking any more integrals.) \item Plot $c(x,t)$ from part (b) for $x\in[-20,20]$ at $t=0.01, 1,$ and $100$. Use the parameter values $D=1$, $a=1$, and $L=5$. \item Outline how you would estimate $D$ if given a single fluorescence profile collected $\tau$ seconds after photobleaching. You may assume that photobleaching is perfectly efficient, and that $L$ and $x$ are known. \end{enumerate} \section*{Problem 2: Epidemic (60 points)} A disease spreads through a population of $N$ persons: $x$ of them are infected, and the remainder, $s=N-x$, are susceptible. When the infection subsides, a person becomes susceptible again (no immunity is conferred). Infection and recovery are modeled by two events: \[ \ce{X + S ->[k_1] X + X} \hspace{3 cm} \ce{X ->[k_2] S} \] \begin{enumerate}[a)] \item What is the analog of the system size $\Omega$ in this model? \item What is the stoichiometry matrix for these events? 
\item What are the two event propensities $\Omega r_i(x, \Omega)$? \item Using your answers to (a)-(c), find expressions for the first and second jump moments, $\mu(x,t)$ and $\sigma^2(x,t)$. \item Write down an expression for $dx$ in Langevin notation. \item Simulate the system using the Euler-Maruyama method with parameters $N=1000$, $k_1 = 0.2$, and $k_2 = 0.1$ and with step size $\Delta t = 0.1$ and $t \in [0,1000]$. Include a plot with three sample trajectories with initial values $x(0)=100,500,$ and $900$. \item Use Wright's formula to find an expression proportional to the stationary probability distribution of states, $P(x)$. Do not calculate the normalization constant. Hint: to save headaches later, don't omit the absolute value notation when taking integrals of the form $\int \frac{dx}{x} = \ln |x|+C$. \item In the real world, what would happen if $x$ chances to reach zero? How does this reflect what will happen if you attempt to normalize your expression for $P(x)$ over $x \in [0, N] \cap \mathbb{Z}$? \end{enumerate} As you saw in part f, this system has a different, ``pseudo-stable" behavior that is apparent on intermediate timescales. We can investigate it by normalizing $P(x)$ over all $x \neq 0$. \begin{enumerate}[a)] \setcounter{enumi}{8} \item Numerically calculate $P^*(x)$, the ``pseudo-stationary probability distribution," by normalizing $P(x)$ with the parameter values given above for $x \in [1, N] \cap \mathbb{Z}$. Hint: to minimize rounding errors, calculate $\ln P(x)$ for each value of $x$, subtract the minimum value in this array from all values in the array, then exponentiate and normalize. \item Calculate the mean $m$ and standard deviation $s$ of $P^*(x)$. \item Add lines to your plot from part (f) to mark $x=m-2s$ and $x=m+2s$. Do your simulated trajectories tend to remain within these bounds after reaching the pseudo-stationary distribution? \end{enumerate} \end{document}
{ "alphanum_fraction": 0.7337321008, "avg_line_length": 72.5921052632, "ext": "tex", "hexsha": "8d81a9b4413a661ed99ed5d50045e3da7fcd963a", "lang": "TeX", "max_forks_count": 3, "max_forks_repo_forks_event_max_datetime": "2020-03-25T14:42:10.000Z", "max_forks_repo_forks_event_min_datetime": "2017-01-20T17:43:51.000Z", "max_forks_repo_head_hexsha": "95ad58ec50ef79d084e71f4380fbfbf5e1603836", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "mewahl/intro-systems-biology", "max_forks_repo_path": "problem set keys/ps8/mcb 135 problem set 8.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "95ad58ec50ef79d084e71f4380fbfbf5e1603836", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "mewahl/intro-systems-biology", "max_issues_repo_path": "problem set keys/ps8/mcb 135 problem set 8.tex", "max_line_length": 782, "max_stars_count": 3, "max_stars_repo_head_hexsha": "95ad58ec50ef79d084e71f4380fbfbf5e1603836", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "mewahl/intro-systems-biology", "max_stars_repo_path": "problem sets/ps8/mcb 135 problem set 8.tex", "max_stars_repo_stars_event_max_datetime": "2019-01-31T17:23:09.000Z", "max_stars_repo_stars_event_min_datetime": "2017-01-20T17:43:31.000Z", "num_tokens": 1568, "size": 5517 }
\section{Implementation}
\label{sec:impl}
% Summarize the section
Within this section a defined set of project milestones that divide the problem description into smaller parts is presented. The project's prototyping efforts with the chosen hardware are then thoroughly covered: issues encountered, how these were resolved, and ultimately the final design choices. Lastly, the system's software components are covered: what they are supposed to do, how they work, and how they relate to other components.

% Summarize where project source can be found and how to interpret file references.
The source for this project (as well as the source code for this report) is available at the git repository hosted publicly at \href{https://github.com/tmplt/ed7039e}{github.com/tmplt/ed7039e}. If not otherwise specified, any references to a repository shall mean this repository. Any and all references to files/directories will be to paths relative to the repository root. For example, \texttt{report/} and \texttt{src/lcm-source-dwm.c} refer to \href{https://github.com/tmplt/ed7039e/tree/master/report}{github.com/tmplt/ed7039e/tree/master/report} and \href{https://github.com/tmplt/ed7039e/tree/master/src/lcm-source-dwm.c}{github.com/tmplt/ed7039e/tree/master/src/lcm-source-dwm.c}, respectively.

% We chose to implement our System on a Raspberry Pi.
% This means our system is not in real-time (Linux too complex, other reasons)
% Allows people not versed in embedded systems to write implementations
% A proper implementation would be on a micro-controller that allows code to be run bare-metal, without having to fight with the Linux kernel.

\subsection{Milestones}
\label{sec:milestones}
The project was divided into four milestones:
\begin{enumerate}
\item \textbf{Two-dimensional navigation:} the system should be able to determine its coordinates in an ad-hoc, localized grid. From its initial position, it should then be able to respond to movement commands of the form ``move to position $(x, y)$''.
\item \textbf{Navigation-line detection:} using the subsystem for two-dimensional navigation, the system is to cross a line on the floor, thus detecting it, and then follow it towards the station.
\item \textbf{Station proximity detection, object pickup:} once the navigation-line is being followed, the system is to sense when it is sufficiently close to the station to readily use its arm to pick the object up.
\item \textbf{Object displacement, drop-off:} after the object has been picked up, the system is to move to another station, find its navigation-line, follow it, and drop the object. Note that this milestone is a permutation of the combination of the previous milestones: the same phases should be done in the same order, but the system is to move to the second station instead and execute the pickup-process in reverse.
\end{enumerate}

\subsection{Prototyping}
This section thoroughly covers the prototyping efforts regarding both hardware and software of the project, particularly that of data acquisition and processing.

\subsubsection{NixOS}
% Introduce NixOS and what it gives us
In this project it was decided that NixOS should be used as the operating system on which to run our software. NixOS is a Linux distribution (henceforth referred to as a ``distro'') based upon the Nix package manager (and the terms will be used interchangeably henceforth) that aims to be
\begin{inline-enum}
\item reproducible: ``packages [are built] in isolation from each other.
[\ldots] they are reproducible and don't have any undeclared dependencies.''\footnote{A positive side-effect of this feature is the complete mitigation of ``dependency hell'': a term relating to a set of problems that commonly arise if multiple versions of a dependency are installed on a system.}; this means that if a package works on one machine, it will work on any machine\footnote{As long as the machines' hardware architectures are all supported.}. Also, when building a package on multiple machines, all machines will yield the exact same output file tree.
\item declarative: packages are described in expressions that are trivially shared and combined with other package declarations; and
\item reliable: ``installing or upgrading one package cannot break other packages''.
\end{inline-enum}~\parencite{nixos.org}

% What is the effect of the above?
Effectively, NixOS allows its user to functionally declare their system in a single expression (or multiple expressions) --- that is: Nix enables the user to describe the software components of a system in a common manner and how the components relate to one another. The realization of these expressions can be seen in the repository's \texttt{*.nix} files. Of particular interest is \texttt{mmc-image.nix}: evaluating this expression generates a bootable image readily flashed onto a MultiMediaCard (MMC)\footnote{Commonly referred to as: SD card, memory card.} that contains the full software stack and operation instructions of the project's robot system. In combination with git, the custom software is not only checked into version control, but so are all dependencies and the complete system behavior.

% How does it differ from a conventional distro?
NixOS stands in contrast to the conventional Linux distribution, where changes are made to the system via iterative global changes to the system state; the installation and configuration of software piece-by-piece, for example. In this development mode, it is common to apply ``small changes'' that eventually coalesce into a significant diversion from the originally intended system behavior (a behavior that likely is not the same a few project iterations later) --- changes made are not always documented, which creates a dependency on the ever-changing global state of the system (that is, the file system in which the system is prototyped/implemented). Of note within this mode is that if the file system is lost, many hours of work may have been wasted if necessary precautions were not adhered to (by all project members) from the start of the project (such as writing a script that applies all global state changes from a fresh install, for example). The usage of NixOS theoretically \textit{forces} its user to adhere to such precautions \textit{if} combined with version control.

% What are the negatives of NixOS?
NixOS is no silver bullet, however. Because of its design, whenever something is to be implemented on NixOS it must be done properly the first time. Its non-adherence to the Filesystem Hierarchy Standard (FHS)\footnote{the existence and FHS-defined usage of \texttt{/usr}, \texttt{/lib}, \texttt{/var}, and other Unix-standard system directories.} breaks both the build and execution process of many programs. Unless a program is portably written, it must be patched before it can be used on NixOS, where all system files exist under a common prefix (\texttt{/nix/store}) which is required to enable the features enumerated in the previous paragraphs.
% Hardware issues
Another problem is the distro's relative infancy compared to other distros, particularly when it comes to hardware support. NixOS builds with a generic Linux kernel by default. A generic kernel is expected to run on a generic system. Because of the availability of hardware peripherals such as general-purpose input/output pins, GPIO; universal asynchronous receiver-transmitter, UART; serial peripheral interface, SPI; and others on the Raspberry Pi which is used in this project, the project's system is not generic. The result of this is that these peripherals, which are all required, simply do not work. Fortunately, some work has already been done (and is still being done) by the NixOS community to remedy this. By applying a so-called device-tree overlay on the Linux kernel (and thus describing how an otherwise unknown peripheral may be utilized) SPI was successfully enabled, which was required to actuate the robot's motors. Other required components could be utilized with the help of the Raspberry Pi's USB port (fortunately a generic peripheral). Had it been decided to use a distro officially supported by the Raspberry Pi, all of these peripherals would simply work out of the box, but that would result in a loss of the features previously enumerated.

% What about the RPi kernels?
Some non-generic kernels are offered by NixOS that imply support for Raspberry Pis, but offer none in practice. The reason for these kernels' availability is presently unknown. Inspecting the expressions that define these kernels, however, it is found that they apply some non-generic procedures to enable a proper boot sequence of the hardware. It may be the case that these were added without any consideration of full peripheral support.

% We are however satisfied for prototyping purposes.
While it is technically possible to enable proper hardware support for all required peripherals in the kernel, the project is on a deadline. As such, we are satisfied even though only SPI is properly supported; remaining peripherals can instead be utilized with the help of dedicated hardware that offers a USB-interface.

\subsubsection{System-external services}
% What external services do we offer?
To ease the prototyping efforts on the Raspberry Pi, a convenience system service was drafted up with the help of Nix: by declaring an expression that contains the login credentials to the university's wireless network (eduroam), and a system service that automatically establishes a reverse SSH proxy to a project-controlled server with a static IP address, the network limitations of eduroam\footnote{Particularly that of network address translation (NAT), where a server behind NAT may not be accessed from the outside unless some control of the firewall is at hand, which we did not have.} were mostly side-stepped.

% And this resulted in what?
Effectively, this reverse proxy allowed any project member to access the Raspberry Pi via SSH from anywhere with an Internet connection, which helped both during implementation and debugging.

% TODO: note that the SSH proxy only works for a single RPi. We can't
% connect to two different RPis at the same time.
% TODO: note that SSH thinks a MitM is going on because system ID
% changes with every rebuild.

\subsubsection{Decawave}
% What is the decawave? How does it work? Summarize
This project uses Decawave (or more specifically: a DWM1001 development board) for two-dimensional positioning.
The development board consists of an ultra wide-band module, the DWM1001C, an accelerometer, and a Raspberry Pi-compatible GPIO-header. With the help of a set of ``anchors''\footnote{An anchor is another development board configured for static installation at a known coordinate, used for position calculation. A non-static device with unknown coordinates (as is used in our system) is known as a tag.} a ``tag'' is capable of determining its position relative to the connected anchors by calculating the time-of-flight of messages sent to and from the anchors with satisfactory accuracy. Thus, by using the development board's (henceforth referred to as a/the ``DWM'') USB-interface, position and acceleration data is queried and used to determine the relative position of the robot in the mimicked factory.

% Describe the TLV API we wanted to use.
The DWM exposes two modes over its USB-interface which can be used to extract data of interest. One of the modes is a type-length-value (TLV) API which is very suitable for automated interaction: to extract data using this mode, one need only write three bytes of the form \texttt{\textbf{MSB}}, \texttt{\textbf{MSB-1}}, \texttt{\textbf{MSB-2}}, \texttt{\textbf{...}} where:
\begin{description}
\item[\texttt{MSB}] is the \textit{type} of data one wants to send. \texttt{0x40} is specified to call a function via the API.
\item[\texttt{MSB-1}] is the \textit{value} one wants to send. \texttt{0x02} is specified (for example) to call a function asking for the DWM position.
\item[\texttt{MSB-2}] is the length of the data one wants to send. Function \texttt{0x02} takes no payload, so \texttt{0x00} is specified here.
\item[\texttt{...}] would be a payload of length \texttt{\textbf{MSB-2}} had the function type taken a payload of non-zero size.
\end{description}
One sequence of these bytes constitutes a TLV ``frame''.

% Explain the TLV frames, and how nice they are to work with.
After this frame has been written over the serial connection, two TLV frames are received in response: the first (of length 3 bytes) denotes whether the function call was successful and the second (at least 3 bytes long) the function return data. If the first frame says success, the next two bytes are read, from which it is derived what kind of data the remaining incoming bytes should be interpreted as, and how many more should be read. Implementation-wise, always knowing how much data to read allows for performant I/O\footnote{input/output operations: many of which require calling system functions; this is a relatively costly operation.} and easier error handling. It also allows easy construction of subsequent system messages (see sections~\ref{sec:ROS} and \ref{sec:LCM}), because they are just a single \texttt{memcpy(3)}\footnote{\texttt{memcpy} - copy memory area: a common (and performant) operation where a given amount of bytes are copied from a source address to a destination address.} away as the byte stream can be trivially represented as a \texttt{struct} following the API documentation.
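To make this interaction concrete, the following is a minimal Python sketch of one such exchange (illustrative only --- the project's actual implementation is the C program in \texttt{src/lcm-source-dwm.c}). It uses the byte values from the description above and assumes the \texttt{pyserial} library and that the DWM enumerates as \texttt{/dev/ttyACM0}; the baud rate is an assumption and is largely ignored by the USB CDC interface.
\begin{verbatim}
# Sketch only: write one TLV request frame and read the two
# response frames, following the byte layout described above.
# Assumptions: pyserial is installed, the DWM shows up as
# /dev/ttyACM0, and 115200 baud.
import serial

with serial.Serial("/dev/ttyACM0", 115200, timeout=1) as dwm:
    # Request frame: type, value, length (no payload).
    dwm.write(bytes([0x40, 0x02, 0x00]))

    # First response frame (3 bytes): did the call succeed?
    status = dwm.read(3)

    # Second response frame: two bytes tell us what the return
    # data is and how many more bytes to read for it.
    kind, length = dwm.read(2)
    payload = dwm.read(length)
\end{verbatim}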
% No access to acceleration data using the TLV API; generic shell required.
Unfortunately, the TLV API does not expose a function for reading the accelerometer data, which is required to estimate the direction of our robot. A command is, however, readily available via the interactive shell mode. This mode instead replies in a human-readable format, but without telling the caller how many bytes are to be read beforehand, nor in what way to interpret the data read. Fortunately, when the shell is ready to process new input, it writes a ``dwm>'' shell prompt; bytes can simply be read until this string is encountered. This ultimately boils down to more I/O operations and a parsing procedure upon data reception, an operation that is slower than a mere \texttt{memcpy}, but is nevertheless readily available via a proper call to the \texttt{scanf(3)} family of functions. An alternative approach where both the TLV API and the shell mode were used was tested but ultimately scrapped: the time to transition from the TLV API to the shell mode took approximately $1$~s, which broke the robot system's $10$~Hz requirement.

\textit{Why} accelerometer data can be queried in one mode but not the other is presently unknown. It is surmised that the existence of the shell mode is for debugging purposes only (because of its human-readable format --- and its insistence on using different methods of formatting for similar data types) and that accelerometer data is omitted from the TLV API because it is used by the internal localization functions to detect when the hardware is stationary, or because the developers simply did not foresee this kind of utilization, or both.

Nevertheless, the data received when asking for the position is a tuple of $(x, y, z, q)$, where
\begin{description}
\item[$(x, y, z)$] is the reported coordinate in millimeters in three-dimensional space, and
\item[$q$] is the quality factor: a measure of how sure the device is of the coordinates.
\end{description}
Of note is that the device cannot approximate its position unless it can connect to at least three anchors. Additionally, the quality factor, $q$, is higher when connected to four anchors, but follows no other pattern; the DWM manual contains no formula for its derivation and questions posed by other users of the hardware on official forums have been met with inconclusive answers from the vendor. $q$ can thus not be used as a fully qualitative factor of $(x, y, z)$, but more as an indicator of whether the coordinates are ``good'' or ``bad''. Alternatively as a simple indicator whether the DWM is connected to three or four anchors. Because of the three-anchor dependency, a system state where fewer than three anchors are available must be considered: what should be done when the robot does not know where it is located? How can anchor communication be established?

Additionally, the data received when asking for the acceleration is a tuple of $(x, y, z)$, where
\begin{description}
\item[$(x, y, z)$] is the reported acceleration in three-dimensional space described as raw registers.
\end{description}
% TODO: explain how the register values are calculated to m/s^2
These raw register values are then converted to SI units using a known formula. See \verb|src/lcm-source-dwm.c| for the procedures that communicate with the DWM.

% The data can be considered a random process.
% (X, Y, Z, Q); how is Q calculated?
% What should we do if we cannot connect to 4 anchors at once, a wait?
% Mention that:
% - we have to account for the fact when we tag cannot connect to at least 3 anchors.
% - Qualitative data depends a lot on the positioning of the anchors
% - Built-in 3-axis accelerometer
% - Raspberry Pi compatible GPIO header. Communication via UART.
% - How should we interpret data? Is it a random process? Can we consider the noise Gaussian?
% TODO:
% RPi UART problems
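The shell interaction itself is simple enough to sketch. The following Python snippet is illustrative only (the project's implementation is in C): it assumes \texttt{pyserial}, that the DWM is already in shell mode, and that the accelerometer command is \texttt{av} --- both the command name and the reply format are assumptions, so the snippet merely extracts the first three signed integers printed before the next \texttt{dwm>} prompt.
\begin{verbatim}
# Sketch only: query the accelerometer via the interactive shell
# mode. Assumptions: pyserial, the DWM is already in shell mode,
# and the command is "av" (check the shell's help listing).
import re
import serial

def read_until_prompt(dwm, prompt=b"dwm> "):
    buf = b""
    while not buf.endswith(prompt):
        byte = dwm.read(1)
        if not byte:          # timeout
            break
        buf += byte
    return buf

with serial.Serial("/dev/ttyACM0", 115200, timeout=1) as dwm:
    dwm.write(b"av\r\n")
    reply = read_until_prompt(dwm).decode(errors="replace")
    # The reply format is not relied upon: just take the first
    # three signed integers as the raw x, y, z register values.
    x, y, z = map(int, re.findall(r"-?\d+", reply)[:3])
\end{verbatim}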
\subsubsection{BrickPi3}
\label{brickpi 3}
The BrickPi3 is a peripheral that allows a Raspberry Pi to work with LEGO Mindstorms hardware. It works by communicating via the SPI function pins of the Raspberry Pi. The recommended way to install all necessary components is via a \texttt{curl -k | bash}. There are a few issues with this approach:
\begin{inline-enum}
\item \verb|-k| is an alias for \verb|--insecure|; the recommended approach is thus to not verify the server certificate --- this allows a bad actor to feed the caller malicious code if they have access to their DNS or the target domain.
\item A \texttt{curl | bash} is bad practice for installation purposes as it commonly installs files that are disconnected from the system's package manager, thus putting the file system in a ``dirty'' state.
\item A \texttt{curl | bash} can be detected server-side and thus can conditionally feed a user malicious code. A download of the code first may thus pass a manual inspection before execution. \parencite{curl-bash}
\end{inline-enum}

Because the project uses NixOS, the content of the script had to be inspected so that an equivalent Nix expression could be written --- see the \texttt{brickpi3} attribute in \texttt{nix/derivations.nix} for the final result. Upon inspection, a few oddities stood out. The script:
\begin{enumerate}
\item expects and requires the script to be run by the user \texttt{pi}\footnote{Not all users of the peripheral are \texttt{pi}. For example, we use it as \texttt{root} while prototyping.};
\item changes the ownership of a directory and files under \texttt{/home/pi} to \texttt{pi} with \texttt{sudo(8)}\footnote{In this context, the operations could all have been done as \texttt{pi}.};
\item insecurely downloads multiple scripts and executes them silently --- the downloaded scripts do the same;
\item configures an \texttt{apt(8)} repository (and thus requires it to be run on a Debian-based distro) for \texttt{npm(1)}, the Node JavaScript package manager, but never installs or executes any JavaScript packages;
\item installs a C++ source file under \texttt{/usr/local/include}\footnote{A proper installation would be to build a shared library which can then be dynamically linked to when using the C++ drivers.};
\item downloads a precompiled version of \texttt{openocd(1)}, an on-chip debugger and programmer, and copies the files into system directories\footnote{No changes are made to the software according to the mirror's documentation. An installation should instead then be made with the package manager, which is otherwise used in the scripts to install other components.}, and then never uses it;
\item runs \texttt{git(1)} as a privileged user, sometimes.
\end{enumerate}
The above list is truncated for the sake of brevity. After a thorough manual inspection of all scripts it was found that only a single Python library (with a single dependency) had to be installed. The final Nix expression is thus a combination of two \texttt{python3Packages.buildPythonPackage} derivations where both sources are securely downloaded from official mirrors and verified with a known checksum. We conclude that the usage of this Nix expression leaves the system in a proper state (which the official installation script does not, by oddities 5 and 6\footnote{We consider a proper state of a system to be one in which all installed software components are tracked by the package manager(s).}) and greatly decreases the number of attack vectors with which to run malicious code on our system.
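Once the library is packaged, actuating a motor from Python requires very little code. The following sketch is illustrative only: the motor port (\texttt{PORT\_A}) and power level are assumptions and do not reflect the wiring or tuning of the actual robot.
\begin{verbatim}
# Sketch only: spin one motor through the BrickPi3 Python drivers
# packaged above and read back its encoder. The port assignment
# and power level are assumptions.
import time
import brickpi3

BP = brickpi3.BrickPi3()                 # talks to the shield over SPI
try:
    BP.set_motor_power(BP.PORT_A, 50)    # percent of full power
    time.sleep(1)
    print(BP.get_motor_encoder(BP.PORT_A))  # degrees turned so far
finally:
    BP.reset_all()                       # always leave the motors stopped
\end{verbatim}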
\subsubsection{RobotOS}
\label{sec:ROS}
% We wanted to use ROS as it was very common to the problem space, and had a lot of readily available solutions for common robot problems.
It was initially decided that the robot system would be implemented with RobotOS (ROS), ``a set of software libraries and tools that help you build robot applications.''\footnote{See \href{https://www.ros.org/}{https://www.ros.org/}.} The chief reason was its common application in the problem space, its API for communicating different types of messages between different programs\footnote{Known as inter-process communication (IPC).} (in this context known as ``nodes''), and the many readily available solutions to problems we were likely to stumble upon.

% Only officially supports very specific Ubuntu versions, and while probably very applicable to use Nix in this case, it was deemed
% composing ROS on Nix would take too long. (the dependency tree is HUGE)
However, ROS is only officially supported on very specific versions of Ubuntu (at this time of writing), a distro we were not using and one with a very different design philosophy from NixOS; using ROS on NixOS would thus require a Nix expression to be written that correctly packages the software. At this point, it was surmised that ROS made several assumptions about the global system state that had to be addressed during packaging. This reason alone would likely require a lot of prototyping time for a simple proof-of-concept execution. Consulting another effort to port ROS to an unofficial repository showed that a full desktop installation is constituted of up to 460 packages.\footnote{See \href{https://github.com/ros-noetic-arch}{https://github.com/ros-noetic-arch}, which packages RobotOS for Arch Linux, a distro that is not officially supported by the RobotOS project. Each repository corresponds to a ROS package.} It was thus decided to find an alternative to RobotOS due to time constraints.

\subsubsection{LCM}
\label{sec:LCM}
% Trivial to package: just a simple mkDerivation. Nodes are similarly easily packaged. See `nix/software-nodes.nix`.
Lightweight Communications and Marshalling (LCM) ``is a set of libraries and tools for message passing and data marshalling [\ldots] It provides a publish/subscribe message passing model and automatic marshalling/unmarshalling code generation with bindings for applications''. LCM effectively provides a set of simple functions that enable IPC with the benefit of not requiring a special-purpose daemon (as is required when running ROS). In contrast to ROS, LCM supports any GNU/Linux system (and thus NixOS). Its short list of dependencies made LCM trivial to package with Nix: the final \texttt{lcm} attribute in \texttt{nix/derivations.nix} can be summarized as a \texttt{stdenv.mkDerivation} and the whitelisting of a UDP port in the system firewall to enable its execution.

% Support for C and Python which we have decided to use thus far.
% The core component of ROS we wanted was the message-passing (IPC) component, which this library provides for ANY POSIX-compliant system.
Thus, because LCM:
\begin{inline-enum}
\item enables us to trivially utilize IPC with different message types;
\item has bindings for C and Python; and
\item is trivially packaged,
\end{inline-enum}
it was decided that the project's robot system would be implemented with its help in place of ROS.
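The publish/subscribe model is equally terse in use. The following Python sketch is illustrative only: raw bytes are used to keep it self-contained (real nodes would typically decode payloads with message types generated by \texttt{lcm-gen}), and the channel name is an assumption.
\begin{verbatim}
# Sketch only: a subscribing LCM node in Python. The channel name
# is an assumption; real nodes would use lcm-gen-generated types
# instead of raw bytes.
import lcm

def on_msg(channel, data):
    print("got %d bytes on %s" % (len(data), channel))

lc = lcm.LCM()
lc.subscribe("POSITION", on_msg)

# A publishing node uses the same handle type:
#   lc.publish("POSITION", payload_bytes)

while True:
    lc.handle()   # block until the next message and dispatch it
\end{verbatim}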
The design of the robot is based on the robot arm model from Lego's EV3 core set instructions, but is modified with continuous tracks on the base of the robot, a longer arm and some reinforcement to make the robot oscillate less when moving. Instead of using the included EV3 Intelligent Brick, a Raspberry Pi 3 together with two BrickPi3 motor shields, see section \ref{brickpi 3}, was used to control the robot. The motors moving the arm were EV3 large servo motors, and an EV3 medium servo motor was used for the gripping mechanism.
\subsection{Mobile platform and trailer}
\subsubsection{Mobile Platform}
A simplified model of the mobile platform can be seen in Fig.~\ref{Mobile_platform_paint} and Fig.~\ref{Band_platform_coordinates}. The power from each motor is transmitted by a beam to three interconnected gears which in turn are encircled by a rubber band. This configuration creates two continuous tracks, each powered by one motor. Because each continuous track is mounted parallel to the platform, the robot is limited to differential-drive movement. As a consequence, the torque in each motor needs to be changed from its initial value in order for the robot to make a turn and change direction from its initial path.
\begin{figure}[h] \centering \includegraphics[width=\linewidth]{sections/assets/Mobile_platform_paint_text3_horiz.PNG} \caption{The Mobile Platform seen from above.} \label{Mobile_platform_paint} \end{figure}
\begin{figure}[h] \centering \includegraphics[width =\linewidth]{sections/assets/Band_platform_coordinates.PNG} \caption{The Mobile Platform seen from the side of one track.} \label{Band_platform_coordinates} \end{figure}
\subsubsection{Trailer}
In order to power each servo motor and the Raspberry Pi, two battery packs with 8 batteries each were used. Since these battery packs together with the Raspberry Pi and the two BrickPi3 motor shields were too large to place on the mobile platform, a trailer was built and mounted behind the platform with the devices stacked on it. The trailer can be seen attached to the mobile platform in Fig.~\ref{Trailer}.
\begin{figure}[h] \centering \includegraphics[width =\linewidth]{sections/assets/Trailer.PNG} \caption{The trailer attached to the mobile platform.} \label{Trailer} \end{figure}
\subsection{Line follower}
The docking sequence of the robot's movement was implemented using a line follower. A QTRX reflectance sensor array from Pololu was used together with a black line to guide the robot close to the factory and to stop it at a perpendicular angle to the factory. During the project, a connection issue occurred between the Raspberry Pi and the sensor array because NixOS could not handle the incoming data. This issue was resolved by buying an additional breakout board which has its own GPIO pins and can be connected to the Raspberry Pi 3 through another medium, in this case a USB cable. The breakout board used in this project was the Adafruit FT232H Breakout.
\subsubsection{Adafruit FT232H breakout}
The Adafruit FT232H Breakout is a breakout board from Adafruit which contains the FT232H chip from FTDI and has one port each for a USB-C and a STEMMA QT connector. The board is also equipped with 16 GPIO pins and can speak many protocols, including SPI, I2C, serial UART and JTAG. In the project, this board was used to transfer GPIO signals from the individual sensors of a sensor array to the Raspberry Pi computer.
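To illustrate how the breakout board can be read from the Raspberry Pi, the sketch below polls a few digital GPIO pins on the FT232H through Adafruit's Blinka library. This is a hypothetical minimal example and not the project's code: the pin assignment (\texttt{C0}--\texttt{C3}), the digital read-out and the use of Blinka (which requires the \texttt{BLINKA\_FT232H} environment variable to be set) are assumptions made for illustration only.
\begin{verbatim}
# Hypothetical sketch: polling four reflectance-sensor pins through an
# FT232H breakout with Adafruit Blinka (assumed; not the project's code).
# Requires the environment variable BLINKA_FT232H=1 before running.
import board      # Blinka pin definitions (C0..C7, D4..D7 on the FT232H)
import digitalio

# The pin assignment below is an assumption for illustration only.
SENSOR_PINS = [board.C0, board.C1, board.C2, board.C3]

def setup_sensors():
    """Configure the four sensor pins as digital inputs."""
    sensors = []
    for pin in SENSOR_PINS:
        s = digitalio.DigitalInOut(pin)
        s.direction = digitalio.Direction.INPUT
        sensors.append(s)
    return sensors

def read_states(sensors):
    """Return one boolean per sensor (polarity depends on the wiring)."""
    return [s.value for s in sensors]

if __name__ == "__main__":
    print(read_states(setup_sensors()))
\end{verbatim}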
\begin{figure}[h] \centering \includegraphics[width =\linewidth]{sections/assets/FT232H.jpg} \caption{FT232H Breakout.} \label{FT232H} \end{figure}
\subsubsection{Sensor Array}
The sensor array used in this project was a QTRX-MD-16A reflectance array from Pololu. Each sensor on the array consists of a phototransistor paired with an IR LED which keeps emitting light while the sensor array module is powered on. The signal from each phototransistor is transmitted from the sensor array as an analog voltage.
\begin{figure}[h] \centering \includegraphics[width =\linewidth]{sections/assets/Sensor_array.jpg} \caption{QTRX-MD-16A.} \label{Sensor_array} \end{figure}
\subsubsection{Implementation of routine}
As mentioned earlier, the signal from the sensor array was transmitted through the breakout board to the Raspberry Pi; this signal included the state of each phototransistor. The state of each transistor is reported as TRUE if the black line is underneath it and FALSE if the transistor only detects a white surface underneath. Since a pre-owned sensor array was used, only 4 functioning phototransistor/LED pairs that were close to each other were available, limiting the line following procedure to 4 reference points. More reference points might have been beneficial in the case of sharp angles and higher speeds. However, since the line only triggers 2 or fewer reference points at any time, we determined that the sensor array was good enough for the given task. The position of the line underneath the sensor array was determined by assigning a weight to each phototransistor. The weight of each sensor corresponded to its position, from the leftmost to the rightmost position in the robot's forward direction, giving the first sensor the weight 1 and the fourth sensor the weight 4. By summing the weights of the transistors that detected the line and dividing this value by the number of transistors that detected the line, a mean value of the active weights is obtained, see Eq.~\eqref{Line_Position}. \(M\) is the mean value of the weights of the transistors that detected the line. The value of each \(S_n\) is 1 if the transistor detected a line underneath it and 0 if a white surface was detected. \begin{equation} M=\frac{1\cdot S_1+2\cdot S_2+3\cdot S_3+4\cdot S_4}{S_1+S_2+S_3+S_4} \label{Line_Position} \end{equation} When the line is perpendicular to the mobile platform and underneath the two middle phototransistors, the mean value will be 2.5. This value is taken as the reference value of the sensor array, which we denote by \(R\). Whenever the mean value from the sensor array differs from it, power is added to one servo motor in the mobile platform and reduced from the other. The difference between the reference and the mean value of the sensor array will be called the error, see Eq.~\eqref{Error_Position}. \begin{equation} E=R-M \label{Error_Position} \end{equation} An example of how the error is calculated using Eq.~\eqref{Line_Position} and \eqref{Error_Position} together with data from the diodes can be seen in Fig.~\ref{Line_Error_ex}. \begin{figure}[h] \centering \includegraphics[width =\linewidth]{sections/assets/Line_Error_ex.PNG} \caption{Calculation of the error in the case where the two phototransistors on the right are detecting the line.} \label{Line_Error_ex} \end{figure} By creating a controller that minimizes this error, the robot is made to follow the line.
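As a concrete illustration of Eq.~\eqref{Line_Position} and Eq.~\eqref{Error_Position}, the following minimal sketch computes the weighted mean position of the line and the resulting error from four boolean sensor states. The function names are ours and the reference value \(R = 2.5\) is the one given in the text; this is not the project's actual implementation.
\begin{verbatim}
# Minimal sketch of the line-position and error computation described above.
# The sensor states are booleans: True if the black line is under the sensor.

REFERENCE = 2.5  # mean value when the two middle sensors see the line

def line_position(states):
    """Weighted mean M of the active sensors (weights 1..4), Eq. (Line_Position)."""
    weights = [i + 1 for i in range(len(states))]      # 1, 2, 3, 4
    active = [w for w, s in zip(weights, states) if s]
    if not active:
        return None  # no line detected under any sensor
    return sum(active) / len(active)

def line_error(states, reference=REFERENCE):
    """Error E = R - M, Eq. (Error_Position); None if the line is not seen."""
    m = line_position(states)
    return None if m is None else reference - m

# Example: the two rightmost sensors detect the line -> M = 3.5, E = -1.0
print(line_error([False, False, True, True]))
\end{verbatim}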
\subsection{Robot arm}
\label{sec:simon2}
A simplified model of the robot arm can be seen in Fig.~\ref{Arm_model}, showing the measured distances \(d_1\) to \(d_7\) and the vertical angle theta. In Fig.~\ref{arm_overview} an overview of the robot model can be seen, showing how the angle beta was specified.
\begin{figure*}[h] \centering \includegraphics[width=\linewidth]{sections/assets/Arm_model.png} \caption{Arm model side view.} \label{Arm_model} \end{figure*}
\begin{figure}[h] \centering \includegraphics[width=\linewidth]{sections/assets/Arm_overview.png} \caption{Arm model overview.} \label{arm_overview} \end{figure}
The robot arm can rotate in the xy-plane around the \(z_0\) axis, which can be seen in Fig.~\ref{Arm_model}, by an angle beta using an EV3 large servo motor mounted inside the base of the robot. The upper part of the robot, from wheel1 to the gripper claw, can also rotate vertically in the xz-plane using another EV3 large servo motor mounted between wheel0 and the point on the base from which the axis \(z_0\) originates. The gripper claw is driven with an EV3 medium servo motor since it weighs less. The distances \(d_0\) to \(d_7\) were measured with a ruler and can be seen in Table~\ref{Tab:distance_table}.
\begin{table}[h] \begin{center} \begin{tabular}{ |c|c| } \hline \(d_0\) & 8 cm \\ \(d_1\) & 1.5 cm \\ \(d_2\) & 11.7 cm \\ \(d_3\) & 2.2 cm\\ \(d_4\) & 2.2 cm\\ \(d_5\) & 23.5 cm\\ \(d_6\) & 1.5 cm\\ \(d_7\) & 11 cm \\ \hline \end{tabular} \end{center} \caption{Table of measured distances \(d_0\) to \(d_7\).} \label{Tab:distance_table} \end{table}
\subsubsection{Forward kinematics}
\label{sec:simon3}
To be able to describe where in the world the gripper claw is, a problem often referred to as the forward kinematics problem \parencite{Spong2004} had to be solved. This was done with a series of homogeneous transformations, each representing a change in position and/or orientation relative to what is usually referred to as a frame. The first homogeneous transformation was from the level of the floor to the top of the robot platform, over a distance \(d_0\), which can be seen as a pure translation in the z-direction and can be described as in Eq.~\eqref{T01}, according to \parencite{Spong2004}. \begin{equation} T_{01} = \begin{bmatrix} 1&0&0&0\\ 0&1&0&0\\ 0&0&1&d_0\\ 0&0&0&1\\ \end{bmatrix} \label{T01} \end{equation} Then follows a transformation from the rotational center of the robot to where link \(d_2\) is connected to the base of the robot, which can be expressed as a pure translation in the x-direction. This can be seen in Eq.~\eqref{T12}, \begin{equation} T_{12} = \begin{bmatrix} 1&0&0&d_1\\ 0&1&0&0\\ 0&0&1&0\\ 0&0&0&1\\ \end{bmatrix} \label{T12} \end{equation} followed by a transformation from the beginning of link \(d_2\) to the center of wheel0.
This was done by first making a pure rotation around the z-axis by an angle beta, after which a pure translation in the z-direction by a distance \(d_2\cdot sin(\pi-alpha)\) was made, followed by a translation in the x-direction by a distance \(d_2\cdot cos(\pi - alpha)\), as can be seen in Eq.~\eqref{T23}. \begin{equation} \resizebox{0.8\hsize}{!}{$ T_{23} = \begin{bmatrix} cos(beta)&-sin(beta)&0&0\\ sin(beta)&cos(beta)&0&0\\ 0&0&1&0\\ 0&0&0&1\\ \end{bmatrix} \cdot \begin{bmatrix} 1&0&0&0\\ 0&1&0&0\\ 0&0&1&d_2\cdot sin(\pi-alpha)\\ 0&0&0&1\\ \end{bmatrix} \cdot \begin{bmatrix} 1&0&0&d_2\cdot cos(\pi-alpha)\\ 0&1&0&0\\ 0&0&1&0\\ 0&0&0&1\\ \end{bmatrix} $ } \label{T23} \end{equation} This was followed by a translation in the z-direction by the distance \(d_3\) between the center of wheel0 and the center of wheel1, see Eq.~\eqref{T34}. \begin{equation} T_{34} = \begin{bmatrix} 1&0&0&0\\ 0&1&0&0\\ 0&0&1&d_3\\ 0&0&0&1\\ \end{bmatrix} \label{T34} \end{equation} After this, a rotation of the coordinate frame in the center of wheel1 was made by an angle -theta around the y-axis, followed by a translation in the x-direction of the frame in wheel1 by a distance \(d_5 + d_6\), see Eq.~\eqref{T46}. \begin{equation} T_{46} = \resizebox{0.8\hsize}{!}{$ \begin{bmatrix} cos(-theta)&0&sin(-theta)&0\\ 0&1&0&0\\ -sin(-theta)&0&cos(-theta)&0\\ 0&0&0&1\\ \end{bmatrix} \cdot \begin{bmatrix} 1&0&0&d_5 + d_6\\ 0&1&0&0\\ 0&0&1&0\\ 0&0&0&1\\ \end{bmatrix} $ } \label{T46} \end{equation} Due to the mechanics of the robot model the gripper claw always points straight downwards, independent of the angle theta. Therefore the coordinate system in the middle of the medium motor connected to link \(d_5\) was rotated back by a positive angle theta before it was translated a distance \(-d_7\) in the z-direction, as can be seen in Eq.~\eqref{T67}. \begin{equation} \resizebox{0.8\hsize}{!}{$ T_{67} = \begin{bmatrix} cos(theta)&0&sin(theta)&0\\ 0&1&0&0\\ -sin(theta)&0&cos(theta)&0\\ 0&0&0&1\\ \end{bmatrix} \cdot \begin{bmatrix} 1&0&0&0\\ 0&1&0&0\\ 0&0&1&-d_7\\ 0&0&0&1\\ \end{bmatrix} $ } \label{T67} \end{equation} The pose of the gripper claw can then be expressed as in Eq.~\eqref{T_tot}, where both its position and orientation relative to the robot platform are described. \begin{equation} T = T_{01}\cdot T_{12}\cdot T_{23} \cdot T_{34} \cdot T_{46} \cdot T_{67} \label{T_tot} \end{equation}
\subsubsection{Inverse kinematics}
\label{sec:simon4}
After being able to describe the position and orientation of the gripper, it was necessary to have a way of calculating the desired angles theta and beta of the arm to get the gripper claw to a desired position. This problem is often referred to as the inverse kinematics problem.
For this robot the angle theta was directly determined by the height of the desired gripper claw position, which was calculated according to Eq.~\eqref{inv_theta_calc_1} to \eqref{inv_theta_calc_5}; the distances \(h_1\) to \(h_3\) can be seen in Fig.~\ref{inv_theta_img}. \begin{figure}[h] \centering \includegraphics[width=\linewidth]{sections/assets/Arm_model_inv_theta.png} \caption{Distances \(h_1\) to \(h_3\) on the arm model.} \label{inv_theta_img} \end{figure} \begin{equation} Z_g = h_1+h_2 - h_3 \label{inv_theta_calc_1} \end{equation} and from Fig.~\ref{inv_theta_img} it can be seen that \begin{equation} h_1 = d_2 \cdot sin(alpha) \label{inv_theta_calc_2} \end{equation} \begin{equation} h_2 = (d_5 + d_6) \cdot sin(theta) \label{inv_theta_calc_3} \end{equation} \begin{equation} h_3 = d_7 \label{inv_theta_calc_4} \end{equation} Substituting \(h_1\) to \(h_3\) with the expressions from Eq.~\eqref{inv_theta_calc_2} to \eqref{inv_theta_calc_4}, the angle theta can be solved for as shown in Eq.~\eqref{inv_theta_calc_5}. \begin{equation} theta = sin^{-1}(\frac{Z_g - d_2 \cdot sin(alpha) + d_7}{(d_5 + d_6)}) \label{inv_theta_calc_5} \end{equation} Since the arm can rotate horizontally it can reach points on a circle around itself, see Fig.~\ref{inv_beta_radius_img}, where the radius of that circle, with the rotational center of the robot as the center of the circle, depends on the angle theta in the way described in Eq.~\eqref{beta_radius_eq}. \begin{figure}[h] \centering \includegraphics[width=\linewidth]{sections/assets/inv_beta_radius.png} \caption{The circle reachable by the gripper claw around the rotational center of the robot.} \label{inv_beta_radius_img} \end{figure} \begin{equation} r = (d_5 + d_6)\cdot cos(theta) - d_2\cdot cos(alpha) + d_1 \label{beta_radius_eq} \end{equation} The calculation of the x and y positions on a circle can be seen in Eq.~\eqref{circle_x} and \eqref{circle_y}. \begin{equation} x = r\cdot cos(beta) \label{circle_x} \end{equation} \begin{equation} y = r\cdot sin(beta) \label{circle_y} \end{equation} Substituting r with the expression from Eq.~\eqref{beta_radius_eq}, the x and y positions on the circle around the robot can be calculated as in Eq.~\eqref{circle_x_robot} and \eqref{circle_y_robot}. \begin{equation} \resizebox{0.8\hsize}{!}{$ x = ((d_5 + d_6)\cdot cos(theta) - d_2\cdot cos(alpha) + d_1)\cdot cos(beta) $ } \label{circle_x_robot} \end{equation} \begin{equation} \resizebox{0.8\hsize}{!}{$ y = ((d_5 + d_6)\cdot cos(theta) - d_2\cdot cos(alpha) + d_1)\cdot sin(beta) $ } \label{circle_y_robot} \end{equation} Solving Eq.~\eqref{circle_x_robot} and Eq.~\eqref{circle_y_robot} for beta, the following expressions were obtained: \begin{equation} \resizebox{0.8\hsize}{!}{$ beta = cos^{-1}(\frac{x}{(d_5 + d_6)\cdot cos(theta) - d_2\cdot cos(alpha) + d_1}) $ } \label{Beta_x_robot} \end{equation} \begin{equation} \resizebox{0.8\hsize}{!}{$ beta = sin^{-1}(\frac{y}{(d_5 + d_6)\cdot cos(theta) - d_2\cdot cos(alpha) + d_1}) $ } \label{Beta_y_robot} \end{equation} These two expressions both have a denominator which is equal to zero for an angle \(theta = \pm \frac{\pi}{2}\), two angles that the robot can never reach due to the way the Lego hardware is put together. From Eq.~\eqref{Beta_x_robot} and Eq.~\eqref{Beta_y_robot} it is clear that only the desired x or y position is needed to calculate beta; in this project the x positions were used.
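To make the inverse kinematics concrete, the sketch below evaluates Eq.~\eqref{inv_theta_calc_5} and Eq.~\eqref{Beta_x_robot} for a desired gripper position. It is an illustrative example only, not the project's code: the fixed angle alpha is left as a caller-supplied parameter, the distances are taken from Table~\ref{Tab:distance_table}, and only the domain checks of the inverse trigonometric functions are performed.
\begin{verbatim}
# Illustrative sketch of the inverse kinematics equations derived above.
# Distances (in cm) from the table; alpha is the fixed angle of link d2
# and must be supplied by the caller (assumed to be a known constant).
import math

D = {"d1": 1.5, "d2": 11.7, "d5": 23.5, "d6": 1.5, "d7": 11.0}

def inverse_kinematics(x_g, z_g, alpha):
    """Return (theta, beta) in radians for a desired gripper position.

    theta follows Eq. (inv_theta_calc_5), beta follows Eq. (Beta_x_robot).
    Raises ValueError if the position is outside the reachable workspace.
    """
    s = (z_g - D["d2"] * math.sin(alpha) + D["d7"]) / (D["d5"] + D["d6"])
    if not -1.0 <= s <= 1.0:
        raise ValueError("height outside the reachable workspace")
    theta = math.asin(s)

    r = (D["d5"] + D["d6"]) * math.cos(theta) \
        - D["d2"] * math.cos(alpha) + D["d1"]
    c = x_g / r
    if not -1.0 <= c <= 1.0:
        raise ValueError("x position outside the reachable circle")
    beta = math.acos(c)
    return theta, beta
\end{verbatim}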
\subsubsection{Resolution}
\label{sec:simon5}
As mentioned before, the motors used in the robot arm were two Lego EV3 large servo motors for moving the arm vertically and horizontally, and one Lego EV3 medium servo motor inside the gripper to pick up boxes on the miniature industry platform. Both the large and the medium motors have a resolution of 1 degree, meaning the motors give 360 readings per revolution. On the robot, the cogwheel attached to the motor controlling the vertical movement of the arm, see wheel0 in Fig.~\ref{Arm_model}, was connected to another cogwheel, see wheel1 in Fig.~\ref{Arm_model}, which had 5 times as many teeth, giving the arm itself 5 times higher resolution than the motor in vertical movement. In the same way the cogwheel attached to the motor controlling the horizontal movement of the arm was connected to another cogwheel with 3 times as many teeth, giving the arm 3 times higher resolution relative to the motor in horizontal movement.
\subsubsection{Initialization}
\label{sec:simon6}
The motors have no memory, which makes the robot unaware of the orientation of the arm on boot. Therefore an initialization process was made for the robot using two Lego EV3 touch sensors. On boot the robot moved the arm with constant angular speed in the beta direction until a Lego piece, moving with the arm, pressed one of the touch sensors at \(beta = -90^{\circ}\); then it stopped moving in that direction. In the same way the robot moved the arm in the positive theta direction until another Lego piece, getting closer to the other touch sensor as the arm moved, pressed the second touch sensor at \(theta = 36^{\circ}\); then it stopped in that direction. At the point when both touch sensors were pressed and the arm had stopped moving in both directions, the position of the arm was known.
\subsection{Robot software}
\label{sec:simon19}
Two BrickPi3 motor shields were used to drive the motors and sensors on the robot. A complete library for controlling Lego motors and sensors connected to the motor shields was developed by the manufacturers of the BrickPi3 together with the shields, and it was used to control the motors. Additionally, P, PI and PID controllers were developed in this project to compare with the functions in the BrickPi3 library for controlling the motors and the movement of the arm.
\subsubsection{P controller}
\label{sec:simon7}
A P controller, or proportional controller, is a feedback control structure where the output error is fed back to the motors to make them act proportionally to the error. In the P controller made for controlling the movement of the robot arm, an angular error was expressed, see Eq.~\eqref{ang_err}, \begin{equation} e_{\theta} = \theta_{desired} - \theta \label{ang_err} \end{equation} where \(\theta\) is the current angular position of the arm and \(\theta_{desired}\) is the goal angle. The angular error was then fed back to the motors to make them act on it in the way that can be seen in Eq.~\eqref{P_control}, \begin{equation} P = K_p\cdot e_{\theta} \label{P_control} \end{equation} where \(P\) is the motor speed, proportional to the angular error, and \(K_p\) is the proportional gain which determines how aggressive the controller should be.
\subsubsection{PI controller}
\label{sec:simon8}
A PI controller, or proportional-integral controller, is a proportional controller to which a term that integrates the error over time is added.
The integration was done in a discrete fashion where, in every loop of the running robot arm code, the newly calculated error was added to an integral error term, see Eq.~\eqref{integral_term}, \begin{equation} e_{integral} = e_{integral} + e_{\theta} \label{integral_term} \end{equation} where \(e_{integral}\) is the discrete approximation of the integrated error and \(e_{\theta}\) is the same as in Eq.~\eqref{ang_err}. This error adds up over time, and the feedback to the motors can be seen in Eq.~\eqref{PI_control}, \begin{equation} P = K_p\cdot e_{\theta} + K_i\cdot T_s\cdot e_{integral} \label{PI_control} \end{equation} where \(P\) is the motor speed, \(T_s\) is the sampling period of the code, which normalizes the integral term, and \(K_i\) is the integral gain which determines how much the motors should act on the integral error.
\subsubsection{PID controller}
\label{sec:simon9}
A PID, or proportional-integral-derivative controller, is a control structure where a proportional, an integral and a derivative term are combined. The differentiation was also done in a discrete fashion and was approximated as in Eq.~\eqref{derivative_term}, \begin{equation} e_{diff} = e_{old} - e_{\theta} \label{derivative_term} \end{equation} where \(e_{diff}\) is the discrete approximation of the differentiated error, \(e_{old}\) is the error from the previous loop of the code and \(e_{\theta}\) is the same as in Eq.~\eqref{ang_err}. The feedback to the motors was done according to Eq.~\eqref{PID_control}, \begin{equation} P = K_p\cdot e_{\theta} + K_i\cdot e_{integral} + K_d\cdot e_{diff} \label{PID_control} \end{equation} where \(K_d\) is the derivative gain which determines how much the motors should act on the differentiated error.
\subsubsection{Tuning of the controllers}
\label{sec:simon10}
The tuning of the controllers was done with a Ziegler--Nichols approach \parencite{FeedbackControl}, since the development of a dynamic model of the arm was not completed. The Ziegler--Nichols tuning method requires two measured parameters to be found. The first is the oscillation gain, i.e. the gain \(K_p\) of a proportional controller for which the output of the system acting on a step response shows undamped oscillations. This was found by making the robot arm act on a step response several times while changing the value of the proportional gain \(K_p\). The second measured parameter is the oscillation time \(T_{osc}\), i.e. the time it takes for the system to complete one undamped oscillation period. This was found by looking at a plotted graph of the undamped oscillations and counting the measured values in one period. The oscillation time could then be calculated as shown in Eq.~\eqref{osc_time}, \begin{equation} T_{osc} = n\cdot T_s \label{osc_time} \end{equation} where \(T_{osc}\) is the oscillation time, \(n\) is the number of measured values in one undamped oscillation period from the output of the step response and \(T_s\) is the sampling period of the robot.
\subsubsection{Controller for line following}
The same operating speed was chosen for both the right and left tracks of the mobile platform; the speed of each track does not change from the operating speed as long as the error calculated in Eq.~\eqref{Error_Position} remains zero. If the position of the line underneath the sensor array changes, the error grows and the speed of both tracks changes. The architecture of the controller which minimizes the error can be seen in Fig.~\ref{Control_system1}.
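A minimal sketch of how the discrete P, PI and PID feedback laws above can be implemented is given below. It illustrates Eqs.~\eqref{P_control}, \eqref{PI_control} and \eqref{PID_control} and is not the project's actual controller code; the class and variable names are ours, the derivative is computed as the per-sample change of the error (a common variant of Eq.~\eqref{derivative_term}), and the gains must still be tuned, e.g.\ with the Ziegler--Nichols procedure described above.
\begin{verbatim}
# Illustrative discrete PID controller following the equations above.
# Setting ki = kd = 0 gives a P controller, kd = 0 gives a PI controller.
class PID:
    def __init__(self, kp, ki=0.0, kd=0.0, ts=0.02):
        self.kp, self.ki, self.kd = kp, ki, kd
        self.ts = ts                # sampling period T_s
        self.e_integral = 0.0       # discrete approximation of the integral
        self.e_old = 0.0            # previous error, for the derivative term

    def update(self, desired, current):
        """Return the motor power for one sampling period."""
        e = desired - current                 # angular error, Eq. (ang_err)
        self.e_integral += e                  # Eq. (integral_term)
        e_diff = e - self.e_old               # per-sample change of the error
        self.e_old = e
        return (self.kp * e
                + self.ki * self.ts * self.e_integral
                + self.kd * e_diff)           # P/PI/PID feedback

# Example: proportional-only controller for the arm angle.
arm_controller = PID(kp=2.0)
power = arm_controller.update(desired=90.0, current=72.5)
\end{verbatim}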
\begin{figure}[h] \centering \includegraphics[width=\linewidth]{sections/assets/Control_system1.PNG} \caption{Block diagram of the PID controller constructed for the line following procedure.} \label{Control_system1} \end{figure}
\subsection{Software nodes}
\label{sec:nodes}
% What is a software node?
The usage of LCM allows us to define ``software nodes'', which were briefly introduced in section~\ref{sec:ROS}. A software node is a small stand-alone executable program, written in any language, that usually has a single purpose in the system the project implements. For example, one node in the system reads data from the DWM; another approximates its position using a Kalman filter; another actuates the motors to pick up the object the system aims to displace; etc. If a node depends on data from another node, the data is packaged in a message of a certain type and published on a channel. The depending node then subscribes to this channel, receives a message whenever one is published on that channel, and processes it.
% How do the structure of nodes help?
This structure has allowed us to write a set of smaller programs, which are usually easier to implement and debug across multiple project members, instead of maintaining a monolithic program that does everything, which is usually harder to both implement and debug, especially when multiple project members need to contribute.
% Here we'll summarize each node
Each node in the project follows the naming scheme \texttt{lcm-<type>-<desc>.<ext>} where \begin{description} \item[\texttt{<type>}] denotes the node type, one of three possible alternatives: \texttt{source}, which denotes that the node only provides messages to the system; \texttt{sink}, which only receives messages from the system; and \texttt{int} (for `intermediate'), which both receives and generates messages. \item[\texttt{<desc>}] is a list of words separated by \texttt{-} that provides a short description of the node's purpose. \item[\texttt{<ext>}] is the source file extension. It describes how the node should be built before it can be executed. For example, if the extension is \texttt{c} then the node was written in C and needs to be compiled and linked with a C compiler before the resulting binary can be executed. If the extension instead is \texttt{py} then the node was written in Python, and nothing needs to be done before the script can be executed, except for satisfying the script's dependencies. \end{description} The source files for all nodes can be found in \texttt{src/}. Fig.~\ref{fig:node-schema} contains a visualization of all system nodes and which other nodes they communicate with. Communication occurs over a common LCM channel name, where only one message type is expected on each channel. The message type can be derived from the channel name, and vice versa. This was done for the sake of simplicity; a minimal sketch of this publish/subscribe pattern is shown below.
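As an illustration of this convention, the following sketch shows the basic publish/subscribe pattern of LCM's Python bindings. It is a minimal, self-contained example: the channel name \texttt{EXAMPLE\_CHANNEL} and the raw byte payload are placeholders, whereas a real node would use a message type generated by \texttt{lcm-gen}.
\begin{verbatim}
# Minimal LCM publish/subscribe sketch (illustrative; real nodes use
# lcm-gen generated message types instead of raw bytes).
import lcm

CHANNEL = "EXAMPLE_CHANNEL"  # placeholder channel name

def handler(channel, data):
    """Called by LCM whenever a message arrives on the subscribed channel."""
    print("received %d bytes on %s" % (len(data), channel))

lc = lcm.LCM()
subscription = lc.subscribe(CHANNEL, handler)

# Publish one message and process incoming messages once.
lc.publish(CHANNEL, b"hello")
lc.handle()   # blocks until one message has been dispatched
\end{verbatim}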
\begin{figure}[h]
\centering
\begin{tikzpicture}[ node distance = 2cm, auto, ]
% input nodes
\node[block] (arrowhead) {Arrowhead};
\node[block, left of=arrowhead] (enc) {motor \\ encoders};
\node[block, left of=enc] (dwm) {DWM};
\node[block, right of=arrowhead] (line-follower) {line \\ follower};
% intermediate nodes
\node[block, yshift=-6cm] at ($(enc)!0.5!(arrowhead)$) (master) {master};
\node[block, yshift=-2cm] at ($(dwm)!0.5!(enc)$) (kalman) {Kalman \\ filter};
\node[block, below of=kalman] (state) {system \\ state};
%
% robot modes
\node[block, below of=master] (object-mode) {Object \\ mode};
\node[block, left of=object-mode, xshift=-2em] (dwm-mode) {DWM \\ mode};
\node[block, right of=object-mode, xshift=2em] (line-mode) {Line-follow \\ mode};
\path[<->] (kalman) edge (state) (master) edge (object-mode);
\path[->] (dwm) edge (kalman) (enc) edge (state) (state) edge (dwm-mode) (state) edge (master) (master) edge (dwm-mode) (master) edge (line-mode) (line-follower) edge (master) (line-follower) edge (line-mode) (enc) edge (object-mode) (arrowhead) edge (master);
\end{tikzpicture}
\caption{Relation chart over the system's nodes.}
\label{fig:node-schema}
\end{figure}
All system nodes (and the source files under \texttt{src} which define them) are described below. \begin{description} \item[DWM node (\texttt{lcm-source-dwm.c})] This node is responsible for extracting the position and acceleration data from the DWM. Upon execution it opens a serial connection to the DWM, enters the shell mode and allocates a buffer into which received data is read. Afterwards it enters a loop within which the shell command \texttt{av} is continuously called. While spinning in this loop, the node keeps the time and ensures that the command \texttt{apg} is called with a frequency of $10$~Hz. After the execution of each command: \begin{enumerate} \item the response is read into the buffer; \item the buffer is parsed for data of interest; \item the data is converted to SI units --- $\mathrm{m}$ for position coordinates, $\mathrm{m\,s^{-2}}$ for acceleration; and \item an appropriate message type is constructed and published onto either the \texttt{IO\_POSITION} or \texttt{IO\_ACCELERATION} channel. \end{enumerate} \item[Motor encoders node] \item[Arrowhead node (\texttt{lcm-source-arrowhead.py})] This node is responsible for listening for requests from the service provider and publishing the corresponding messages on LCM. The node runs a Flask application which listens for requests from the service provider. To trigger the functions in the Flask application a specific URL is required. The functions then forward the messages to other nodes through LCM. The node publishes a message with an action, ``pick up'' or ``place'', and a position, ``right'' or ``left''. The Flask application runs over HTTP, which is insecure. The secure mode (HTTPS) is not needed because the URL is called from the same device that the arrowhead node runs on. \item[Line follower node] \item[Kalman filter node (\texttt{lcm-int-dwm-filter.py})] \item[System state node (\texttt{lcm-int-system-state.py})] This node takes the filtered DWM data and the motor encoder data and estimates the new position, as described in \eqref{eq:Disc_EOM}. By knowing the old positions, the new positions and the elapsed time (sampling time) it is also possible to estimate the velocities. The $x$ and $y$ velocities are required for the Kalman filter to be able to make better estimates of the spatial position. This node also estimates the angle of attack and its derivative, i.e.
the angular velocity. \item[Master node (\texttt{lcm-int-master.py})] As seen in Fig.~\ref{fig:state_machine}, the system's behavior can be described as a finite state machine. The master node is responsible for ensuring that the correct mode node (DWM, object or line-follow) is running at the right time, and thus implements the referenced state machine description. For example, before switching to line-follow mode, the system's current state is compared to a set of points where the robot would be close enough to a station to find a line. If it is found that the system is too far away, the line-found event is ignored and logged to \texttt{stderr} for debugging purposes. \item[DWM mode node (\texttt{lcm-DWM-drive.py})] This is the node where the current position $(x, y, \theta)$ and the desired position are the inputs. With these inputs it is possible to calculate the angular and spatial errors for the movement, as described in \eqref{eq:cart2polar}. Two simple PID controllers are designed to make sure these errors go to zero, as shown in Fig.~\ref{fig:PID1} and Fig.~\ref{fig:V_PID}. The robot has arrived at the desired position when the errors are zero. \item[Object mode node] \item[Line-follow mode node] \end{description} Some debugging nodes are also defined, but they are external to the system-internal relations displayed in Fig.~\ref{fig:node-schema}: \begin{description} \item[Message spoofer (\texttt{lcm-debug-spoof-message.py})] While developing a node it is useful to ensure that it responds to LCM messages in the expected manner. Instead of writing an ad-hoc node that sends whatever messages need to be tested, this node allows the developer to send a message of any type by just specifying the correct parameters.
% Describe how the node works? Inspects the LCM-generated Python module.
For example, to send a message indicating that the Kalman-filtered position of the system is at the coordinates $(4.23, 5.12)$, execute \texttt{lcm-debug-spoof-message.py KALMAN\_POSITION 4.23 5.12}. \item[Message sink (\texttt{lcm-debug-print-all.py})] An inverse of the above debug node: instead of sending a message of any type, this node prints messages of any type. It is useful for inspecting the system since, by subscribing to all channels, any and all LCM messages will be printed. \end{description}
\subsection{Arrowhead clients}
\label{sec:ruben2}
\begin{figure*}[h] \centering \includegraphics[width=\linewidth]{sections/assets/arrowhead_sequence_diagram.png} \caption{The communication between clouds and the arrowhead node.} \label{fig:arrowhead_intercloud} \end{figure*}
Fig.~\ref{fig:arrowhead_intercloud} shows the inter-cloud communication and the communication to the arrowhead node. The Station Consumer sends a POST request for the ``place'' service, together with a JSON payload containing the position. The provider then sends a GET request, depending on the service and the position, to the arrowhead node. When the arrowhead node receives the request, it publishes a message which other nodes receive if they are subscribed to that channel. When the robot has executed the service, the conveyor belt sensor detects a piece on the conveyor belt and the belt starts running. When the piece reaches the end sensor, the conveyor belt stops and notifies the Station Consumer, which then sends a request for the ``pick up'' service with a position. The request then reaches the robot, which executes the service.
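The sketch below illustrates how a Flask endpoint on the arrowhead node could forward such a request as an LCM message. It is a hypothetical example: the route, the channel name and the string payload are ours, and a real implementation would publish a properly typed LCM message instead.
\begin{verbatim}
# Hypothetical sketch of an HTTP endpoint that forwards requests to LCM.
# The route, channel name and payload encoding are illustrative only.
import time
import lcm
from flask import Flask

app = Flask(__name__)
lc = lcm.LCM()

@app.route("/<action>/<position>")            # e.g. /pick_up/left
def forward(action, position):
    """Publish the requested action and position on an LCM channel."""
    payload = ("%f %s %s" % (time.time(), action, position)).encode()
    lc.publish("ARROWHEAD_REQUEST", payload)  # placeholder channel name
    return "ok"

if __name__ == "__main__":
    # HTTP only: the request is issued from the same device (see above).
    app.run(host="127.0.0.1", port=5000)
\end{verbatim}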
\subsubsection{Provider}
The purpose of the provider is to provide services which a consumer in the same Arrowhead cloud or in another Arrowhead cloud can consume. The provider offers two services: ``pick up'' and ``place''. When the provider is run, it registers the system and its services in the Service Registry. The provider uses a secure HTTPS connection with a self-signed client certificate. The security of the Arrowhead Framework relies on SSL certificate trust chains. The chain consists of three layers: master certificate, cloud certificates and client certificates. The cloud certificate is created and signed with the master certificate's private key. The client certificates are created and signed with a cloud certificate's private key. In that way the whole chain is trustworthy (\url{https://github.com/eclipse-arrowhead/core-java-spring}). The service functions in the provider use the POST HTTP method. When sending a request for a service, a JSON payload with the position where the robot should pick up is required. When the provider receives a POST request with a position, it sends a request to \texttt{lcm-source-arrowhead.py}, the so-called arrowhead node. The arrowhead node then publishes a message to the other nodes with a timestamp, ``pick up'' or ``place'', and a position. The provider can either run on the Raspberry Pi where the nodes are running or on another machine. If running on another machine, an SSH connection is needed to communicate with the Raspberry Pi. The purpose of the SSH connection is to communicate with the arrowhead node in a secure way; SSH is a secure network protocol which encrypts the messages that go through the connection. To allow other consumers to ask for the services the provider offers, intra- or inter-cloud rules are needed. Intra-cloud rules are needed if the consumer is in the same cloud; if the consumer is in another cloud, inter-cloud rules need to be added. The intra- and inter-cloud rules can be added in the Authorization system. The provider uses the client-library-python and is written in Python.
\subsubsection{Consumer}
The purpose of the consumer is to consume services which are offered by a provider in the Arrowhead Service Registry. The consumer needs to be manually added as a system in the Service Registry and have intra- or inter-cloud rules for the services it wants to consume. The consumer in the project was used for debugging and testing the provider. The sequence diagram in Fig.~\ref{fig:arrowhead_intercloud} shows the real system and the communication between its parts, where the Station Consumer instead communicates with the provider. The consumer uses self-signed certificates for the HTTPS connection, the same as the provider. The consumer uses the client-library-python and is written in Python.
\section{Orthogonal Matching Pursuit}
\label{JM:sec:GOMP}
In the following section, preconditioned generalized orthogonal matching pursuit (GOMP) \cite{JMTongEtAl2020} will be implemented. Matching pursuit algorithms are a class of greedy algorithms designed to solve the sparse signal recovery problem \begin{equation} \begin{aligned} \min_x \quad & \left\lVert x \right\rVert_0 \\ \textrm{s.t.} \quad & y = \Psi x \end{aligned} \end{equation} where $x \in \mathbb{R}^n$ represents an unknown, $K$-sparse representation of a signal $y \in \mathbb{R}^m$ in $\Psi \in \mathbb{R}^{m \times n}$, also called the sampling matrix. This is done by iteratively reducing the estimation error of the recovered signal by selecting the $S$ most similar new components from the sampling matrix.\\ The main algorithm can be described as follows \begin{algorithm}[H] \SetAlgoLined \KwResult{$x$} \KwData{$y$, $\Psi$, $K$, $S = 1$} Projection onto span\; $P = \Psi^T \left(\Psi\Psi^T\right)$; $\tilde{y} = P y$; $\tilde{\Psi} = P \Psi$\; Initialize residual and support\; $r = y$; $\Lambda = \emptyset$\; \While{Not converged}{ $\Omega = \delta_S\left( \left| \tilde{\Psi}^T~r \right| \right)$\; $\Lambda = \Lambda \cup \Omega$\; $x = \min \left\lVert \tilde{y} - \tilde{\Psi}u \right\rVert_2~, \quad supp(u) = \Lambda$\; $r = \tilde{y} - \tilde{\Psi} x$\; }; \end{algorithm} Here $\delta_S\left( x \right) : \mathbb{R}^n \mapsto \lbrace 0, 1 \rbrace^n$ denotes (with abuse of notation) the mapping of the $S$ largest elements onto a corresponding indicator vector. A similar notation to the pseudocode above can be achieved in \textit{Julia}. For brevity, only the necessary elements of the source code will be shown, which can be found in detail in REF. The function is defined as follows \lstinputlisting[language=Julia,firstline=4, lastline=6]{../scripts/gomp.jl} First, we focus on the preconditioning via the matrix $P \in \mathbb{R}^{m \times m}$ mapping onto the column span of $\Psi$ \lstinputlisting[language=Julia,firstline=22, lastline=26]{../scripts/gomp.jl} Next, the iterative computation of the support, the corresponding coefficients and the residuals is performed \lstinputlisting[language=Julia,firstline=37, lastline=50]{../scripts/gomp.jl} As can be seen from the above, the implementation is quite similar to its mathematical counterpart, enhancing interpretability and readability. The results of the algorithm are shown in Fig.~\ref{JM:fig:GOMP}, where a 100-dimensional sparse vector $x$ has been recovered. \begin{figure} \includegraphics[width = 0.9\textwidth]{../figures/merged.pdf} \caption{Performance of GOMP for a single sparse 100-dimensional vector (left) and with different sparsities (right). The signal $y$ is of dimension 300 and has been generated by a random dense sampling matrix. GOMP has been used with $K=50, S=1$ and run for at most 100 iterations with an absolute tolerance $\epsilon = 0.1$.} \label{JM:fig:GOMP} \end{figure} \newpage
\documentclass[9pt, a4paper, oneside, reqno]{amsart} \usepackage[final]{pdfpages} \usepackage{wrapfig} \usepackage{enumitem} \usepackage{parskip} \usepackage{fancyhdr} \usepackage{color} \usepackage{multicol} \renewcommand{\thefootnote}{\fnsymbol{footnote}} \newcommand{\hint}[1]{\footnote{\raggedleft\rotatebox{180}{Hint: #1\hfill}}} \pagestyle{fancy} \newlist{questions}{enumerate}{1} \setlist[questions, 1]{label = \bf Q.\arabic*., itemsep=1em} \lhead{\scshape Apurva Nakade} \rhead{\scshape Honors Single Variable Calculus} \renewcommand*{\thepage}{\small\arabic{page}} \title{Problem Set 11} \begin{document} \maketitle \thispagestyle{fancy} \section*{Part 1 - Sequences} Q.1. and Q.3. are basically proofs from the book, if you get stuck you should consult them. \begin{questions} \item \begin{enumerate} \item Let $ a_n$ be a sequence such that $ a_n \neq c$, for any $ n$, and $ \lim \limits_{n \rightarrow \infty} a_n = c$. Using the definition, show that if $ \lim \limits_{x \rightarrow c}f(x) = l$ then $ \lim \limits_{n \rightarrow \infty}f(a_n) = l$. \item Write down the definitions of $ \lim \limits_{x \rightarrow c}f(x) \not = l$ and $ \lim \limits_{n \rightarrow \infty}a_n \neq k$. \item Show that if $ \lim \limits_{x \rightarrow c}f(x) \not = l$ then there is a sequence $ a_n$ such that $ \lim \limits_{n \rightarrow \infty} a_n = c$ and $ \lim \limits_{n \rightarrow \infty}f(a_n) \neq l$. \item Come up with a definition for $ \lim \limits_{n\rightarrow \infty} a_n = \infty$. Let $ a_n$ be a sequence such that $ \lim \limits_{n \rightarrow \infty} a_n = \infty$. Show that if $ \lim \limits_{x \rightarrow \infty} f(x) = l$ then $ \lim \limits_{n \rightarrow \infty}f(a_n) = l$. \end{enumerate} \item Do problem 1 (i - ix) and 2 from the book on Pg. 453-454. There are a lot of problems but as before all of them are very short and require a very small argument. You do not have to be very rigorous for these problems, for most (but not all) problems you can use the part (4) of Q.1. \item \begin{enumerate} \item Find a sequence $ a_n$ such that $ a_n$ is bounded from both above and below but $ \lim \limits_{n \rightarrow \infty} a_n$ does not exist. No proof needed, draw a picture. \item Let $ a_n$ be a non-increasing sequence bounded below. Let $ A = \{ a_n \} $. \begin{enumerate} \item Argue that $ \inf A$ exists. \item Show that $ \lim \limits_{n \rightarrow \infty} a_n = \inf A$. \end{enumerate} \end{enumerate} \item \begin{enumerate} \item Prove that if $ 0 < a < 2$ then $ a < \sqrt{2a} < 2$. \item Prove that the sequence \begin{align*} \sqrt{2}, \sqrt{2\sqrt{2}}, \sqrt{2\sqrt{2\sqrt{2}}}, \cdots \end{align*} converges using Theorem 2. \item Find the limit. \end{enumerate} \item Let $ 0 < a_1 < b_1$ and define \begin{align*} a_{n+1} = \sqrt{a_n b_n} \mbox{ and } b_{n+1} = \dfrac{a_n + b_n}{2} \end{align*} \begin{enumerate} \item Use Theorem 2 to show that the sequence $ a_n$ and $ b_n$ converge. \item Show that they converge to the same limit. \end{enumerate} \end{questions} \newpage \section*{Part 2 - Sequences continued} \begin{questions}[resume] \iffalse \item Prove directly using the definition of Cauchy sequence, that if $ a_n$ is a non-decreasing sequence bounded from above then $ a_n$ is a Cauchy sequence.\hint{Estimate $ a_n - a_m$ using $ \sup \{ a_n \}$.} \item Q.9 from Ch.22 on Pg.455. 
\fi \item \begin{enumerate} \item Using integrals show that \begin{align*} \dfrac{1}{n+1} < \log(n+1) - \log(n) < \dfrac{1}{n} \end{align*} \item Using Theorem 2 show that the following sequence converges. \begin{align*} a_n = 1 + \dfrac{1}{2} + \dfrac{1}{3} + \cdots + \dfrac{1}{n} - \log(n) \end{align*} \end{enumerate} \item Prove that if $ \lim \limits_{n \rightarrow \infty} a_n = l$ then \begin{align*} \lim \limits_{n \rightarrow \infty} \dfrac{a_1 + a_2 + \cdots + a_n}{n} = l \end{align*} (Hint: As $ a_n$ converges to $ l$, for every $ \epsilon > 0$ there is some constant $ N$ such that for all $ n > N$ the inequality $ |a_n - l| < \epsilon$ holds. For $ n> N$ break $ \frac{a_1 + a_2 + \cdots + a_n}{n}$ into two fractions and show that one fraction can be made arbitrarily small and the second fraction is close to $ l$.) \end{questions} \newpage \section*{Part 3 - Integral Computations} \begin{questions}[resume] \item For this week do Q.4 problems - vi) to x) and Q.5) on Pg. 379-380 from Ch.19. \\\\ \end{questions} \end{document}
\documentclass{ucll-slides}
\usepackage{pxfonts}
\usepackage[utf8]{inputenc}
\usepackage{tikz}
\usepackage{calc}
\usetikzlibrary{calc,shadows,tikzmark}
\coursename{Distributed Applications}
\title{intro}
\begin{document}
\maketitle
\section{Course overview}
\begin{frame}
\frametitle{Who we are}
\begin{center} \large Wannes Fransen \\[4mm] Frédéric Vogels \end{center}
\end{frame}
\begin{frame}
\frametitle{Classes -- contact hours}
\begin{itemize} \item 12 $\times$ 2 hours = 24 hrs \item 3 stp (credits) $\Rightarrow$ 3 $\times$ 25--30 hrs $\Rightarrow$ 75--90 hours \end{itemize}
\end{frame}
\begin{frame}
\frametitle{Evaluation}
\structure{Project} \begin{itemize} \item Part 1 : 4/20 (PE with feedback) \item Part 2 : 6/20 (PE with feedback) \item Part 3 : 8/20 \end{itemize} \vskip4mm \structure{Oral exam} \begin{itemize} \item You can score from -8 up to +2 points on the exam. \end{itemize}
\end{frame}
\begin{frame}
\frametitle{Distributed system -- according to Wikipedia}
While there is no single definition of a distributed system, the following defining properties are commonly used: \begin{itemize} \item There are several autonomous computational entities (computers or nodes), each of which has its own local memory. \item The entities communicate with each other by message passing. \end{itemize}
\end{frame}
\begin{frame}
\frametitle{Language}
Elixir \begin{itemize} \item Erlang VM \item Functional \item Process-oriented \item Fault-tolerant \end{itemize}
\end{frame}
\end{document}
\documentclass{document} \usepackage{amsmath} \usepackage{amssymb} \usepackage{cite} \usepackage{cleveref} \usepackage{dfadobe} \usepackage[T1]{fontenc} \ifpdf \usepackage[pdftex]{graphicx} \pdfcompresslevel=9 \else \usepackage[dvips]{graphicx} \fi \usepackage{pseudocode} \usepackage{subcaption} \newcommand{\diff}{\mathrm d} \title[Real-time High-Quality Rendering of Non-Rotating Black Holes]% {Real-time High-Quality Rendering of Non-Rotating Black Holes} \author[E. Bruneton]{\parbox{\textwidth}{\centering Eric Bruneton}} \begin{document} % Image 1: %http://localhost:8000/black_hole/demo/demo.html?or=784&oi=1059&cy=35921&os=380&r=24.9624129252286&dr=0.000047700638275464085&phi=11.666205678340063&sfy=976&sfp=474&sfr=1801&ce=315&bhm=1000 % Image 2: %http://localhost:8000/black_hole/demo/demo.html?or=909&oi=609&cy=35550&os=353&sfy=306&sfp=1290&sfr=760&ce=479&bhm=1000&dd=385&do=0&r=33.23656820909121&dr=-0.00007842194532333557&phi=5.559730122554396 % Image 3: %http://localhost:8000/black_hole/demo/demo.html?or=909&oi=1059&cy=1167&os=353&sfy=1169&sfp=973&sfr=1247&ce=604&bhm=1000&dd=307&do=0&r=33.23656820909121&dr=-0.00007842194532333557&phi=5.559730122554396 % Image 4: %http://localhost:8000/black_hole/demo/demo.html?or=909&oi=1003&cy=31708&os=839&sfy=1255&sfp=1019&sfr=1804&ce=479&bhm=1000&dd=0&do=0&od=1045&r=7.566939796965649&dr=-0.23150949272631777&phi=1.2668247297019726 \teaser{ \vspace{-12pt} \includegraphics[width=\linewidth]{figures/teaser.jpg} \centering \caption{Some results obtained with our method. {\em Left}: distorted images of an accretion disc due to gravitational light bending, with relativistic Doppler and beaming effects. {\em Middle}: gravitational lensing creates several amplified images of each punctual star and creates Einstein rings. {\em Right}: near the speed of light, light is amplified and blue-shifted ahead, and is reduced and red-shifted behind. \vspace{12pt}} \label{fig:teaser} } \maketitle \begin{abstract} We propose a real-time method to render high-quality images of a non-rotating black hole with an accretion disc and background stars. Our method is based on beam tracing, but uses precomputed tables to find the intersections of each curved light beam with the scene in constant time per pixel. It also uses a specific texture filtering scheme to integrate the contribution of the light sources to each beam. Our method is simple to implement and achieves high frame rates. \end{abstract} \section{Introduction} Black holes are strange objects which recently got a lot of public exposure with the Interstellar movie~\cite{James2015}, the detection of gravitational waves from merging black holes\cite{GRWave2016}, and the first image of a black hole~\cite{EHT2019}. A real-time, high-quality visualization of a black hole could help the public in getting an intuitive "understanding" of their properties, for instance in planetariums or in 3D astronomy software. It could also be useful in space games. In this context, we propose a real-time high-quality rendering method for non-rotating black holes, with 2 contributions: a precomputation method for constant time beam tracing, and a texture filtering scheme to compute the contribution of the light sources to each beam. We present the related work in Section~\ref{sec:relatedwork}, our model in Section~\ref{sec:model} and its implementation in Section~\ref{sec:implem}. We conclude with a discussion of our results, limitations and future work in Sections~\ref{sec:results} and~\ref{sec:conclusion}. 
\section{Related work}\label{sec:relatedwork} Black hole visualization has a long history starting with~\cite{Luminet1979}, and summarized in~\cite{Luminet2019}. Offline rendering methods generally use beam tracing in curved space-time, support rotating black holes and produce very high-quality images~\cite{Hamilton2014,Riazuelo2014,James2015}. However, they are complex to implement and are not interactive ({\em e.g.} an IMAX Interstellar frame requires at least 30 minutes with 10 cores and the renderer has 40kLoC~\cite{James2015}). Physically accurate general relativistic magnetohydrodynamics simulations of accretion discs are even more complex and require super-computers~\cite{MNRAS2018}. \begin{figure*}[htb] \centering \includegraphics[width=\linewidth]{figures/notations} {\phantomsubcaption\label{fig:notations:a}} {\phantomsubcaption\label{fig:notations:b}} {\phantomsubcaption\label{fig:notations:c}} {\phantomsubcaption\label{fig:notations:d}} \caption{Notations. {\em (\subref{fig:notations:a})} the camera reference frame and image plane (in red) and a curved light ray (in blue) intersecting the accretion disc. {\em (\subref{fig:notations:b})} in the plane containing the light ray, the initial ray angle is noted $\delta$ and the accretion disc inclination $\alpha$. {\em (\subref{fig:notations:c})} $\delta$ verifies $\tan\delta = r \diff\varphi / \diff r$ (in green), {\em i.e.} $\delta = \pi - \arctan2(u, \dot{u})$. {\em (\subref{fig:notations:d})} the deflection $\Delta$ verifies $\Delta = \varphi + \delta - \pi$. The ray is symmetric around the axis through its apsis (in red).} \end{figure*} Our work is more related to interactive visualization methods, usually restricted to non-rotating black holes to reduce the complexity of the problem. \cite{Muller2010} render about $120000$ background stars around a black hole, whose apparent positions, colors and intensities change due to the gravitational effects. Each star is rendered with a point primitive, and its projection(s) on screen are found in constant time by using a precomputed $4096\times4096$ lookup table. \cite{Muller2011} render a torus and a background night sky texture for an observer orbiting a black hole, with a ray-tracing method. Ray intersections with the scene are found in constant time thanks to lookup tables precomputed with a parallelized code (for a fixed orbit radius). \cite{Muller2012} use ray-tracing to render an accretion disc around a black hole. Ray intersections are found in constant time by using an analytic expression involving the Jacobi-sn function (evaluated with arithmetic-geometric, complex number series). In comparison, our method uses only two small tables ($512\times512$ and $64\times32$) which are very fast to precompute. They are used to find, in constant time, the intersection(s) of curved light beams with the accretion disc and millions of background stars (stored in a cubemap with a specific filtering scheme). \section{Model}\label{sec:model} Our goal is to render a non-rotating black hole with an accretion disc and background stars, illustrating the effects of gravitation on light. Simulating a realistic accretion disc is {\em not} a goal: we thus use a basic, infinitely thin disc model instead. 
However, we want to get real-time {\em and} high-quality images, which is not easy: \begin{itemize} \item a simple ray-marching algorithm can render a sky map texture distorted by a black hole in real-time, but not with a high quality ({\em e.g.} stars become curved segments instead of staying punctual), \item conversely, offline beam-tracing methods produce high-quality images but are not real-time~\cite{James2015}. \end{itemize} To this end, we propose a "precomputed beam tracing" method: for each pixel, we initialize a light beam, compute its intersections with the scene using precomputed tables, and then the light received from the intersected objects. These 3 steps are explained below, after a very short introduction to the Schwarzschild metric. \subsection{Schwarzschild metric} The space-time geometry around a non-rotating black hole can be described with the Schwarzschild metric. In units such that the radius of the black hole's event horizon and the speed of light are 1, this metric is \begin{equation} \diff s^2 = \left(1 - \frac{1}{r}\right) \diff t^2 - \left(1 - \frac{1}{r}\right)^{-1} \diff r^2 - r^2 \diff \theta^2 - r^2 \sin^2\theta \diff \phi^2 \label{eq:metric} \end{equation} where $\diff s$ is the line element and $(t, r, \theta, \phi)$ are the Schwarzschild coordinates~\cite{weinberg1972}. $r, \theta, \phi$ are (pseudo-)spherical coordinates, and in the following we also use the corresponding (pseudo-)Cartesian coordinates $x,y,z$, as well as the inverse radius $u\triangleq 1/r$. In particular, the Cartesian coordinates of an orthonormal basis $\vec{e}_t, \vec{e}_r, \vec{e}_\theta, \vec{e}_\phi$ for a static observer at $(t, r, \theta, \phi)$ are, respectively (see Fig.~\ref{fig:notations:a}) \begin{equation} \frac{1}{\sqrt{1-u}} \begin{bmatrix} 1\\ 0\\ 0\\ 0 \end{bmatrix}, % \sqrt{1-u} \begin{bmatrix} 0\\ \sin\theta \cos\phi\\ \sin\theta \sin\phi\\ \cos\theta \end{bmatrix}, % \begin{bmatrix} 0\\ \cos\theta \cos\phi\\ \cos\theta \sin\phi\\ -\sin\theta \end{bmatrix}, % \begin{bmatrix} 0\\ -\sin\phi\\ \cos\phi\\ 0 \end{bmatrix} \label{eq:staticbasis} \end{equation} \subsection{Beam initialization} The first step of our method is to compute, for each pixel, the initial direction of the corresponding light beam. As \cite{Muller2010}, and in order to simplify the next steps, we take advantage of some symmetries to reduce this direction to a single angle $\delta$, as shown below. Let $p=(p^t, p^r, p^\theta, p^\phi)$ be the camera position in Schwarzschild coordinates, and $\Lambda$ the Lorentz transformation~\cite{weinberg1972} specifying the camera orientation and velocity with respect to a static observer at $p$. An orthonormal basis for the camera is thus $\vec{e}_{\tau}, \vec{e}_w, \vec{e}_h, \vec{e}_d$ (see Fig.~\ref{fig:notations:a}), given by \begin{equation} \vec{e}_i = {\Lambda_i}^j \vec{e}_j, \quad i \in \{\tau, w, h, d\}, \quad j \in \{t, r, \theta, \phi\} \label{eq:camerabasis} \end{equation} where $\vec{e}_{\tau}$ is the camera 4-velocity and $\vec{e}_w, \vec{e}_h, \vec{e}_d$ define its orientation. 
For a pinhole camera with focal length $f$, and since beams are traced backward, the initial beam direction $\mathbf{d}$ for a pixel with screen coordinates $q^w, q^h$ is (see Fig.~\ref{fig:notations:a}) \begin{equation} {\mathbf d} = -{\mathbf e}_{\tau} + \frac{q^w {\mathbf e}_w + q^h {\mathbf e}_h - f {\mathbf e}_d}{\sqrt{(q^w)^2 + (q^h)^2 + f^2}} \label{eq:d} \end{equation} where ${\mathbf v}$ denotes the projection of $\vec{v}$ on the $\vec{e}_r, \vec{e}_\theta, \vec{e}_\phi$ hyperplane. We now take advantage of the spherical symmetry of the metric, and of the fact that its geodesics are planar~\cite{weinberg1972}, to reduce ${\mathbf d}$ to a single angle. Let $(t, r, \vartheta, \varphi)$ be {\em rotated} Schwarzschild coordinates such that the beam's axial ray is contained in the equatorial plane $\vartheta = \pi/2$. They can be defined as the (pseudo-)spherical coordinates corresponding to the following new orthonormal basis vectors (for the Euclidean metric -- see Fig.~\ref{fig:notations:b}): \begin{equation} {\mathbf e}_{x'} \triangleq \frac{{\mathbf p}}{p^r} \quad {\mathbf e}_{y'} \triangleq \frac{{\mathbf e}_{z'} \wedge {\mathbf e}_{x'}} {\Vert {\mathbf e}_{z'} \wedge {\mathbf e}_{x'} \Vert} \quad {\mathbf e}_{z'} \triangleq \frac{{\mathbf e}_{x'} \wedge {\mathbf d}} {\Vert {\mathbf e}_{x'} \wedge {\mathbf d} \Vert} \label{eq:raybasis} \end{equation} In these rotated coordinates the metric~\eqref{eq:metric} keeps the same form and the light beam starts from $(p^t, p^r, \pi/2, 0)$ with an initial angle $\delta \triangleq \arccos({\mathbf e}_{x'} \cdot {\mathbf d} / \Vert {\mathbf d} \Vert)$ from the $x'$ axis (see Fig.~\ref{fig:notations:b}). Finally, note that in the $\vartheta = \pi/2$ plane the accretion disc becomes two line segments at angles $\alpha$ and $\alpha + \pi$ from the $x'$ axis, with \begin{equation} \alpha = \arccos({\mathbf e}_{x'} \cdot {\mathbf t}) \quad {\mathbf t} \triangleq \pm{\mathbf e}_z\wedge{\mathbf e}_{z'} / \Vert {\mathbf e}_z \wedge {\mathbf e}_{z'} \Vert \label{eq:alpha} \end{equation} and where the sign is chosen such that ${\mathbf t} \cdot {\mathbf e}_{y'} \ge 0$ (see Fig.~\ref{fig:notations:b}). \subsection{Beam tracing}\label{sec:beamtracing} The second step of our method is to compute the beam intersections with the scene, and the light emitted there. For this we first need to determine the geodesic followed by the beam's axial ray. For light rays $\diff s = 0$, and there exist curvilinear coordinates $\sigma$, defined up to an affine transform, such that $(1-u)\diff t / \diff \sigma$ and $r^2 \sin^2\vartheta \diff \varphi /\diff \sigma$ are constant along the ray~\cite{weinberg1972}. We can thus choose $\sigma$ such that the second constant is $1$, leading to \begin{equation} (1 - u)\frac{\diff t}{\diff \sigma} = e \quad \mathrm{and} \quad \sin^2\vartheta \frac{\diff \varphi}{\diff \sigma} = u^2 \label{eq:motionconstants} \end{equation} where $e$ happens to be the inverse of the ray's impact parameter (see Fig.~\ref{fig:notations:d}). By substituting this in \eqref{eq:metric} with $\diff s = 0$ and $\vartheta = \pi/2$ we get the geodesic equation \begin{equation} \dot{u}^2 \triangleq \left(\frac{\diff u}{\diff \varphi}\right)^2 = e^2 - u^2 (1 - u) \quad \Rightarrow \quad \ddot{u} = \frac{3}{2} u^2 - u\label{eq:rayequation} \end{equation} Integrating this numerically at each pixel, with a high precision, would be too slow (see Section~\ref{sec:results}). 
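For concreteness, such a direct per-pixel integration of \eqref{eq:rayequation}, together with the retarded time from \eqref{eq:motionconstants}, could look like the following minimal Python sketch (an illustration only; it uses the same explicit Euler scheme and the same $\dot{u} = -u\cot\delta$ initialization as Algorithm~\ref{fig:algo}, but per pixel instead of once per table entry, and its names are hypothetical):
\begin{verbatim}
import math

def integrate_ray(p_r, delta, d_phi=1e-4,
                  max_phi=10.0 * math.pi):
    # Explicit Euler integration of u'' = 3/2 u^2 - u,
    # from radius p_r and initial angle delta (0 < delta < pi).
    # Also accumulates t, using dt/dphi = e / (u^2 - u^3).
    u = 1.0 / p_r
    u_dot = -u / math.tan(delta)   # tan(delta) = r dphi / dr
    e = math.sqrt(u_dot * u_dot + u * u * (1.0 - u))
    phi, t = 0.0, 0.0
    samples = [(phi, u, t)]
    while 0.0 < u < 1.0 and phi < max_phi:
        t += e * d_phi / (u * u - u ** 3)
        u_dot += (1.5 * u * u - u) * d_phi
        u += u_dot * d_phi
        phi += d_phi
        samples.append((phi, u, t))
    # u >= 1: fell into the hole; u <= 0: escaped to infinity.
    return samples
\end{verbatim}
With a step small enough for sub-pixel accuracy, this loop needs on the order of a thousand iterations per pixel (cf.\ the ray-marching comparison in Section~\ref{sec:results}), which is precisely what the precomputed tables avoid.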
Alternatively, the analytic solution for $u(\varphi)$, using the Jacobi-sn function (not available on GPU), could be implemented with numerical series~\cite{Muller2012}. However, we also need the retarded time (to animate the accretion disc) and the light ray deflection (for the stars). To compute all this easily and efficiently, we use instead two small precomputed tables. We explain below how we precompute and use these tables to find the beam intersections, thanks to some ray properties that we present first.

\subsubsection{Ray properties}\label{sec:properties}

Light rays can be divided into three types. If $e^2$ is larger than the maximum $\mu \triangleq 4 / 27$ of $u^2 (1 - u)$ over $[0,1]$, reached at the {\em photon sphere} $u = 2 / 3$, then \eqref{eq:rayequation} shows that all values of $u$ are possible. The light ray thus comes from infinity into the black hole, or vice versa. Otherwise, some values around $2 / 3$ are excluded. The ray either stays in the (empty) region $u > 2 / 3$, or comes from infinity, reaches an apsis $u_a < 2 / 3$, and goes back to infinity. In the latter case $u_a$ is given by setting $\dot{u} = 0$ in~\eqref{eq:rayequation}:
\begin{equation}
u_a=\frac{1}{3} + \frac{2}{3} \sin\left(\frac{1}{3} \arcsin\left(\frac{2e^2}{\mu} - 1\right)\right) \label{eq:uapsis}
\end{equation}
and the light ray is unchanged by the reflection $\varphi \rightarrow 2\varphi_a - \varphi$ (see Fig.~\ref{fig:notations:d}). In any case, $\varphi \rightarrow -\varphi$ changes a solution of~\eqref{eq:rayequation} into another, with $e, \sigma, \dot{u}$ and $\alpha$ changed into their opposites and $\delta$ into $\pi - \delta$.

\begin{algorithm}[htb]
\centering
\begin{pseudocode}
\PROCEDURE{Precompute}{\epsilon}
\FORALL e \ge 0 \DO \BEGIN
t\GETS 0,\ u \GETS 0,\ \dot{u} \GETS e,\ \varphi \GETS 0,\ \diff\varphi \GETS \epsilon \\
\WHILE u < 1 \AND (\dot{u} \ge 0 \OR \varphi < \pi) \DO \BEGIN
\IF \dot{u} \ge 0 \THEN {\mathbb D}(e,u) \GETS [t,\ \Delta=\varphi - \mathrm{arctan2}(u, \dot{u})]\\
\IF \varphi < \pi \THEN {\mathbb U}(e,\varphi) \GETS [t,\ u]\\
\IF u > 0 \THEN t \GETS t + e\,\diff\varphi / (u^2 - u^3)\\
\dot{u}\GETS \dot{u}+(3u^2/2-u)\diff\varphi,\ u \GETS u+\dot{u}\diff\varphi,\ \varphi \GETS \varphi+\diff\varphi
\END
\END
\ENDPROCEDURE \\
\PROCEDURE{TraceRay}{p^r,\delta,\alpha,u_{ic},u_{oc}}
u \GETS 1/p^r,\ \dot{u} \GETS -u\cot\delta,\ e^2 \GETS \dot{u}^2 + u^2(1 - u)\\
\IF e^2 < \mu \AND u > 2/3 \THEN \RETURN{\infty,\emptyset}\\
s \GETS \mathrm{sign}(\dot{u}),\ [t, \Delta] \GETS {\mathbb D}(e,u),\ [t_a, \Delta_a] \GETS {\mathbb D}(e,u_a)\\
\varphi \GETS \Delta + (s = 1\ ?\ \pi-\delta\ :\ \delta) + s \alpha,\ \varphi_a \GETS \Delta_a + \pi / 2\\
\varphi_0 \GETS \varphi \mod \pi,\ [t_0,u_0] \GETS {\mathbb U}(e, \varphi_0), I \GETS \emptyset\\
\IF \varphi_0 < \varphi_a \AND u_{oc} \le u_0 \le u_{ic} \AND \mathrm{sign}(u_0 - u) = s \THEN \DO \BEGIN
I \GETS I \cup [s(t_0-t),\ u_0,\ \alpha + \varphi - \varphi_0]\\
\END\\
\IF e^2<\mu \AND s=1 \THEN \DO \BEGIN
\varphi \GETS 2\varphi_a - \varphi,\ \varphi_1 \GETS \varphi \mod \pi,\ [t_1,u_1] \GETS {\mathbb U}(e, \varphi_1)\\
\IF \varphi_1 < \varphi_a \AND u_{oc} \le u_1 \le u_{ic} \THEN \DO \BEGIN
I \GETS I \cup [2t_a-t-t_1,\ u_1,\ \alpha + \varphi - \varphi_1]\\
\END\\
\END\\
\IF \dot{u}>0 \THEN \Delta \GETS (e^2<\mu\ ?\ 2\Delta_a-\Delta\ :\ \infty)\\
\RETURN{\delta'=\delta+\Delta,I}\\
\ENDPROCEDURE
\end{pseudocode}
\caption{\label{fig:algo} \textsc{Precompute} is based on~\eqref{eq:motionconstants}, \eqref{eq:rayequation} and the properties
illustrated in Fig.~\ref{fig:notations:d}. \textsc{TraceRay} uses the properties and symmetries presented in Section~\ref{sec:properties}.} \end{algorithm} \subsubsection{Precomputations and beam tracing: background stars} For background stars we first compute the beam's escape angle, and then sum the light emitted by all the stars in the beam's footprint on the celestial sphere, around this escape direction. \paragraph*{Escape angle} Let $\delta'$ be the beam's escape angle (or $\infty$ if it falls into the black hole), measured from the $x'$ axis. For efficient rendering, $\delta'$ could be precomputed for all initial conditions $p^r, \delta$. But this would yield an $O(n^3)$ algorithm. Instead, we precompute the deflection $\Delta$ of rays coming from infinity (see Fig.~\ref{fig:notations:d}) in a $\mathbb{D}(e, u)$ table, for all $e \ge 0$ and $u < 1$ or $u \le u_a$ (depending on $e$ and taking advantage of the above symmetries). This gives a trivial $O(n^2)$ algorithm (see Algorithm~\ref{fig:algo} -- we use the Euler method but Runge-Kutta or other methods are possible too). At runtime, we compute $\delta'$ as $\delta + \Delta$ or $\delta + \Delta_{\infty} - \Delta = \delta + 2\Delta_a - \Delta$, depending on the ray direction (see Fig.~\ref{fig:notations:d} and Algorithm~\ref{fig:algo}). In practice $\mathbb{D}(e, u)$ is defined only in a subset $\mathcal{D}$ of $[0, \infty[ \times [0, 1[$, diverges at $(\sqrt{\mu}^{\,-},u_a)$ and $(\sqrt{\mu}^{\,+},1)$, and varies rapidly around $u = 2 / 3$ (because rays make more and more turns near the photon sphere before falling or escaping). For good precision we thus map $\mathcal{D}$ non-linearly into a square $[0, 1]^2$ domain, designed to get more samples in these regions (see Appendix~\ref{sec:texmapping}). \paragraph*{Emitted light} It now remains to compute the light emitted from all the stars in the beam's footprint on the celestial sphere, around $\mathbf{d}' = \cos\delta' \mathbf{e}'_x + \sin\delta' \mathbf{e}'_y$. For extended sources such as nebulae or galaxies, we can simply take advantage of anisotropic texture filtering, by storing these sources in a cube map. For punctual stars, however, this would yield unrealistically stretched star images. Our solution is to use a manually filtered cube map (see Fig.~\ref{fig:filtering}): \begin{itemize} \item Each texture element (or {\em texel}) stores the color and position (in the texel) of at most one star. A color {\em sum} (and not average) and luminosity-weighted position average is used for mip-mapping. \item We compute the beam's footprint in the cube map by using screen space partial derivatives (implemented with finite differences by the rendering pipeline). To avoid discontinuities at cube edges, we compute the partial derivatives $\partial_w \mathbf{d}'$ and $\partial_h \mathbf{d}'$ of $\mathbf{d}'$, and then compute the derivatives of the cube map face texture coordinates $U, V$ analytically from them ({\em e.g.} for the +z face $U = d'^x / d'^z$, $\partial_w U = (\partial_w d'^x - U\partial_w d'^z) / d'^z$). \item We compute a mipmap level from the size of the footprint, fetch all the texels at this level in the footprint, and accumulate the colors of the corresponding stars. 
For anti-aliasing, and to conserve the total intensity, we view each star as a $1\times 1$ area and multiply its intensity by the area of its intersection with the considered screen pixel ({\em i.e.} with $f(w)f(h)$, where $f(x) = \max(1 - |x|, 0)$ and $(w, h)$ are the star's subpixel coordinates -- the pixel domain being $[-\frac{1}{2}, \frac{1}{2}]^2$). Note that this requires considering an extended footprint (see Fig.~\ref{fig:filtering}). In our implementation, we select the mipmap level so that this extended footprint covers at most $9 \times 9$ texels.
\end{itemize}
Note that this method approximates quadrilateral footprints with parallelograms, and does not use interpolation across mipmap levels. This would be easy to fix, but is not really necessary since our method already gives very good results.

\begin{figure}[t]
\centering
\includegraphics[width=\linewidth]{figures/filtering}
\caption{\label{fig:filtering}Filtering. We compute the emitted light for a pixel by summing the light from the stars in its extended footprint (dashed parallelogram, computed with screen space partial derivatives) in a stars map, weighted by their pixel overlap area (cyan).}
\end{figure}

\subsubsection{Precomputations and beam tracing: accretion disc}

As for stars, we compute the beam intersection(s) with the accretion disc by using a precomputed table. We then compute the light emitted there by using a simple procedurally animated disc model.

\paragraph*{Escape angle}
\paragraph*{Intersections} Let $r_{ic}$ and $r_{oc}$ be the inner and outer radii of the disc (with $r_{ic}\ge 3$, the innermost stable circular orbit~\cite{Lasota2016}). Since the intersections can only occur at $\varphi = \alpha + m\pi$ (see Fig.~\ref{fig:notations:b}), we only need the function $u(\varphi)$ to check if there exists an $m$ such that $u_{oc} \triangleq r_{oc}^{-1} \le u(\alpha + m\pi) \le u_{ic} \triangleq r_{ic}^{-1} \le 1 / 3$. For this we precompute $u(e, \varphi)$, {\em for light rays coming from infinity}, in a $\mathbb{U}(e, \varphi)$ table. At runtime, $u(\alpha + m\pi)$ can then be computed with $\mathbb{U}(e, \varphi_p + \alpha + m\pi)$, where $\varphi_p$ is the camera position (which can be obtained from the deflection $\Delta_p = \mathbb{D}(e, 1/p^r)$ since $\Delta = \varphi + \delta - \pi$ -- see Fig.~\ref{fig:notations:d}). Note that we don't need $\mathbb{U}$ for all $\varphi$: we can stop when $u \ge 1 / 3$ since no intersection can occur between this point and the apsis, if any (and the rest can be deduced by symmetry). In practice, this means that we only need $\mathbb{U}(e, \varphi)$ for $0 \le \varphi < \pi$, which has two consequences:
\begin{itemize}
\item we don't need to evaluate $\mathbb{U}(e, \varphi_p + \alpha + m\pi)$ for all $m$: in fact we only need $\mathbb{U}(e, (\varphi_p+\alpha)\mod\pi)$,
\item there can be at most two intersections: one on each symmetric part of the ray (if it does not fall into the black hole).
\end{itemize}
The algorithms to precompute $\mathbb{U}(e, \varphi)$ and to find the accretion disc intersections $(u_0, \varphi_0)$, $(u_1, \varphi_1)$ follow from these properties, and those of Section~\ref{sec:properties}, and are shown in Algorithm~\ref{fig:algo}. As for $\mathbb{D}$, we map $\mathbb{U}$'s domain non-linearly into $[0,1]^2$ to get good precision in large gradient areas (see Appendix~\ref{sec:texmapping}). Finally, note that for an animated disc we also need to compute the {\em retarded time} between the intersections and the camera.
For this we also precompute and store $t$ -- using $\diff t / \diff\varphi = e / (u^2 - u^3)$, from \eqref{eq:motionconstants} -- in $\mathbb{D}$ and $\mathbb{U}$. This allows the computation of the retarded times $t_0$, $t_1$ at the disc intersections, as shown in Algorithm~\ref{fig:algo}.

\begin{figure}[t]
\centering
\includegraphics[width=\linewidth]{figures/disc}
\caption{\label{fig:disc}Accretion disc. We compute the density with a sum of linear particles moving along precessing orbits (left), whose density at a point $h$ depends on its ``distance'' $d(\delta r, \delta\phi)$ from the particle center $o$ (see Appendix~\ref{sec:discparticles}).}
\end{figure}

\paragraph*{Emitted light} It now remains to compute the light emitted by the accretion disc at the intersection points. For this we use the light emitted by a black body at temperature $T(u)$, times the disc density. We use $T^4(u) \propto u^3 (1 - \sqrt{3u})$~\cite{Lasota2016}, and compute the density with a sum of procedural particles moving along quasi-circular precessing orbits (see Fig.~\ref{fig:disc} and Appendix~\ref{sec:discparticles}).

\subsection{Shading}

Due to gravitational effects, the light received at the camera is different from the emitted light, computed above. We present these effects below, and explain how we compute them.

\subsubsection{Gravitational lensing effects}

Due to gravitational lensing, the light emitted by a punctual star is received amplified by a factor $\Omega / \Omega'$, where $\Omega$ (resp. $\Omega'$) is the beam's solid angle at the camera (resp. emitter)~\cite{Virbhadra99}. We compute it by using the screen space partial derivatives of the beam directions at the camera and at the emitter:
\begin{equation}
\frac{\Omega}{\Omega'} = \frac{\Vert \partial_w \mathbf{q} \wedge \partial_h \mathbf{q} \Vert} {\Vert \partial_w \mathbf{d}' \wedge \partial_h \mathbf{d}' \Vert}
\end{equation}
where $\mathbf{q}$ is the normalized $[q^w, q^h, -f]^\top$ vector. Note that this does not apply to area light sources, because the beam's subtended area varies in inverse proportion.

\subsubsection{Doppler and beaming effects}

Due to gravitational and relativistic time dilation and length contraction effects, the frequency $\nu$ of the received light differs from the emitted frequency $\nu'$. The ratio is given by~\cite{Philipp2017}
\begin{equation}
\frac{\nu}{\nu'} = \frac{g(\vec{k}, \vec{l})}{g(\vec{k}', \vec{l}')} \label{eq:Doppler}
\end{equation}
where $\vec{k}$ is the 4-velocity of the receiver, $\vec{l}$ is the tangent 4-vector $\diff s / \diff \sigma$, at the receiver, of the light ray curve $s(\sigma)$, and $\vec{k}'$ and $\vec{l}'$ are the corresponding emitter quantities. We thus need $\vec{k}$, $\vec{k}'$, $\vec{l}$, and $\vec{l}'$, which we compute as follows. In {\em rotated} Schwarzschild coordinates, $\vec{l}$ and $\vec{l}'$ are given by $[\diff t / \diff \sigma, \diff r / \diff \sigma, \diff \vartheta / \diff \sigma, \diff\varphi / \diff \sigma]^{\top}$. Using \eqref{eq:motionconstants}, this gives
\begin{equation}
\vec{l} = \left[ \frac{e}{1 - u}, -\dot{u}, 0, u^2 \right]^{\top} \quad \vec{l}' = \left[\frac{e}{1 - u'}, -\dot{u'}, 0, u'^2 \right]^{\top}
\end{equation}
where $e$ is the {\em negative} root of \eqref{eq:rayequation} -- for the actual light rays $\diff \varphi / \diff t$ is negative, unlike in the previous section where rays were traced backward.
In {\em non-rotated} Schwarzschild coordinates, we have
\begin{equation}
\vec{k}' = \left[ \sqrt{\frac{2}{2 - 3u'}}, 0, 0, \sqrt{\frac{u'^3}{2 - 3u'}} \right]^{\top} \label{eq:k_prime}
\end{equation}
for the accretion disc (if we assume circular motion)~\cite{Philipp2017}, $\vec{k}' = [1, 0, 0, 0]^{\top}$ for static stars, and $\vec{k} = \vec{e}_{\tau}$ for the camera. Finally, to compute $g(\vec{k}, \vec{l})$ and $g(\vec{k}', \vec{l}')$, we need the corresponding {\em rotated} coordinates: $k^t$ and $k^r$ are unchanged, $k^\vartheta$ is not needed since $l^\vartheta = 0$ and $k^\varphi = \vec{k} \cdot \vec{\partial}_\varphi/r^2 = u\mathbf{k} \cdot \mathbf{e}_{y'}$ (and similarly for $\vec{k}'$). For instance, for the accretion disc, we get $k'^\varphi = k'^\phi \mathbf{e}_z \cdot \mathbf{e}_{z'}$ and
\begin{equation}
g(\vec{k}', \vec{l}') = e \sqrt{\frac{2}{2 - 3u'}} - \sqrt{\frac{u'^3}{2 - 3u'}} {\mathbf e}_z \cdot {\mathbf e}_{z'}
\end{equation}
The above Doppler effect has an associated {\em beaming} effect: the received intensity differs from the emitted one because, from Liouville's theorem, $I(\nu) / \nu^3$ is invariant \cite{Misner1973}. In terms of wavelength $\lambda \triangleq \nu^{-1}$, and with $I(\lambda) \diff \lambda = I(\nu) \diff \nu$, this gives $I(\lambda) = (\lambda' / \lambda)^5 I(\lambda')$. For black bodies the two effects result in a temperature shift $T = (\nu / \nu') T'$. For other light sources, however, which we also want to support, the result is more complex. We thus precompute it in a 3D texture $\mathbb{C}(xy, D)$, for each chromaticity $xy$ and Doppler factor $D \triangleq \nu / \nu'$. To this end we need to choose a spectrum for each chromaticity, among the infinite number of possible spectra. For simplicity and to get black body spectra for black body colors, we use spectra of the form $I(\lambda') = B_T(\lambda')(1 - a_1 A_1(\lambda') - a_2 A_2(\lambda'))$, where $B_T$ is the black body spectrum for temperature $T$, $T$ is the correlated color temperature, and $A_1$ and $A_2$ are two fixed absorption spectra. A linear system gives $a_1$ and $a_2$ from the $xy$ chromaticity, and the Doppler and beaming effects give CIE XYZ colors that we precompute with
\begin{equation}
\mathbb{C}(xy,D) = D^5 \frac{\int I(D\lambda) \left[ \bar{x}(\lambda), \bar{y}(\lambda), \bar{z}(\lambda)\right]^{\top} \diff \lambda} {\int I(\lambda) (\bar{x}(\lambda) + \bar{y}(\lambda) + \bar{z}(\lambda))\ \diff\lambda}
\end{equation}
where $\bar{x}$, $\bar{y}$ and $\bar{z}$ are the CIE color matching functions. At runtime, the emitted XYZ color computed in Section~\ref{sec:beamtracing} is transformed into the received color $(X + Y + Z)\mathbb{C}(xy, \nu/\nu')$ with \eqref{eq:Doppler}.

\subsubsection{Lens glare effects}

Due to light scattering and diffraction inside the eye, haloes appear around very bright light sources, which would otherwise be hard to distinguish from fainter sources. For this reason we apply a bloom shader effect to the final image, before tone-mapping. We use a series of small-support filter kernels on mipmaps of the full image, approximating a point spread function from \cite{Spencer95}, but more precise methods are possible too \cite{HullinSIG2011}.

\section{Implementation}\label{sec:implem}

\begin{figure}[t]
\centering
\includegraphics[width=\linewidth]{figures/orbit}
\caption{\label{fig:orbit}Camera orbit.
The orbit, in red, is specified by an inclination $\chi$ and the initial conditions $r_0$, $\delta_0$ and $v_0$ (see Appendix~\ref{sec:cameraorbit}).} \end{figure} We implemented our method in C++ for the precomputations, and WebGL 2 for the rendering. The full source code and an online demo are available at \url{https://github.com/ebruneton/black_hole_shader}. The demo simulates a static or freely falling observer (see Fig.~\ref{fig:orbit}) and allows the user to set various parameters (black hole mass, disc temperature and density, camera orbit, etc). We precompute $\mathbb{D}(e, u)$ and $\mathbb{U}(e, \varphi)$ in $512 \times 512$ and $64 \times 32$ RG32F textures, respectively. \textsc{Precompute} takes about 11 seconds on a $3.2$GHz Intel Core i5-6500 CPU, with $\epsilon = 10^{-5}$, and unit tests show that \textsc{TraceRay} results $\delta'$, $u_0$ and $u_1$ are within $10^{-3}$ of reference values computed without intermediate textures. We precompute $\mathbb{C}(xy, D)$ in a $64 \times 32 \times 64$ RGB32F texture, in about 7 seconds. We also precompute two $6 \times 2048 \times 2048$ RGB9E5 cubemaps for the area and punctual light sources, from the Gaia DR2 \cite{gaiadr22018} and Tycho 2 \cite{tycho2000} star catalogs. This requires downloading and processing 550GB of compressed data, which can take a day, and yields $\approx 3.6$ million punctual light sources. In our implementation we don't store a sub-texel position for each star: instead, we use a hash of its color to compute a pseudo-random position. \section{Results and discussion}\label{sec:results} Some results obtained with our method are shown in Fig.~\ref{fig:teaser}. They are rendered in Full HD at about $150$ fps on an NVidia GeForce GTX 960 (see Table~\ref{table:perf}). The benefit of our precomputed tables can be measured by replacing \textsc{TraceRay} with a ray-marching method integrating \eqref{eq:rayequation} numerically (and keeping everything else unchanged). To get the same performance as with the precomputed tables, only $25$ integration steps at most can be used, and stars end up at several degrees from their correct positions. To get (almost) the same precision, up to 1000 integration steps must be used, and the framerate drops to about $45$ fps (see method M3 in Table~\ref{table:perf}). The benefit of our custom texture filtering method to render the stars can be measured by replacing it with the method from \cite{Muller2010} (and keeping everything else unchanged). Each of the $3.6$ million stars is then rendered with $n$ $2 \times 2$ anti-aliased point primitives (we used $n = 2$ in our tests). The point position is computed with a lookup in a $4096 \times 4096$ precomputed texture. This gives about $65$ fps (see method M2 in Table~\ref{table:perf}). The main bottleneck is due to the fact that, inside the region of Einstein rings, many stars project into the same pixel, leading to a lot of overdraw. \begin{table} \centering \begin{tabular}{|c|c|c|c|c|c|c|} \hline View & M1 & M2 & M3 & Stars & Disc & Bloom \\ \hline \hline 1 & 150 & 64 & 43 & 2.99 & 0.82 & 1.11 \\ 2 & 160 & 59 & 45 & 2.64 & 0.78 & 1.15 \\ 3 & 153 & 67 & 45 & 2.99 & 0.69 & 1.10 \\ 4 & 114 & 64 & 41 & 4.14 & 1.87 & 1.08 \\ \hline \end{tabular} \caption{\label{table:perf} The framerate obtained with our method (M1), with stars rendered with \cite{Muller2010} (M2), and with \textsc{TraceRay} replaced with ray-marching (M3), on the views shown in Fig.\ref{fig:teaser} and numbered from left to right ($1920\times1080$p, NVidia GeForce GTX 960). 
The other columns give the time used per frame (in milliseconds) to render the stars, the accretion disc and the bloom effect with our method.}
\end{table}

A limitation of our method is that views from inside the horizon are not supported. Indeed, since no observer can remain static in this region, we can no longer specify the camera and initialize light beams by using a reference static observer. Also, the Schwarzschild metric diverges at the horizon. However, by using different coordinates, as in \cite{Muller2010}, we believe that our method can be extended to support this case. Another limitation is that motion blur, which is necessary for very high-quality animations, is not supported. Also, because of the approximations in our custom texture filtering method, in some cases a few stars flicker when the camera moves. Fixing both quality issues might be easier by extending \cite{Muller2010} rather than by extending our method, at the price of decreased performance. Finally, another limitation of our method, and of \cite{Muller2010} as well, is that rotating black holes are not supported. Because they are only axially symmetric, the ``inverse ray tracing'' approach of \cite{Muller2010} would probably be hard to generalize to this case. Our precomputed ray tracing method, on the other hand, could in principle be generalized to 4D tables containing the deflected direction and the accretion disc intersections of any ray (specified with two position and two direction parameters). In practice, however, obtaining precise 4D tables of reasonable size might be hard.

\section{Conclusion}\label{sec:conclusion}

We have presented a beam tracing method relying on small precomputed textures to render real-time high-quality images of non-rotating black holes. Our method is simple to implement and achieves high frame rates. Extending it to views from inside the horizon, and to rotating black holes, if this is possible, is left as future work.

\paragraph*{Acknowledgments} We would like to thank Alain Riazuelo for proofreading this paper.

\appendix

\section{Texture mappings}\label{sec:texmapping}

We store $\mathbb{D}(e,u)$ at texel coordinates
\begin{equation*}
\begin{split}
&\left[\frac{1}{2} - \sqrt{-\log(1 - e^2 / \mu) / 50},\ 1 - \sqrt{1 - u / u_a}\right]\ \mathrm{if}\ e^2 < \mu\\
&\left[\frac{1}{2} + \sqrt{-\log(1 - \mu / e^2) / 50},\ \frac{\sqrt{2/3} \pm \sqrt{|u - 2 / 3|}}{\sqrt{2 / 3} + \sqrt{1 / 3}}\right]\ \mathrm{otherwise}
\end{split}
\end{equation*}
where $\pm$ is the sign of $u-2/3$, and $\mathbb{U}(e, \varphi)$ at texel coordinates
\begin{equation*}
\left[\frac{1}{1 + 6 e^2},\ \frac{\varphi}{3} \frac{1 + 6 e^3}{1 + e^2}\right]
\end{equation*}
as explained at \url{https://ebruneton.github.io/black_hole_shader/black_hole/functions.glsl.html}.

\section{Disc particles}\label{sec:discparticles}

The orbit of a point particle in the accretion disc is given by
\begin{equation*}
u = u_1 + (u_2 - u_1) \mathrm{sn}^2 \left(\frac{\phi}{2} \sqrt{u_3 - u_1}, \kappa\right), \quad \kappa = \sqrt{\frac{u_2 - u_1}{u_3 - u_1}}
\end{equation*}
where $\mathrm{sn}$ is the Jacobi-sn function, $u_1 \le u_2 \le 1/3$, and $u_3 = 1 - u_1 - u_2$ \cite{darwin1959}.
For quasi-circular orbits this can be approximated with \begin{equation} \begin{split} u(t) &\approx u_1 + (u_2 - u_1) \sin^2\left(\frac{\pi}{4K}\phi(t)\sqrt{u_3 - u_1}\right)\\ \phi(t) &\approx \sqrt{\frac{\bar{u}^3}{2}}\ t + \phi_0, \quad K = \int_0^1 \frac{\diff x}{\sqrt{(1 - x^2)(1 - \kappa^2x^2)}} \end{split}\label{eq:approx-disc-orbit} \end{equation} where $\bar{u} = (u_1 + u_2) / 2$ since, for circular orbits, \eqref{eq:k_prime} gives $\diff\phi / \diff t = \sqrt{u^3 / 2}$. For a linear particle parameterized by $a \in [0,2\pi[$, the position $u_a(t), \phi_a(t)$ of a point $a$ is obtained by replacing $\phi(t)$ with $\phi_a(t) = a + \phi(t)$ in \eqref{eq:approx-disc-orbit}. Thus, given a ray hit point $h^t, h^r, h^\phi$, we compute the parameter $a$ of the "nearest" particle point with $a = h^\phi - \phi(h^t) \mod 2\pi$. We then compute the "distance" between $h$ and the linear particle center (at $a = \pi$) with $d^2 = (a / \pi - 1)^2 + (h^r - 1 / u_a(h^t))^2$. We finally compute the particle density at $h$ with a smoothly decreasing function of $d$. \section{Camera orbit}\label{sec:cameraorbit} \paragraph*{Position} The camera position is specified by its polar coordinates $(r,\psi)$ in an orbital plane with inclination $\chi$ (see Fig.~\ref{fig:orbit}). In Schwarzschild coordinates adapted to this orbital plane the camera 4-velocity is $\vec{k}_c = [\frac{\diff t}{\diff \tau}, \frac{\diff r}{\diff \tau}, 0, \frac{\diff \psi}{\diff \tau}]^\top = [\frac{e}{1 - u}, \frac{\diff r}{\diff \tau}, 0, l u^2]^\top$, where $e$ and $l$ are two constants of motion. Substituting this in \eqref{eq:metric} gives \begin{equation*} \left(\frac{\diff r}{\diff \tau}\right)^2 = e^2 + l^2 u^3 - l^2 u^2 + u - 1 \Rightarrow \frac{\diff^2 r}{\diff \tau^2} = \frac{2 l^2 u^3 - 3 l^2 u^4 - u^2}{2} \end{equation*} We use these relations to update the coordinates $(t, r, \psi)$ at each proper time step $\diff \tau$. The corresponding Cartesian coordinates are \begin{equation*} r \begin{bmatrix} \cos\chi \cos\psi \\ \sin\psi\\ \sin\chi \cos\psi \end{bmatrix} = p^r \begin{bmatrix} \sin p^\theta \cos p^\phi\\ \sin p^\theta \sin p^\phi\\ \cos p^\theta \end{bmatrix} \end{equation*} from which we deduce the Schwarzschild coordinates $p^t = t $, $p^r = r$, $p^\theta = \arccos(\cos\psi \sin\chi)$ and $p^\phi = \arctan2(\sin\psi, \cos\chi \cos\psi)$. The above relations require the constants of motion $e$ and $l$. We compute them from the initial position, direction and speed, noted $r_0 = 1 / u_0$, $\delta_0$, and $v_0$ (see Fig.~\ref{fig:orbit}). We get $e^2 = (1 - u_0) / (1 - v_0^2)$ from the Lorentz factor $\gamma = g(\vec{k}_c, \vec{k}_s) = e / \sqrt{1 - u} = 1 / \sqrt{1 - v^2}$, where $\vec{k}_s = [1 / \sqrt{1 - u}, 0, 0, 0]^\top$ is the 4-velocity of a static observer. Finally, using $\tan\delta = r \diff\psi / \diff r$ and the above equations, we get $l^2 = (e^2 + u_0 - 1) / (u_0^2 (1 - u_0 + \cot^2\delta_0))$. \paragraph*{Lorentz transform} We compute the Lorentz transform $\Lambda$ from the static observer basis $\vec{e}_t, \vec{e}_r, \vec{e}_\theta, \vec{e}_\phi$ to the camera basis $\vec{e}_\tau, \vec{e}_w, \vec{e}_h, \vec{e}_d$ by using the intermediate orthonormal basis $\vec{e}_t, \vec{e}_r, \vec{e}_\chi, \vec{e}_\psi$ where $\mathbf{e}_\chi$ is the orbital plane's normal (see Fig.~\ref{fig:orbit}), as follows. 
Let ${R_k}^j$ be the rotation matrix from $\vec{e}_t, \vec{e}_r, \vec{e}_\theta, \vec{e}_\phi$ to $\vec{e}_t, \vec{e}_r, \vec{e}_\chi, \vec{e}_\psi$: $\vec{e}_k = {R_k}^j \vec{e}_j$, $k \in \{t, r, \chi, \psi\}$, $j \in \{t, r, \theta, \phi\}$. Its lower right block is \begin{equation*} \begin{bmatrix} \mathbf{e}_\chi \cdot \mathbf{e}_\theta & \mathbf{e}_\chi \cdot \mathbf{e}_\phi \\ -\mathbf{e}_\chi \cdot \mathbf{e}_\phi & \mathbf{e}_\chi \cdot \mathbf{e}_\theta \end{bmatrix}\ \mathrm{with} \begin{array}{l} \mathbf{e}_\chi \cdot \mathbf{e}_\theta = \sin\chi \cos p^\theta \cos p^\phi + \cos\chi \sin p^\theta \\ \mathbf{e}_\chi \cdot \mathbf{e}_\phi = \sin\chi \sin p^\phi \end{array} \end{equation*} In the $\vec{e}_t, \vec{e}_r, \vec{e}_\chi, \vec{e}_\psi$ basis the camera 4-velocity and speed are: \begin{equation*} \begin{split} \vec{k}_c &= \left[ \sqrt{1 - u} \frac{\diff t}{\diff \tau}, \frac{1}{\sqrt{1 - u}} \frac{\diff r}{\diff \tau}, 0, \frac{1}{u} \frac{\diff \psi}{\diff \tau} \right]^\top \\ \mathbf{v} &= \left[\frac{1}{1 - u} \frac{\diff r}{\diff \tau} / \frac{\diff t}{\diff \tau}, 0, \frac{1}{u \sqrt{1 - u}} \frac{\diff \psi}{\diff \tau} / \frac{\diff t}{\diff \tau} \right]^\top \end{split} \end{equation*} A reference frame for the camera is thus $\vec{e}_{k'} \triangleq {B(\mathbf{v})_{k'}}^k \vec{e}_k$, $k' \in \{t', r', \chi', \psi'\}$, where $B(\mathbf{v})$ is a Lorentz boost: ${B(\mathbf{v})_{k'}}^k = {B(-\mathbf{v})^{k'}}_k$, $\diff p^{k'} = {B(\mathbf{v})^{k'}}_k \diff p^k$~\cite{weinberg1972}. Finally, let ${O_i\,}^{k'}$ be a user specified rotation matrix. We then compute $\Lambda$ with ${\Lambda_i}^j = {O_i\,}^{k'} {B(\mathbf{v})_{k'}}^k {R_k}^j$. Note that this procedure assumes that the camera orientation is actively controlled, {\em i.e.} is not freely evolving as a gyroscope would be. \bibliographystyle{alpha} \bibliography{paper} \end{document}
{ "alphanum_fraction": 0.6895678328, "avg_line_length": 50.1349206349, "ext": "tex", "hexsha": "cc074a2e0f2e48c220a759c69ee1b86ce713c572", "lang": "TeX", "max_forks_count": 9, "max_forks_repo_forks_event_max_datetime": "2022-03-02T00:20:08.000Z", "max_forks_repo_forks_event_min_datetime": "2020-07-06T08:46:29.000Z", "max_forks_repo_head_hexsha": "e72b3f293409893a6fa25528b29572c96fc57f57", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "ebruneton/black_hole_shader", "max_forks_repo_path": "black_hole/paper/paper.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "e72b3f293409893a6fa25528b29572c96fc57f57", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "ebruneton/black_hole_shader", "max_issues_repo_path": "black_hole/paper/paper.tex", "max_line_length": 208, "max_stars_count": 68, "max_stars_repo_head_hexsha": "e72b3f293409893a6fa25528b29572c96fc57f57", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "ebruneton/black_hole_shader", "max_stars_repo_path": "black_hole/paper/paper.tex", "max_stars_repo_stars_event_max_datetime": "2022-03-19T08:20:22.000Z", "max_stars_repo_stars_event_min_datetime": "2020-04-12T20:40:22.000Z", "num_tokens": 14194, "size": 44219 }
% ############################################################################# % This is the MAIN DOCUMENT of the IST-UL-Project-Report TEMPLATE. % !TEX root = ./main.tex % ############################################################################# % The document is automatically set for english or portuguese by just selecting % the MAIN LANGUAGE in file 'IST-UL-Project-Report-Preamble.tex' % ############################################################################# % Version 1.0, October 2018 % BY: Prof. Rui Santos Cruz, [email protected] % ############################################################################# % Set the document class % ---------------------------------------------------------------------- \documentclass[12pt,a4paper,oneside]{report} % ----------------------------------------------------------------------------- % The Preamble document contains all the necessary Packages for typesetting % Modify it to suit your needs % ----------------------------------------------------------------------------- \input{./IST-UL-Project-Report-Preamble} % ############################################################################# \begin{document} % Set plain page style (no headers, footer with centered page number) \pagestyle{plain} % Set roman numbering (i,ii,...) before the start of chapters \pagenumbering{roman} % ---------------------------------------------------------------------------- % Cover page \input{IST-UL-Project-Report-Front_Cover} \cleardoublepage % ---------------------------------------------------------------------------- % Table of contents, list of tables, list of figures and nomenclature % ---------------------------------------------------------------------------- \tableofcontents \clearpage \begingroup \let\clearpage\relax \let\cleardoublepage\relax \let\cleardoublepage\relax % List of tables %\listoftables % Add entry in the table of contents as section %\addcontentsline{toc}{section}{\listtablename} % List of figures \listoffigures % Add entry in the table of contents as section \addcontentsline{toc}{section}{\listfigurename} \endgroup % -------------------------------------------------------------------------- % acronyms DEFINE YOUR ACRONYMS HERE % -------------------------------------------------------------------------- \chapter*{\tlangAcronyms} \begin{acronym}[NGOSS] \acro{GCP}{Google Cloud Platform} \acro{AWS}{Amazon web Services} \acro{mongoDB}{Mongo database} \acro{JSON}{JavaScript Object Notation} \acro{VM}{Virtual Machine} \acro{K8s}{Kubernetes} \acro{DNS}{Domain Name System} \end{acronym} % Set arabic numbering (1,2,...) after preface \setcounter{page}{1} \pagenumbering{arabic} % ############################################################################# % % BEGIN MAIN DOCUMENT BODY % % ############################################################################# \chapter{Executive Summary} \label{chapter:introduction} The goal of this work is to show an example on how to deploy and provision tiered (frontend, backend) Microservices-based containerized Web Application on a Public Cloud provider, such as \ac{GCP} or \ac{AWS}, using automation tools such as Terraform, Ansible, Pulumi, as well as implementing instrumentation on the applications, services and infrastructure components of the solution, to allow monitoring and logging features by using tools such as Prometheus, Fluentd, etc. 
This work studies RocketChat, an open-source platform for team collaboration that provides live chat, direct and group messages, image uploads and other interesting features~\cite{rocketchat}. It is written in full-stack JavaScript and uses \ac{mongoDB}, a NoSQL database that uses JSON-like documents as its schema~\cite{mongodb}. A monitoring system is also implemented, using Grafana and Prometheus. Grafana is an open-source analytics and interactive visualization web application that provides charts, graphs, and alerts for the web~\cite{grafana}; its dashboards allow users to better understand the collected metrics. Prometheus is an open-source event monitoring and alerting tool that records real-time metrics, with flexible queries and real-time alerting~\cite{prometheus}.

This report starts with the System Architecture (Chapter~\ref{chapter:sys-architecture}), covering its requirements and constraints. The next chapter (Chapter~\ref{chapter:deployment}) shows how to deploy the project and includes some images of the project running. The Versioning Chapter~\ref{chapter:versioning} explains how the system versions were managed, and finally the Evolution Chapter~\ref{chapter:work-evol} covers the extra steps the group took to achieve the best solution possible in the time given (for example, it discusses the previous version of the project, the Docker Compose version).

There is also a video related to this project, showing how to deploy it and giving an overview of the work done. This video can be found at the following link: \url{https://www.youtube.com/watch?v=e5jRSE9jek8}

\cleardoublepage
% #############################################################################
\chapter{System Architecture}
\label{chapter:sys-architecture}

\begin{figure}[h]
	\centering
	\includegraphics[width=1.0\textwidth]{./pictures/overview-system-architecture.png}
	\caption{Overview System Architecture}
	\label{fig:overview-sys-architecture}
\end{figure}

Figure~\ref{fig:overview-sys-architecture} shows, at a high level, how the system is composed. The system satisfies two use cases: the monitoring use case and the ``normal'' web service use case. The first is about how a user can view the collected metrics; the second is about how a user can use the web service application.

\begin{itemize}
    \item \textbf{Monitoring use case:} The user accesses the Grafana dashboard to see the collected metrics. Grafana interacts with Prometheus, which is responsible for collecting the metrics related to all the components of the system (the next section, Section~\ref{section:analysis}, explains that the system is deployed as a cluster and that the metrics are collected from the cluster itself).
    \item \textbf{Web Service Application use case:} The user accesses the RocketChat frontend web page to use the application. The application's frontend talks to \ac{mongoDB} to handle the database operations. The load balancer distributes the user requests across the RocketChat servers (more than one, to support more users with better performance).
\end{itemize}

% #############################################################################
\section{Analysis}
\label{section:analysis}

The system is built on top of \ac{K8s} in order to satisfy the system requirements.
\begin{figure}[htb]
	\centering
	\includegraphics[width=1.0\textwidth]{./pictures/system_architecture_k8s.png}
	\caption{System Architecture in Kubernetes}
	\label{fig:sys-architecture-k8s}
\end{figure}

As can be seen in Figure~\ref{fig:sys-architecture-k8s}, the \ac{K8s} environment is composed of several objects:

\begin{itemize}
    \item \textbf{RocketChat Pods} handle the requests from the clients (they are the RocketChat servers). Each Pod contains a RocketChat container and exposes port 3000 so that the RocketChat app can be reached;
    \item \textbf{RocketChat Service} load-balances the client requests across the RocketChat Pods. The Service type is LoadBalancer, which provisions a load balancer for the service. It exposes port 3000 to the outside and forwards traffic to port 3000 of the RocketChat Pods;
    \item \textbf{RocketChat Mongo Pods} handle the requests from the RocketChat servers and persist the data into the database, stored in a volume. Each Pod contains the RocketChat-Mongo container responsible for this, and a second container called ``rocketchat-mongo-sidecar'', a sidecar container that automatically configures new Mongo nodes to join the replica set;
    \item \textbf{RocketChat Mongo Service} forwards the traffic to the Mongo Pods. It is a headless Service, so no load balancing or proxying is done. It defines a selector, so endpoint records are created in the API and the \ac{DNS} returns A records (IP addresses) that point directly to the Pods backing the Service;
    \item \textbf{Grafana Pods} handle the requests for monitoring purposes. Each Pod contains a Grafana container and exposes port 3000 so that the Grafana app can be reached;
    \item \textbf{Grafana Service} load-balances the client requests across the Grafana Pods. The Service type is LoadBalancer, which provisions a load balancer for the service. It exposes port 3000 to the outside and forwards traffic to port 3000 of the Grafana Pods;
    \item \textbf{Prometheus Pods} collect metrics about the cluster and its resources. Each Pod contains a Prometheus container and exposes port 9090 so that the Prometheus app can be reached;
    \item \textbf{Prometheus Service} load-balances the client requests across the Prometheus Pods. The Service type is LoadBalancer, which provisions a load balancer for the service. It exposes port 9090 to the outside and forwards traffic to port 9090 of the Prometheus Pods.
\end{itemize}

There are two important factors to notice here. The first is that there are two \ac{K8s} namespaces, which organize the cluster into virtual sub-clusters, allowing better management by different teams in a real-world environment. With this in mind, an \textbf{application} namespace was created, containing the RocketChat server and Mongo Pods and their Services. The other namespace is called \textbf{istio-system} and contains all resources related to monitoring: the Grafana and Prometheus Pods, their Services and other important resources. The second important factor is that all these Pods can be scaled up or down depending on the need.

% #############################################################################
%\newpage
\subsection{Built With}
\label{section:built-with}

\begin{itemize}
    \item \textbf{Vagrant} (\url{https://www.vagrantup.com/}) - Vagrant provides the same, easy workflow regardless of your role as a developer, operator, or designer.
    \item \textbf{Terraform} (\url{https://www.terraform.io/}) - Terraform is an open-source infrastructure as code software tool that provides a consistent CLI workflow to manage hundreds of cloud services.
    \item \textbf{Docker} (\url{https://docker.com/}) - Docker is the \#1 most wanted and \#2 most loved developer tool, and helps millions of developers build, share and run any app, anywhere -- on-prem or in the cloud.
    \item \textbf{Google Cloud Platform} (\url{https://cloud.google.com}) - A reliable and high-performance cloud platform from Google.
    \item \textbf{Kubernetes} (\url{https://kubernetes.io}) - Kubernetes is an open-source container-orchestration system for automating computer application deployment, scaling, and management.
\end{itemize}

The following technologies were also used in the Docker Compose version of the project (the previous version -- more information in the Evolution Chapter~\ref{chapter:work-evol}):

\begin{itemize}
    \item \textbf{Ansible} (\url{https://www.ansible.com/}) - Ansible is an open-source community project sponsored by Red Hat and is the simplest way to automate IT.
    \item \textbf{Docker Compose} (\url{https://docs.docker.com/compose/}) - Compose is a tool for defining and running multi-container Docker applications. With Compose, you use a YAML file to configure your application's services.
\end{itemize}

% #############################################################################
\subsection{Requirements}
\label{section:requirements}

\paragraph{The Requirements to Run the Application} are the following software:
\begin{description}
    \item[R01] Vagrant (latest);
    \item[R02] Virtualbox (latest).
\end{description}

To confirm that the software is installed, type the following commands in a terminal:
\begin{lstlisting}[style=Bash]
$ vagrant --version
$ vboxmanage --version
\end{lstlisting}

Vagrant is necessary to create the \textbf{management machine}. This is done with a Vagrantfile that uses a Virtualbox provider (this Vagrantfile is not prepared to run on systems with the ARM aarch64 architecture, namely Apple Silicon M1 computers -- to run it on such a machine, another provider such as Docker must be used).

It is also a requirement to have a \textbf{\ac{GCP} account}. With it, it is possible to create a project, enable the API (see the note below for further information) and then download the credentials.json file to the terraform folder (to authenticate the project).

\textbf{NOTE:} The APIs and services must be enabled for the project, by choosing API \& Services in the Google Cloud Console and then selecting the Dashboard, where a button for enabling those services is available in the top menu. Then enable the \textbf{Kubernetes Engine API}.

\paragraph{The Requirements for the System} are:
\begin{description}
    \item[R01] \label{itm:r01} Easy and Automatic Deployment;
    \item[R02] \label{itm:r02} Scalability;
    \item[R03] \label{itm:r03} Portability;
    \item[R04] \label{itm:r04} Flexibility.
\end{description}

For requirement \textbf{R01}, the system is deployed using Terraform: it is only necessary to run the commands described in the Deployment Chapter (Chapter~\ref{chapter:deployment}) and everything is deployed.
For requirements \textbf{R02}, \textbf{R03} and \textbf{R04}, the system is deployed on top of Kubernetes which, although more complex, allows for autoscaling and is flexible and portable, meaning it works on any type of underlying infrastructure and can be used with many different infrastructure and environment configurations.

% #############################################################################
\subsection{Constraints}
\label{section:constraints}

Here are the constraints for the system:
\begin{description}
    \item[C01] The configuration of MONGO\_URL and MONGO\_OPLOG\_URL for the RocketChat-Server Pods needs to be written manually.
\end{description}

Scaling up \ac{mongoDB} is automatic: the new Pods automatically join the \ac{mongoDB} replica set. This happens because of the sidecar container used, explained in the Analysis Section~\ref{section:analysis}.

The \textbf{C01} constraint exists because the RocketChat application needs two environment variables called MONGO\_URL and \newline MONGO\_OPLOG\_URL \cite{rocketchat-create-docker-container}. These variables declare how to access the \ac{mongoDB} nodes of the replica set. For example, suppose a new, fifth RocketChat Mongo Pod is added to the replica set. It is then necessary to add to MONGO\_URL and MONGO\_OPLOG\_URL how to reach this new Pod, by adding its DNS name, which for this example would be something like ``rocketchat-mongo-5.rocketchat-mongo.application'' ($<$pod-name$>$.$<$service-name$>$.$<$namespace$>$). This is supported by the Kubernetes documentation on running MongoDB on Kubernetes with StatefulSets \cite{running-mongodb-on-kubernetes}, which says the following: ``Include the two new nodes in your connection string URI and you are good to go. Too easy!''

% #############################################################################
\subsection{Project Structure}
\label{section:proj-structure}

\begin{verbatim}
|- Vagrantfile
|- bootstrap-mgmt.sh
|- docs
|- docker-compose-version
\- tools
   \- terraform
      |- agisit-2021-rocketchat-06.json
      \- cluster
         |- gcp-gke-main.tf
         |- gcp-gke-provider.tf
         |- terraform.tfvars
         |- gcp_gke
         |  \- gcp-gke-cluster.tf
         \- gcp_k8s
            |- monitoring
            |  |- grafana.yaml
            |  \- prometheus.yaml
            |- k8s-istio.tf
            |- k8s-monitoring.tf
            |- k8s-namespaces.tf
            |- k8s-pods.tf
            |- k8s-provider.tf
            |- k8s-services.tf
            \- k8s-variables.tf
\end{verbatim}

\begin{itemize}
    \item At the root there is the \textit{Vagrantfile} needed to create the management VM (\textit{mgmt}). For this, the \textit{bootstrap-mgmt.sh} script is executed; the goal of this script is to install the necessary software, or in other words, to prepare the management machine. The management machine is used to perform all the operations of the project;
    \item The \textit{docs} folder contains the reports from all checkpoints and the images;
    \item The \textit{docker-compose-version} folder contains the previous checkpoints' implementation (more information in the Evolution Chapter~\ref{chapter:work-evol});
    \item The \textit{tools} folder contains all the project's infrastructure/services files that are deployed with the help of the \textit{mgmt} VM. Inside this folder there is:
    \begin{itemize}
        \item The \textit{terraform} folder to provision the infrastructure. Here we have the following folders and files:
        \subitem The JSON file with the credentials to be used in the deployment of the cluster
        (you should change this file to the credentials of your project!);
        \subitem The \textit{cluster} folder, which contains everything used to deploy the entire cluster infrastructure:
        \begin{itemize}
            \item \textit{gcp-gke-main.tf}, defines the modules used for the deployment and provisioning;
            \item \textit{gcp-gke-provider.tf}, defines the provider to be used;
            \item \textit{terraform.tfvars}, defines some variables used with the Terraform files;
            \item \textit{gcp\_gke}, folder for the cluster definition file;
            \subitem \textit{gcp-gke-cluster.tf}, defines the cluster as well as outputs certain values.
            \item \textit{gcp\_k8s}, folder containing the Terraform files defining the Pods, Services, namespaces and other resources needed.
            \subitem the \textit{monitoring} folder with:
            \begin{itemize}
                \item \textit{grafana.yaml}, the entire configuration file for the Grafana system.
                \item \textit{prometheus.yaml}, the entire configuration file for the Prometheus system.
            \end{itemize}
            \subitem \textit{k8s-istio.tf}, ISTIO Service Mesh deployment via Helm charts.
            \subitem \textit{k8s-monitoring.tf}, deploys Grafana and Prometheus, referring to the yaml files in the monitoring folder.
            \subitem \textit{k8s-namespaces.tf}, defines the namespaces used.
            \subitem \textit{k8s-pods.tf}, defines the Pods to be deployed.
            \subitem \textit{k8s-provider.tf}, defines the providers that Terraform needs and certain configurations for each of them.
            \subitem \textit{k8s-services.tf}, defines the Pods' Services.
            \subitem \textit{k8s-variables.tf}, variables to be used that are obtained after the provisioning of the cluster.
        \end{itemize}
    \end{itemize}
\end{itemize}

\cleardoublepage
% #############################################################################
\chapter{Deployment}
\label{chapter:deployment}

This project was designed to have an easy deployment. The first thing to do is to go to the project directory and run the following commands to bring up and connect to \textit{mgmt} -- the management \ac{VM} (bastion):
\begin{lstlisting}[style=Bash]
$ vagrant up
$ vagrant ssh mgmt
\end{lstlisting}

After this, inside \textit{mgmt}, run the following command to authenticate with \ac{GCP} (it will give a link; open it in a browser, log in with an IST account, copy the response code and paste the verification code in the command line):
\begin{lstlisting}[style=Bash]
vagrant@mgmt:~$ gcloud auth login
\end{lstlisting}

Then, still inside \textit{mgmt}, create the infrastructure by running the following commands (the first initializes Terraform, in order to satisfy some plugin requirements; the next ones create a plan and create the infrastructure by running apply):
\begin{lstlisting}[style=Bash]
vagrant@mgmt:~/tools/terraform/cluster$ terraform init
vagrant@mgmt:~/tools/terraform/cluster$ terraform plan
vagrant@mgmt:~/tools/terraform/cluster$ terraform apply
\end{lstlisting}

After this, all the resources will be created. It is possible to see all the created resources by running the command \textit{kubectl get all -{}-all-namespaces}, which shows information about all available namespaces. This command also lists some \ac{K8s} resources, like \textit{kube-dns}, which are not useful for our test case, so let us simply get the resources of the project namespaces. First, let us get all resources related to the application namespace by running the command \textit{kubectl get all -n application}, producing the output in Figure~\ref{fig:all-resources-application-namespace}.
\begin{figure}[h]
	\centering
	\includegraphics[width=1.0\textwidth]{./pictures/kubctl-get-all-application-ns.png}
	\caption{All resources from application namespace}
	\label{fig:all-resources-application-namespace}
\end{figure}

Briefly, it is possible to see that what was deployed was (more information in the Analysis Section~\ref{section:analysis}):
\begin{itemize}
    \item RocketChat Pods to handle the requests from the clients (the RocketChat servers).
    \item RocketChat Service to load-balance the client requests across the RocketChat Pods.
    \item MongoDB Pods to handle the requests from the RocketChat servers (these are the Pods called RocketChat-Mongo).
    \item MongoDB Service to forward the traffic to the MongoDB Pods.
\end{itemize}

Second, let us get all resources related to the istio-system namespace by running the command \textit{kubectl get all -n istio-system}, producing the output in Figure~\ref{fig:all-resources-istio-namespace}.

\begin{figure}[h]
	\centering
	\includegraphics[width=1.0\textwidth]{./pictures/kubctl-get-all-istio-system-ns.png}
	\caption{All resources from istio-system namespace}
	\label{fig:all-resources-istio-namespace}
\end{figure}

Briefly, it is possible to see that what was deployed was (more information in the Analysis Section~\ref{section:analysis}):
\begin{itemize}
    \item Grafana Pods to handle the requests for monitoring purposes.
    \item Grafana Service to load-balance the client requests across the Grafana Pods.
    \item Prometheus Pods that collect metrics about the cluster and its resources.
    \item Prometheus Service to load-balance the client requests across the Prometheus Pods.
\end{itemize}

After the deployment, if the \textit{kubectl} tool is needed for any purpose, it is first necessary to run the following command, replacing the parameters between angle brackets ($<$parameter$>$) with the respective project data:
\begin{lstlisting}[style=Bash]
vagrant@mgmt:~/tools/terraform/cluster$ gcloud container clusters get-credentials <project_name> --zone <project_zone>
\end{lstlisting}

\section{Notes}
\begin{itemize}
    \item If more worker nodes are needed, this can be done by changing the \textit{workers\_count} variable in the file \textit{terraform.tfvars}.
    \item If more replicas of the RocketChat-server or RocketChat-mongo are needed, this can be done by changing the \textit{replicas} variable in the file \textit{k8s-pods.tf}. For Grafana and Prometheus, the same variable should be changed in their respective .yaml file inside the monitoring folder. If the cluster is already running, replicas can also be added using the Kubernetes command line tool.
    \item If the region needs to be changed, this can be done through the \textit{region} variable in the file \textit{terraform.tfvars}.
    \item Currently there is a bug on the application's side that prevents RocketChat's setup wizard from working as intended, but that is outside the scope of our work. The problem lies with the RocketChat developers, because there is full connectivity between the respective resources of the cluster. As we can see in Figure~\ref{fig:html-from-rocektchar-pod}, the rocketchat-server service responds perfectly well to a client request; if that were not the case, a 50X error would be returned.
\begin{figure}[h]
\centering
\includegraphics[width=1.0\textwidth]{./pictures/curl-success-page-rocketchat.png}
\caption{Port-forwarding to the RocketChat Pod and using the curl command to get the RocketChat page directly from the Pod is successful}
\label{fig:html-from-rocektchar-pod}
\end{figure}

% #############################################################################
\cleardoublepage
% #############################################################################
\chapter{Versioning}
\label{chapter:versioning}

For this project, the RNL Git (\url{https://git.rnl.tecnico.ulisboa.pt/AGISIT-21-22/team-06}) was used for versioning.

\cleardoublepage
% #############################################################################
\chapter{Work Evolution}
\label{chapter:work-evol}

With this project it was possible to show an example of how to deploy and provision a tiered, microservices-based, containerized Web Application on a Public Cloud using automation tools, as well as how to implement instrumentation on the applications, services and infrastructure components of the solution to allow monitoring and logging features.

Initially, the system was deployed using the following tools: Terraform to create the infrastructure, Ansible to configure the created infrastructure, and then Docker Compose to deploy the services needed. This version is called the Docker Compose Version. \textbf{The goal of this project was to try several tools and learn as much as possible}, and that is the reason why Docker Compose was chosen over Kubernetes at first.

With Docker Compose, some limitations were found. The first limitation is that more files and commands were needed to run the application, because the method is more manual. With Docker Compose it is possible to configure and start multiple Docker containers on the same host, so multiple Docker Compose commands were needed to deploy the required services on each host (there was a host for RocketChat, another for Mongo, one more for Grafana, and another one for Prometheus, as can be seen in Figure \ref{fig:docker-compose-version-proj-struc}, which shows the project structure of the Docker Compose Version). The second limitation resides in the fact that the scaling up/down mechanism of Docker Compose is also largely manual, because a command has to be run each time some scaling is needed (for example, running the docker-compose command with the flag \textit{--scale serviceX=5}, which would increase the number of serviceX containers to 5, as illustrated at the end of this chapter).

\begin{figure}[h]
\centering
\includegraphics[width=0.8\textwidth]{./pictures/docker-compose-version-proj-structure.png}
\caption{Docker Compose Version Project Structure}
\label{fig:docker-compose-version-proj-struc}
\end{figure}

After analysing the limitations, the group chose to switch the orchestration tool to \ac{K8s} for a more reliable and automated solution. Although more complex, \ac{K8s} gives more flexibility and the possibility of building an autoscalable system (the scaling mechanism can be automatic, in contrast with the scaling mechanism of Docker Compose). Also, in the final solution the deployment process is now faster and more autonomous.
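To make the scaling comparison concrete, the manual Docker Compose step mentioned above would have to be run against the host of the service in question each time (the service name \textit{serviceX} is just a placeholder):

\begin{lstlisting} [style=Bash]
$ docker-compose up -d --scale serviceX=5
\end{lstlisting}

In the final \ac{K8s} solution the same effect is obtained by changing the \textit{replicas} value in \textit{k8s-pods.tf} or with a single \textit{kubectl scale} command, as sketched in the notes of Chapter \ref{chapter:deployment}.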
%\lipsum[61-63] % ############################################################################# % END OF MAIN DOCUMENT BODY % ############################################################################# % ----------------------------------------------------------------------------- % Bibliography % ----------------------------------------------------------------------------- %\bibliographystyle{IEEEtran} % > entries ordered in the order in which the citations appear, with numeric % reference markers % External bibliography database file in the BibTeX format \cleardoublepage %\bibliography{IST-UL-Project-Report_bib_DB} \begin{thebibliography}{9} % Add entry in the table of contents as chapter \addcontentsline{toc}{chapter}{\bibname} \bibitem{rocketchat} RocketChat. \url{https://rocket.chat/} \bibitem{mongodb} MongoDB. \url{https://en.wikipedia.org/wiki/MongoDB} \bibitem {grafana} Grafana. \url{https://en.wikipedia.org/wiki/Grafana} \bibitem{prometheus} Prometheus. \url{https://en.wikipedia.org/wiki/Prometheus_(software)} \bibitem {rocketchat-create-docker-container} RocketChat. High Availability. Create rocket.chat docker container. \url{https://docs.rocket.chat/quick-start/installing-and-updating/docker-containers/high-availability-install#create-rocket.chat-docker-container} \bibitem{running-mongodb-on-kubernetes} Running MongoDB on Kubernetes with StatefulSets. (2017, Jan 30). Document Website: \url{https://kubernetes.io/blog/2017/01/running-mongodb-on-kubernetes-with-statefulsets/} \end{thebibliography} % ############################################################################# \end{document} % #############################################################################
{ "alphanum_fraction": 0.7022020154, "avg_line_length": 76.157622739, "ext": "tex", "hexsha": "70e58b76ee63062bd84fee7ebfce0696d3798c64", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "3ffab05297f263316634953b8fe7a36a061eeabe", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "BFreitas16/WebApp-cloud", "max_forks_repo_path": "docs/latex-files/IST-UL-Project-Report-Main_Document.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "3ffab05297f263316634953b8fe7a36a061eeabe", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "BFreitas16/WebApp-cloud", "max_issues_repo_path": "docs/latex-files/IST-UL-Project-Report-Main_Document.tex", "max_line_length": 1405, "max_stars_count": null, "max_stars_repo_head_hexsha": "3ffab05297f263316634953b8fe7a36a061eeabe", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "BFreitas16/WebApp-cloud", "max_stars_repo_path": "docs/latex-files/IST-UL-Project-Report-Main_Document.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 6483, "size": 29473 }
\PassOptionsToPackage{unicode=true}{hyperref} % options for packages loaded elsewhere \PassOptionsToPackage{hyphens}{url} % \documentclass[]{book} \usepackage{lmodern} \usepackage{amssymb,amsmath} \usepackage{ifxetex,ifluatex} \usepackage{fixltx2e} % provides \textsubscript \ifnum 0\ifxetex 1\fi\ifluatex 1\fi=0 % if pdftex \usepackage[T1]{fontenc} \usepackage[utf8]{inputenc} \usepackage{textcomp} % provides euro and other symbols \else % if luatex or xelatex \usepackage{unicode-math} \defaultfontfeatures{Ligatures=TeX,Scale=MatchLowercase} \fi % use upquote if available, for straight quotes in verbatim environments \IfFileExists{upquote.sty}{\usepackage{upquote}}{} % use microtype if available \IfFileExists{microtype.sty}{% \usepackage[]{microtype} \UseMicrotypeSet[protrusion]{basicmath} % disable protrusion for tt fonts }{} \IfFileExists{parskip.sty}{% \usepackage{parskip} }{% else \setlength{\parindent}{0pt} \setlength{\parskip}{6pt plus 2pt minus 1pt} } \usepackage{hyperref} \hypersetup{ pdftitle={Outstanding User Interfaces with Shiny}, pdfauthor={David Granjon}, pdfborder={0 0 0}, breaklinks=true} \urlstyle{same} % don't use monospace font for urls \usepackage{longtable,booktabs} % Fix footnotes in tables (requires footnote package) \IfFileExists{footnote.sty}{\usepackage{footnote}\makesavenoteenv{longtable}}{} \usepackage{graphicx,grffile} \makeatletter \def\maxwidth{\ifdim\Gin@nat@width>\linewidth\linewidth\else\Gin@nat@width\fi} \def\maxheight{\ifdim\Gin@nat@height>\textheight\textheight\else\Gin@nat@height\fi} \makeatother % Scale images if necessary, so that they will not overflow the page % margins by default, and it is still possible to overwrite the defaults % using explicit options in \includegraphics[width, height, ...]{} \setkeys{Gin}{width=\maxwidth,height=\maxheight,keepaspectratio} \setlength{\emergencystretch}{3em} % prevent overfull lines \providecommand{\tightlist}{% \setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}} \setcounter{secnumdepth}{5} % Redefines (sub)paragraphs to behave more like sections \ifx\paragraph\undefined\else \let\oldparagraph\paragraph \renewcommand{\paragraph}[1]{\oldparagraph{#1}\mbox{}} \fi \ifx\subparagraph\undefined\else \let\oldsubparagraph\subparagraph \renewcommand{\subparagraph}[1]{\oldsubparagraph{#1}\mbox{}} \fi % set default figure placement to htbp \makeatletter \def\fps@figure{htbp} \makeatother \usepackage{booktabs} \usepackage{amsthm} \makeatletter \def\thm@space@setup{% \thm@preskip=8pt plus 2pt minus 4pt \thm@postskip=\thm@preskip } \makeatother \usepackage[]{natbib} \bibliographystyle{apalike} \title{Outstanding User Interfaces with Shiny} \author{David Granjon} \date{2020-04-12} \begin{document} \maketitle { \setcounter{tocdepth}{1} \tableofcontents } \hypertarget{prerequisites}{% \chapter{Prerequisites}\label{prerequisites}} This book requires to be familiar with the R Shiny framework. 
We recommend reading \ldots{}

\hypertarget{intro}{%
\chapter{Introduction}\label{intro}}

To do

\hypertarget{part-survival-kit}{%
\part*{Survival Kit}\label{part-survival-kit}}
\addcontentsline{toc}{part}{Survival Kit}

\hypertarget{introduction}{%
\chapter*{Introduction}\label{introduction}}
\addcontentsline{toc}{chapter}{Introduction}

This part will give you the basics of HTML and JavaScript to get started\ldots{}

\hypertarget{part-htmltools}{%
\part*{htmltools}\label{part-htmltools}}
\addcontentsline{toc}{part}{htmltools}

While building a custom HTML template, you will need to know more about the wonderful \href{https://github.com/rstudio/htmltools}{htmltools} package developed by Winston Chang, a member of the shiny core team. It has the same spirit as devtools, that is, making your life as a web developer easier. What follows does not pretend to be an exhaustive guide to this package. Yet, it will provide you with the main tools to be more efficient.

\hypertarget{practice}{%
\chapter{Practice}\label{practice}}

In this chapter, you will learn how to build your own HTML templates, taken from the web, and package them so that they can be re-used at any time by anybody.

\hypertarget{selecting-a-good-template}{%
\section{Selecting a good template}\label{selecting-a-good-template}}

There exist tons of HTML templates on the web. However, only a few will be suitable for shiny, mainly because of what follows:

\begin{itemize}
\tightlist
\item
  shiny is built on top of \href{https://getbootstrap.com/docs/3.3/}{bootstrap 3} (an HTML, CSS and JavaScript framework), meaning that going for another framework might not be straightforward. However, shinymaterial and shiny.semantic are examples showing that this is possible.
\item
  shiny relies on \href{https://jquery.com}{jQuery} (currently v 1.12.4 for shiny, whereas the latest version is 3.3.1). Consequently, all templates based upon React, Vue and other JavaScript frameworks will not be natively supported. Again, there exist some \href{https://github.com/alandipert/react-widget-demo/blob/master/app.R}{examples} for React with shiny and, more generally, the \href{https://react-r.github.io/reactR/}{reactR} package developed by Kent Russell (\citet{timelyportfolio} on Twitter) and Alan Dipert from RStudio.
\end{itemize}

See \href{https://github.com/rstudio/shiny/tree/master/inst/www/shared}{the github repository} for more details about all dependencies related to the shiny package. Therefore, in the following, we will restrict ourselves to Bootstrap (3 and 4) together with jQuery. Don't be disappointed, since there is still a lot to say.

\begin{quote}
Notes: As shiny depends on Bootstrap 3.3.7, we recommend that users who would like to experiment with Bootstrap 4 features be particularly careful about potential incompatibilities. See a working example with \href{https://github.com/RinteRface/bs4Dash}{bs4Dash}.
\end{quote}

Good sources of \textbf{open source} HTML templates are \href{https://colorlib.com}{Colorlib} and \href{https://www.creative-tim.com/bootstrap-themes/free}{Creative Tim}. You might also buy your template, but then forget about the packaging option, which would be illegal in this particular case unless you have a legal agreement with the author (very unlikely, however).

\bibliography{book.bib,packages.bib}
\end{document}
{ "alphanum_fraction": 0.7752969121, "avg_line_length": 41.0064935065, "ext": "tex", "hexsha": "f2980ea007efed261295627d641b55af08aab7cc", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "8011b7d801246db2275d7b4ae982d24bd4cded7d", "max_forks_repo_licenses": [ "CC0-1.0" ], "max_forks_repo_name": "flalom/outstanding-shiny-ui", "max_forks_repo_path": "bookdown-demo.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "8011b7d801246db2275d7b4ae982d24bd4cded7d", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "CC0-1.0" ], "max_issues_repo_name": "flalom/outstanding-shiny-ui", "max_issues_repo_path": "bookdown-demo.tex", "max_line_length": 438, "max_stars_count": null, "max_stars_repo_head_hexsha": "8011b7d801246db2275d7b4ae982d24bd4cded7d", "max_stars_repo_licenses": [ "CC0-1.0" ], "max_stars_repo_name": "flalom/outstanding-shiny-ui", "max_stars_repo_path": "bookdown-demo.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1782, "size": 6315 }
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % GEANT manual in LaTeX form % % % % Michel Goossens (for translation into LaTeX) % % Version 1.00 % % Last Mod. Jan 24 1991 1300 MG + IB % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \Origin{L.Urb\'{a}n} \Revision{J. Chwastowski} \Submitted{26.10.84}\Revised{16.12.93} \Version{GEANT 3.16}\Routid{PHYS231} \Makehead{Simulation of photoelectric Effect} \section{Subroutines} \Shubr{GPHOT} {\tt GPHOT} simulates the photoelectric effect. It uses the following input and output: \begin{DLtt}{MMMMMMM} \item[Input]{via common \Cind{GCTRAK}} \item[Output]{via common \Cind{GCKING}} \end{DLtt} It calls the functions \Rind{GPHSG1}, \Rind{GPHSGP}, \Rind{GAVRL2} and \Rind{GAVRL3} for the cross-section and the functions \Rind{GHPHAK}, \Rind{GHPHAL1}, \Rind{GHPHAL2}, \Rind{GPHAL3} for the cosine distribution of the photoelectron. \Rind{GPHOT} is called by the tracking routine \Rind{GTGAMA}, if, and when, the photon reaches its interaction point. \Sfunc{GAVRL2}{VALUE = GAVRL2(GAMAL2,BETAL2,X)} \Rind{GAVRL2} calculates approximation of the cross-section for the photoelectric effect from $L_{II}$ shell. {\tt GAMAL2}, {\tt BETAL2} are the Lorentz gamma and beta factors of the emitted photoelectron and $X = m_e/E_\gamma$ where $m_e$ is the electron mass and $E_\gamma$ is the incident radiation energy. \Rind{GAVRL2} is called by \Rind{GHPOT}. \Sfunc{GAVRL3}{VALUE = GAVRL3(GAMAL3,BETAL3,X)} \Rind{GAVRL3} calculates approximation of the cross-section for the photoelectric effect from $L_{III}$ shell. {\tt GAMAL3}, {\tt BETAL3} are the Lorentz gamma and beta factors of the emitted photoelectron and $X = m_e/E_\gamma$ where $m_e$ is the electron mass and $E_\gamma$ is the incident radiation energy. {\tt GAVRL3} is called by \Rind{GPHOT}. \Sfunc{GPHAK}{VALUE = GPHAK(BETAK)} \Sfunc{GPHAL1}{VALUE = GPHAL1(BETAL1)} {\tt GPHAK} and its entry point {\tt GPHAL1} poll the cosine distribution (w.r.t. the incident photon direction) of the photoelectron emitted from $K$ or $L_{I}$ shell, respectively. They are called by \Rind{GPHOT}. \Sfunc{GPHAL2}{VALUE = GPHAL2(BETAL2)} \Rind{GPHAL2} polls the cosine distribution (w.r.t. the incident photon direction) of the photoelectron emitted from $L_{II}$ shell. \Sfunc{GPHAL3}{VALUE = GPHAL3(BETAL3)} {\tt GPHAL3} polls the cosine distribution (w.r.t. the incident photon direction) of the photoelectron emitted from $L_{III}$ shell. It is called by \Rind{GPHOT}. \Sfunc{GPHSGP}{VALUE = GPHSGP(EGAM)} \Rind{GPHSGP} calculates the photoelectric effect total cross-section for a particular atom of a mixture taking into account weights. It is called by \Rind{GPHOT} for ${\tt Z} \leq 100$. \section{Method} If the energy of the radiation incident on an atom is $E_{\gamma}$, then the quanta can be absorbed if $E_{\gamma} > E_{shell}$. The photoelectron is emitted with total energy: \begin{equation} E_{photoelectron} = E_{\gamma}-E_{shell}+m_e. \end{equation} In the above equation it is assumed that the atom has infinite mass. One should note that there exists a correction term (see \cite{bib-BETHE} and references therein) which becomes more and more important with increasing $E_{\gamma}$ \cite{bib-HALL} \cite{bib-PRATT} \cite{bib-PRATT1}. \subsection{Probability of Interaction with an Atom} Probability of the interaction with an atom is calculated taking into account partial cross-sections of atoms of a mixture or a compound. 
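As an illustration of the general approach (this is a sketch of the standard sampling recipe, not a transcription of the \Rind{GPHSGP} code), the atom $i$ of a compound or mixture is selected with probability proportional to its partial cross-section,
\[
 p_i = \frac{n_i\,\sigma_i(E_\gamma)}{\sum_j n_j\,\sigma_j(E_\gamma)}
\]
where $n_i$ is the proportion (by number) of atoms of type $i$ in the material and $\sigma_i$ is the total photoelectric cross-section of that atom at the incident energy $E_\gamma$.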
\subsection{Probability of Interaction with a Shell} To calculate the probability of the interaction with a particular shell we use the jump ratios defined as: \begin{equation} J_{shell} = \frac{\sigma(E_{shell}+\delta E)}{\sigma(E_{shell}-\delta E)} \label{eq:jumprat} \end{equation} where $\delta E \rightarrow 0$. In addition we assume that the jump ratio is also valid away from the edges.\\ From (\ref{eq:jumprat}) it follows that the probability $p_{shell}$ to interact with a shell is: \begin{equation} p_{shell} = 1-\frac{1}{J_{shell}} \label{eq:probrat} \end{equation} We use the following parametrisation of the jump ratios for $K$ and $L_{I}$ shells\cite{bib-VEIGELE}: \begin{equation} J_K = \frac{125}{Z} + 3.5 \end{equation} \begin{equation} J_{L_{I}} = 1.2 \label{eq:jumpl1} \end{equation} For the $L_{II}$ and $L_{III}$ shells we adopt approximation of the formulae calculated by Gavrila \cite{bib-GAVRILA-L}: \begin{eqnarray} \sigma_{L_{II}} &=& \gamma \beta \frac{m_e}{E_\gamma} \left\{ \gamma^3-5\gamma^2+24\gamma-16 -(\gamma^2+3\gamma-8)\frac{\log(\gamma(1+\beta))}{\gamma\beta} \right\} \label{eq:sigmal2} \end{eqnarray} and \begin{eqnarray} \sigma_{L_{III}} &=& \gamma \beta \frac{m_e}{E_\gamma} \left\{ 4\gamma^3-6\gamma^2+5\gamma+3 -(\gamma^2-3\gamma+4)\frac{\log(\gamma(1+\beta))}{\gamma\beta} \right\} \label{eq:sigmal3} \end{eqnarray} where\\ \begin{tabular}[t]{l@{\hspace{1cm}}p{.7\textwidth}} $ \gamma, \beta $ & are the emitted photoelectron Lorentz gamma and beta factors;\\ $ E_\gamma $ & is the incident radiation energy; \\ $ m_e $ & is the electron mass. \\ \end{tabular}\\ Below an example of the shell interaction probability calculations for $E_\gamma > E_K$ is given.\\ If \begin{eqnarray*} \Sigma_{II,III} & = & \sigma_{L_{II}}+\sigma_{L_{III}} \\ r_{L_{II}} & = & \frac{\sigma_{L_{II}}}{\Sigma_{II,III}} \\ r_{L_{III}} & = & \frac{\sigma_{L_{III}}}{\Sigma_{II,III}} \end{eqnarray*} then from (\ref{eq:probrat}) one can find that \begin{eqnarray*} p_K & = & 1-\frac{1}{J_K} \\ p_{L_1} & = & (1-p_K)\cdot (1 - \frac{1}{J_{L_1}}) \\ p_{L_{II}} & = & (1-p_K-p_{L_I})\cdot r_{L_{II}} \\ p_{L_{III}} & = & (1-p_K-p_{L_I})\cdot r_{L_{III}} \end{eqnarray*} After simple calculations one obtains the probability distribution function which is used to select the shell. \subsection{Angular distributions of photoelectrons} The angular distributions of photoelectrons should be calculated using the partial wave analysis for the outgoing electron. Since this method is very time consuming we use approximations of the angular distributions calculated by F. von Sauter \cite{bib-SAUTER1} \cite{bib-SAUTER2} (K shell) and Gavrila \cite{bib-GAVRILA-K} \cite{bib-GAVRILA-L} (K and L shells). We use the cross-section which is correct only to zero order in $\alpha Z$. This approximation foresees zero scattering amplitude in the direction of incident photon while experimentally the cross-section at $0$ angle is non-vanishing \cite{bib-NAGEL}. If \begin{eqnarray*} X &=& 1-\beta\cos\Theta \end{eqnarray*} then for $K$ and $L_I$ shells we use: \begin{eqnarray} \frac{d\sigma_{K,L_{I}}}{d(\cos\Theta)} &\sim& \frac{\sin^2\Theta}{X^4} \left\{1+ \frac{1}{2}\gamma(\gamma-1)(\gamma-2)\right\} \label{eq:angkl} \end{eqnarray} and for $L_{II}$ and $L_{III}$ shells we have: \begin{eqnarray} \frac{d\sigma_{L_{II}}}{d(\cos\Theta)} &\sim & \frac{(\gamma^2-1)^{\frac{1}{2}}}{\gamma^5(\gamma-1)^5} \left\{ \frac{\gamma(3\gamma+1)}{2 X^4} -\frac{\gamma^2(9\gamma^2+30\gamma-7)}{8 X^3} \right. 
\nonumber \\ \ & \ & +\frac{\gamma^3(\gamma^3+6\gamma^2+11\gamma-2)}{4 X^2} -\frac{\gamma^4(\gamma-1)(\gamma+7)}{8 X} \\ \ & \ & +\sin^2\Theta \left. \left( \frac{(\gamma+1)}{X^5} -\frac{\gamma(\gamma+1)}{X^4} -\frac{\gamma^2(3\gamma+1)(\gamma^2-1)}{X^3} \right) \right\} \nonumber \label{eq:angl2} \end{eqnarray} \begin{eqnarray} \frac{d\sigma_{L_{III}}}{d(\cos\Theta)} &\sim & \frac{(\gamma^2-1)^{\frac{1}{2}}}{\gamma^5(\gamma-1)^5} \left\{ -\frac{\gamma(3\gamma-1)}{2 X^4} +\frac{\gamma^2(3\gamma^2-1)}{X^3} \right. \nonumber \\ \ & \ & \frac{\gamma^3(\gamma^3-3\gamma^2+2\gamma+1)}{X^3} -\frac{\gamma^4(\gamma-2)(\gamma-1)}{2 X} \\ \ & \ & +\sin^2\Theta \left. \left( \frac{(\gamma+1)}{X^5} -\frac{\gamma(\gamma+1)(3\gamma-1)}{X^4} +\frac{\gamma^2(\gamma^2-1)}{X^3} \right) \right\} \nonumber \label{eq:angl3} \end{eqnarray} The azimuthal angle distribution is uniform. \subsection{Final State} After the photoelectron emission the atom is left in excited state. The excitation energy equal to the binding energy $E_i$ of the shell in which the interaction took place. Subsequently the atom emits a fluorescent photon or Auger or Coster-Kronig electron. The selection of radiative or non-radiative transition is based on compilation by Krause \cite{bib-KRAUSE}.\\ The Auger or Coster-Kronig transitions are represented by the most probable line for a given vacancy \cite{bib-CULLEN}. The emitted electron energy $E_e$ is \begin{equation} E_e = E_i-(E_j+E_k) \label{eq:augere} \end{equation} where $E_i, E_j, E_k$ are the subshell binding energies and $E_j > E_k$.\\ In case of fluorescence we use transition rates of Scofield \cite{bib-SCOFIELD}. We use only those transitions for which the occurrence probability is not less than 1\%. The fluorescent photon is emitted with energy $E_\gamma$ \begin{equation} E_\gamma = E_i-E_j \label{eq:fluore} \end{equation} for transition between the subshells $i$ and $j$.\\ In addition to the above, to fulfill the energy conservation law, emission of an additional photon is simulated. For non-radiative transitions its energy is $E_k$ (see formula \ref{eq:augere}). In case of fluorescent transition this photon has energy $E_j$ (see equation \ref{eq:fluore}). The angular distribution of the emitted particle is isotropic.
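As a concrete illustration of the above bookkeeping (a worked example added here, using only equations (\ref{eq:augere}) and (\ref{eq:fluore})): for an initial $K$-shell vacancy, a non-radiative transition in which the vacancy is filled from the $L_{I}$ subshell while an $L_{II}$ electron is ejected gives an electron of energy
\[
E_e = E_K - (E_{L_{I}} + E_{L_{II}})
\]
together with an additional photon of energy $E_{L_{II}}$, whereas a radiative transition between the $K$ and $L_{III}$ subshells gives a fluorescent photon of energy
\[
E_\gamma = E_K - E_{L_{III}}
\]
together with an additional photon of energy $E_{L_{III}}$.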
{ "alphanum_fraction": 0.6399337361, "avg_line_length": 44.8122270742, "ext": "tex", "hexsha": "b10f2b759e65eb226eb63ecebf39db5c15e62147", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "76048db0ca60708a16661e8494e1fcaa76a83db7", "max_forks_repo_licenses": [ "CC-BY-4.0" ], "max_forks_repo_name": "berghaus/cernlib-docs", "max_forks_repo_path": "geant/phys231.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "76048db0ca60708a16661e8494e1fcaa76a83db7", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "CC-BY-4.0" ], "max_issues_repo_name": "berghaus/cernlib-docs", "max_issues_repo_path": "geant/phys231.tex", "max_line_length": 154, "max_stars_count": 1, "max_stars_repo_head_hexsha": "76048db0ca60708a16661e8494e1fcaa76a83db7", "max_stars_repo_licenses": [ "CC-BY-4.0" ], "max_stars_repo_name": "berghaus/cernlib-docs", "max_stars_repo_path": "geant/phys231.tex", "max_stars_repo_stars_event_max_datetime": "2019-07-24T12:30:01.000Z", "max_stars_repo_stars_event_min_datetime": "2019-07-24T12:30:01.000Z", "num_tokens": 3246, "size": 10262 }
\subsection{The Riemann-Hurwitz Theorem} \begin{theorem}[Riemann-Hurwitz] Let $f:R\to S$ be any non-constant analytic map of compact Riemann surfaces, then $$\chi(R)=\deg(f)\chi(S)-\sum_{p\in R}(m_f(p)-1)$$ \end{theorem} \begin{remark} As $R$ is compact, the sum only has finitely many nonzero terms. \end{remark} \begin{proof}[Sketch of proof] As in the proof of the valency theorem, each $q\in S$ has a ``power neighborhood'' $U$ where $f$ restricts to a union of power maps on $f^{-1}(U)$. By compactness, there is a finite open cover $\{U_1,\ldots,U_k\}$ of $S$ where each $U_i$ is a ``power neighbourhood'' of $f$. In particular, the number of branch points is finite. We can subdivide a triangulation on $S$ so that we can eventually reach a triangulation such that each triangle has at most $1$ branch point. We can further subdivide such that each branch point is a vertex. Continue to subdivide so that each triangle is contained in some $U_i$. Now the preimage of this eventual triangulation forms a triangulation of $R$. Let $n=\deg f$ and $V_R,E_R,F_R,V_S,E_S,F_S$ are exactly what you think they mean. Then, intuitively, $F_R=nF_S,E_R=nE_S$ while $$|f^{-1}(\{q\})|=n-\sum_{p\in f^{-1}(\{q\})}(m_f(p)-1)$$ Summing up, $$V_R=nV_S-\sum_{q\in S}\sum_{p\in f^{-1}(\{q\})}(m_f(p)-1)=nV_S-\sum_{p\in R}(m_f(p)-1)$$ which implies the identity. \end{proof}
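\begin{remark}
(A worked example, added for illustration.) Take $f(z)=z^n$ viewed as a map from the Riemann sphere to itself, so $\deg(f)=n$ and $\chi(R)=\chi(S)=2$. The only points of valency greater than $1$ are $z=0$ and $z=\infty$, where $m_f=n$. The right-hand side of the formula is then $2n-2(n-1)=2=\chi(R)$, as the theorem predicts.
\end{remark}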
{ "alphanum_fraction": 0.6659751037, "avg_line_length": 62.8695652174, "ext": "tex", "hexsha": "33294b4406d6ae958a6e51c9867b098102a715b2", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "cbda76f7189c679c4aaccf030b70d310823ead3f", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "david-bai-notes/II-Riemann-Surfaces", "max_forks_repo_path": "11/thm.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "cbda76f7189c679c4aaccf030b70d310823ead3f", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "david-bai-notes/II-Riemann-Surfaces", "max_issues_repo_path": "11/thm.tex", "max_line_length": 152, "max_stars_count": null, "max_stars_repo_head_hexsha": "cbda76f7189c679c4aaccf030b70d310823ead3f", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "david-bai-notes/II-Riemann-Surfaces", "max_stars_repo_path": "11/thm.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 475, "size": 1446 }
\documentclass[12pt]{article} \usepackage[utf8]{inputenc} %% Language and font encodings \usepackage[english]{babel} \usepackage[T1]{fontenc} \usepackage{float} \usepackage{algorithm} \usepackage{algpseudocode} \usepackage{amsmath} \algdef{SE}[DOWHILE]{Do}{doWhile}{\algorithmicdo}[1]{\algorithmicwhile\ #1} \usepackage{url} \usepackage[top=2.5cm, bottom=2.5cm, left=2.5cm, right=2.5cm, marginparwidth=1.7cm]{geometry} \usepackage{lastpage} \usepackage{fancyhdr} \pagestyle{fancy} \fancyhead[R]{Page \thepage\ of\ \pageref{LastPage}} \fancyhead[C]{SSMES} \fancyhead[L]{} \usepackage[colorlinks=true, allcolors=blue]{hyperref} \title{School of Science and Math Exam Scheduler (SSMES) Help File\footnote{\url{https://github.com/maxhirsch/ssmes}}} \date{Spring 2019} \begin{document} \maketitle \tableofcontents \newpage \section{Introduction} The School of Science and Math Exam Scheduler (SSMES) is the result of a 2019 Mini-Term project at the North Carolina School of Science and Mathematics which aimed to reduce the number of exam conflicts at the school. Students involved in the Mini-Term were Daniel Carter, Edward Feng, Kathleen Hablutzel, and Max Hirsch. This documentation serves as a help guide in running the interface for the code which minimizes exam conflicts. \section{About the Minimization Algorithm} Below we outline the general algorithm for determining low-conflict exam schedules. It by no means produces the lowest-conflict schedule, but it maintains similar structure to previous exam schedules while reducing conflicts. The algorithm is more precisely described in the code on \href{https://github.com/maxhirsch/ssmes}{Github}. In the algorithm, a \textit{conflict} is when a student has more than one exam in the same exam block. Previously, NCSSM had three exams each day during exam week, allowing another type of conflict in which a student had three exams in a 24 hour period. Our solution was to remove one exam period from each day so that students could no longer have this conflict. \begin{algorithm} \caption{Creating low-conflict exam schedule} \textbf{Result:} Exam blocks with relatively few conflicts \begin{algorithmic}[1] \Procedure{Reduce Conflicts}{} \For{n trials} \State Split departments \State Randomly assign departments to exam blocks \Do \State Swap departments between exam blocks if the swap reduces conflicts \doWhile{conflicts reduced} \EndFor \EndProcedure \end{algorithmic} \end{algorithm} \begin{algorithm} \label{alg:split} \caption{Splitting Departments} \textbf{Result:} Departments which may or may not be split into two \begin{algorithmic}[1] \Procedure{Split Departments}{$definite\_split$, $restricted\_split$} \For{n trials} \State Split $definite\_split$ departments \State Split any non $restricted\_split$ departments if there are courses with no intra-department conflicts \EndFor \EndProcedure \end{algorithmic} \end{algorithm} \section{Available Settings} In this section, we describe each of the settings in SSMES. \subsection{Term Schedule} Under \textit{Settings $\to$ Term Schedule}, the school schedule can be toggled from trimester to semester. \subsection{Total Random Trials} Under \textit{Settings $\to$ Total Random Trials}, the number of random initializations of schedules can be set. The default value is 5. Larger values of this setting will result in fewer conflicts, but the minimization will take a longer time. \subsection{Random Seed} Under \textit{Settings $\to$ Random Seed}, the random seed for Total Random Trials can be set. The default value is 42. 
\section{Entry Fields} In this section, we describe each of the fields on the main screen of SSMES. \subsection{Term} Term is the trimester (T1, T2, or T3) or semester (S1 or S2) for which you would like to create an exam schedule. The term must chosen in the list before any data can be loaded. \subsection{Data File} The data file field is where you input the name/location of the file which contains course enrollment data. You can browse for this data by clicking "Choose File" beside the entry field. The data file must follow a specific format which we outline below. \subsubsection{Data File Format} The data file with course enrollment data is a .csv file formatted in the following way:\newline\newline StudentID,Trimester,Course1,Course2,...\newline 1,1,0,1,...\newline 2,1,0,0,...\newline 3,1,1,0,...\newline \newline \noindent If on a semester schedule, "Semester" would appear in the header line rather than "Trimester." Course1, Course2,... represent the course numbers with two alphabetic characters followed by three numeric characters (e.g. CH401). Each row is the student ID followed by the term (1, 2, or 3 if a Trimester schedule; 1 or 2 if Semester), and a list of 1s and 0s according to whether or not respectively a student is taking that column's course. An example file can be found on the project \href{https://github.com/maxhirsch/ssmes}{Github}. \subsection{Number of Exam Days} Number of Exam Days is the number of days on which exams will be held. If, for example, it is 4, then there will be $2*4=8$ exam blocks. \subsection{Courses and Excluded Courses} \label{sec:excludedcourses} The Courses field will be populated when you load data in the Data File field. You can then exclude courses from the minimization (e.g. classes without exams or with final papers instead of exams) by clicking the course then clicking "To Excluded." You can move an excluded course back to the Courses field by clicking on the course in the Excluded Courses field and clicking "From Excluded." Once data is loaded from the Data File field, you can also load excluded courses into the Excluded Courses field by choosing an Excluded Courses file. This can be done with the Excluded Courses File field which is found below the Excluded Courses field. This file must be formatted in a particular way which we describe below. \subsubsection{Excluded Courses File} The Excluded Courses file should be formatted such that each excluded course is on its own line with no other characters surrounding it. For example:\newline\newline MA470\newline RE120\newline MA472\newline \newline and so on. \subsection{Departments} The Departments field will be populated when data is loaded in the Data File field. Departments can be split in the algorithm. We allow the user to explicitly split or not split departments by moving departments to the Definite Split Departments and Restricted Split Departments fields respectively in a manner similar to that of moving courses to excluded courses described in subsection \ref{sec:excludedcourses}. The splitting of departments can also be inferred according to the rule described in algorithm \ref{alg:split}. This inference can be enabled by checking the Infer Splits checkbox below the Restricted Split Departments field. \section{Output} The result of the optimization is two files. The user will provide a filename, \textit{fn}, and a popup will allow the user to select a folder to which the files should be saved. 
The files will be saved as $\textit{fn}\text{\_department\_courses.json}$ and $\textit{fn}\text{\_exam\_blocks.json}$. $\textit{fn}\text{\_department\_courses.json}$ contains the departments and their associated courses (a split department will end with a suffix to denote this). $\textit{fn}\text{\_exam\_blocks.json}$ will contain each exam block with the departments in that exam block. Note that there is no actual ordering to the exam blocks, as the order of the blocks themselves will not affect the total number of conflicts. \end{document}
{ "alphanum_fraction": 0.7866421729, "avg_line_length": 66.2695652174, "ext": "tex", "hexsha": "fe28d671545bd216da03fb3cf5bfaf0cfee48ee1", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "a44cd9ac62697cbc655dc0c5fcbb7c46b6fe9b6f", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "maxhirsch/Exam-Conflicts", "max_forks_repo_path": "help-file/help.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "a44cd9ac62697cbc655dc0c5fcbb7c46b6fe9b6f", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "maxhirsch/Exam-Conflicts", "max_issues_repo_path": "help-file/help.tex", "max_line_length": 719, "max_stars_count": null, "max_stars_repo_head_hexsha": "a44cd9ac62697cbc655dc0c5fcbb7c46b6fe9b6f", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "maxhirsch/Exam-Conflicts", "max_stars_repo_path": "help-file/help.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1871, "size": 7621 }
\chapter{Floats} \section{Captions} Figures are funny. They are notably different from tables and therefore requires some explanation. Here are the rules: \begin{enumerate} \item The caption is below the image \item The caption is centered if it's short, ie one line. \item But if the text spans multiple lines, then it's left-justified. \end{enumerate} Figure~\ref{figure_example1} has a short caption, and Figure~\ref{figure_example2} has a longer caption, demonstrating the required single-spacing. \begin{figure}[ht] \centering \includegraphics[width=1in]{baylor} \caption{This is a caption for this figure} \label{figure_example1} \end{figure} Figures like tables are floats and can be positioned anywhere. This author favors placing images at the top. But to illustrate, figure~\ref{figure_example2} is placed at the bottom. \begin{figure}[b] \centering \includegraphics[width=0.2\textwidth]{baylor} \caption[The short table of contents version]{An example of a longer figure caption that spans multiple lines and has a corresponding short version for the table of contents.} \label{figure_example2} \end{figure} \section{Tables} Table~\ref{table_fruit} and Table~\ref{table_silly} demonstrate tables. Table captions differ slightly from figure captions, in that they are \textit{always} supposed to be centered, even if they use multiple lines. \begin{table}[h] \centering \caption[Fruits by color]{\centering Fruits listed by their color. Note that captions differ from figure captions in that they are \textit{always} supposed to be centered, even if they use multiple lines.} \label{table_fruit} \begin{tabular}{rl} \hline \abovespace\belowspace Fruit & Color \\ \hline \abovespace Orange & Orange \\ Blue & Blueberry \\ Red & Cherry \\ Green & Apple \\ Yellow & Banana \\ Purple & Eggplant \belowspace \\ \hline \end{tabular} \end{table} Tables can be anywhere in the text. They should be referred to \textbf{before} they make an appearance. Tables can be placed between text, top of page, or bottom of page. This author personally prefers bottom of page. There has to be triple space before the table captions, double space between caption and table, and triple space after the table. The intext table (table~\ref{table_silly}) might look like it has more space after the table and before the text But that's just because the last item on the table is a horizontal line. At the bottom of this page, is an example of a table set to [b]. This demonstrates the prettiness available to us by using tables at the bottom. Bottom tables rock. As do top tables. \begin{table}[b] \centering \caption[A bottom table]{A botom table illustrated} \label{table_silly} \begin{tabular}{ccc} \hline \abovespace\belowspace A & B & C \\ \hline \abovespace 1 & 2 & 3 \\ 4 & 5 & 6 \\ 7 & 8 & 9 \belowspace \\ \hline \end{tabular} \end{table} Nam dui ligula, fringilla a, euismod sodales, sollicitudin vel, wisi. Morbi auc- tor lorem non justo. Nam lacus libero, pretium at, lobortis vitae, ultricies et, tellus. Donec aliquet, tortor sed accumsan bibendum, erat ligula aliquet magna, vitae ornare odio metus a mi. Morbi ac orci et nisl hendrerit mollis. Suspendisse ut massa. Cras nec ante. Pellentesque a nulla. Cum sociis natoque penatibus et magnis dis par- turient montes, nascetur ridiculus mus. Aliquam tincidunt urna. Nulla ullamcorper vestibulum turpis. Pellentesque cursus luctus mauris. 
\begin{table}[!t] \centering \caption{This is a Top Positioned Table} \begin{tabular}{ l c c c c } \hline \multirow{2}{*}{Interface} & \multicolumn{2}{c}{Completion Time} & \multicolumn{2}{c}{Throughput} \\ {} & Mean & Stdev & Mean & Stdev \\ \hline Mouse only & 74s & 5.19 & 4.33bps & 0.35 \\ Mouse \& speech & 114s & 9.74 & 4.58bps & 0.46 \\ Gestures only & 116s & 14.76 & 2.66bps & 0.40\\ Gestures \& Speech & 136s & 14.82 & 2.83bps & 0.49\\ \hline \end{tabular} \label{tab:ranking} \end{table} Nam dui ligula, fringilla a, euismod sodales, sollicitudin vel, wisi. Morbi auc- tor lorem non justo. Nam lacus libero, pretium at, lobortis vitae, ultricies et, tellus. Donec aliquet, tortor sed accumsan bibendum, erat ligula aliquet magna, vitae ornare odio metus a mi. Morbi ac orci et nisl hendrerit mollis. Suspendisse ut massa. Cras nec ante. Pellentesque a nulla. Cum sociis natoque penatibus et magnis dis par- turient montes, nascetur ridiculus mus. Aliquam tincidunt urna. Nulla ullamcorper vestibulum turpis. Pellentesque cursus luctus mauris.
{ "alphanum_fraction": 0.7379295346, "avg_line_length": 38, "ext": "tex", "hexsha": "8d38142ffc49b6210e8b5a0991d9d5ddb4e28037", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "a87684c7e9c1d7250922d00f37f31ae242dcc363", "max_forks_repo_licenses": [ "Unlicense" ], "max_forks_repo_name": "Malificiece/Leap-Motion-Thesis", "max_forks_repo_path": "docs/Thesis tools/ch3.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "a87684c7e9c1d7250922d00f37f31ae242dcc363", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Unlicense" ], "max_issues_repo_name": "Malificiece/Leap-Motion-Thesis", "max_issues_repo_path": "docs/Thesis tools/ch3.tex", "max_line_length": 145, "max_stars_count": null, "max_stars_repo_head_hexsha": "a87684c7e9c1d7250922d00f37f31ae242dcc363", "max_stars_repo_licenses": [ "Unlicense" ], "max_stars_repo_name": "Malificiece/Leap-Motion-Thesis", "max_stars_repo_path": "docs/Thesis tools/ch3.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1384, "size": 4598 }
\documentclass{lib/styles/default-style}

\begin{document}
\pagestyle{no-numbered}

\unnumberedSection{\textbf{SET}}

\subsection*{\textbf{Description}}
A set is used to simplify code writing when building tables and lists.

First, initialise a named set with the \verb+\initSet{setName}+ command.

Let's create a 'TestSet' set and write some data to it.
\initSet{TestSet}

\subsection*{\textbf{Write}}
The write operation is \verb+\addVariableToSet{setName}{data}+.

Let's push the value 'First index data'.
\addVariableToSet{TestSet}{First index data}

\subsection*{\textbf{Read}}
Now you can access the data using the \verb+\getVariableFromSet{setName}{index}+ command.

So, the value in TestSet[0] = \getVariableFromSet{TestSet}{0}

\end{document}
{ "alphanum_fraction": 0.7667140825, "avg_line_length": 27.0384615385, "ext": "tex", "hexsha": "17f80ec29ac37e52a6d09e42fbdb4ff4c2b6337f", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "4838f5aaf16c08c2b9766eb4978d4c4d358c5f52", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "crimthy/disertation-latex-boilerplate", "max_forks_repo_path": "examples/set-example.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "4838f5aaf16c08c2b9766eb4978d4c4d358c5f52", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "crimthy/disertation-latex-boilerplate", "max_issues_repo_path": "examples/set-example.tex", "max_line_length": 81, "max_stars_count": null, "max_stars_repo_head_hexsha": "4838f5aaf16c08c2b9766eb4978d4c4d358c5f52", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "crimthy/disertation-latex-boilerplate", "max_stars_repo_path": "examples/set-example.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 199, "size": 703 }
\documentclass{article} \usepackage[utf8]{inputenc} \usepackage{charter} \usepackage{helvet} \usepackage{comment} \usepackage{graphicx} \usepackage{etaremune} %\usepackage{mathtools} %Do I need this for basic formulas? \usepackage{titlesec} \usepackage{marvosym} \usepackage{hyperref} \titleformat*{\subsubsection}{\large\bfseries\itshape} \begin{comment} To-do list: *Remember to sync this up with ShareLaTeX. *Change monospace font to Cousine. *Book title (Unofficial CompTIA\textsubscript{®} A+ Study Guide: 820-901) and cover (brown monochrome photo of a computer, text in Highway Gothic?), note that CompTIA is a registered trademark and I may have to drop their branding. I can claim however I will be a certified engineer. *Combine 901 and 902 together. *Add an Indicia (with license and contributors, images used under fair-use, copyrights...), Disclaimer (book is strictly unofficial), ToC and Preface (about this book, has a more general focus on all-round, flaws of CompTIA\textsubscript{®}, the expense of course + materials, booking the exam with extra time difficult, the fact IT departments don't really update their tech very often). *Might be worth adding a glossary and commandline command list (with arguments and operating system support) later too as appendices (everything mentioned in monospace). Along with some exam assistance? *Fix formatting -- Definitions in bold, commandline commands and lowercase program/daemon names in monospace... *Also create Extra Credit boxes that include information that is outside the CompTIA\textsubscript{®} scope but still cool to know. *Eventually a Project Zylon book? :-) \end{comment} \title{CompTIA\textsubscript{®} A+ 2017 901 and 902 Notes} \author{\textsc{Hal Motley}} \date{} \begin{document} \maketitle \begin{center} All text and images unless otherwise stated are licensed under Attribution-ShareAlike 3.0 Unported (CC BY-SA 3.0), which is available at: \url{https://creativecommons.org/licenses/by-sa/3.0/} \end{center} \chapter{901} \section{Hardware} \subsection{BIOS and UEFI} \noindent{BIOS (Basic Input/Output System) and UEFI (Unified Extensible Firmware Interface)}. A feature of the BIOS/UEFI is the capability to turn on/off certain features including USB ports. Which is a great alternative to physically removing the cable from the motherboard header. The BIOS/UEFI can be accessed via several keys, usually F1. There are also options to set a supervisor password password for the BIOS screen or even a user password for the pre-boot process for the OS. BIOS is around 25 years old. UEFI is much newer, based on Intel's original EFI standard and supersedes the old BIOS. It has the benefit of booting over large GPT partitions (over 2.2TB) as well as offering a pre-boot OS-style utilities such as a command-line shell, drivers and applications. Some even offer web browsing and storage drive backup as well as remote diagnostics. BIOS settings used to be stored in non-volatile CMOS memory (actual CMOS memory) which much like the RTC chip requires a battery. Nowadays, we use flash memory to store BIOS settings and the CMOS handles just the RTC settings. If the RTC battery fails then these settings are requested each boot, although the clock can be synchronised to the NTP server within the OS. \subsubsection{The BIOS/UEFI boot order} \begin{enumerate} \item \textbf{Initialise the system.} Checks the hardware components such as the CPU, RAM via \textit{POST (Power-On Self Test)}. If any of these tests fail, an error message will appear notifying the user. 
\item \textbf{Check storage for bootloader.} Checks for the computer's bootloader location within the hard drive (unless this has been overridden with by changing the boot order settings). \item \textbf {Attempts to boot from bootloader's location.} Once verified, an attempt to boot from bootloader is made. If successful the OS will begin booting, if not an error should appear. \end{enumerate} \subsubsection{Installing BIOS/UEFI upgrades} Sometimes it may be necessary to upgrade the BIOS/UEFI to fix bugs or add features. This process involves flashing a new firmware to the chip. While some motherboards have more than one BIOS chip (such as Gigabyte's DualBIOS) most only have one. Like any data being written to a storage device, an interruption in the power supply or a premature disconnection of the device can risk corruption. This is \textit{extremely dangerous} to the BIOS chip and can risk bricking the motherboard in this event. It's best to plug any laptops into mains power and have the battery charged or use an \textit{UPS (Uninterruptable Power Supply)} on desktops to prevent such as circumstance from occurring. \begin{enumerate} \item \textbf{Check the BIOS/UEFI version.} This can be done by checking the BIOS/UEFI itself in the screen or alternatively via \textit{BIOS Version/Date} in the System Information \texttt{msinfo} program in Windows. It's also recommended to download a copy of the current version of the BIOS from the motherboard manufacturer's website as a precaution. \item \textbf{Check the features/bug-fixes in the new update.} It's worth checking whether the BIOS update is addressing any problems you've had on your system. Are these new features useful to you and the user? Some updates are more worthwhile than others. \item \textbf{Prepare the installation media.} For really old machines, this means finding a boot disk. For more modern systems, a typical 1GB+ flash drive should be sufficient for other upgrades that can't be done through Windows directly (close all applications if you are doing this through the Windows userland). \end{enumerate} \subsubsection{BitLocker encryption and LoJack anti-theft} Another option is full-disk encryption such as BitLocker (probably backdoored LOL, see security through obscurity argument) which encrypts the entire hard drive volume and requires a password for unlocking. This process can require a \textbf{TPM (Trusted Platform Module)}, which is a hardware device that connects to a set of GPIO pins on the motherboard (or built-in) and does cryptographic tasks, either needs to be added or is there by default. Alongside BitLocker, there is the the LoJack system for Windows (formerly CompuTrace) which was originally used for automobile theft recovery. It can automatically be asked to send location information about the machine back to the owner or alternatively wipe itself or request a boot password. LoJack itself is built into the BIOS chip ensuring it is always with the machine and cannot easily be tampered with or removed. Parallels can be made between LoJack and FindMyiPhone/Android Device Manager for smartphones and tablets. \subsubsection{Secure Boot} Secure Boot is a part of the UEFI standard and covers a means to boot only operating systems that have matched a whitelist of cryptographically secure signatures. Supports Windows 8, 8.1, 10, Server 2012 (and Server 2012 R2) and many GNU/Linux distros such as Ubuntu, Fedora and OpenSUSE. 
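On a related practical note (this goes slightly beyond the exam objectives): on a Windows 8 or later machine you can check whether Secure Boot is currently active either from System Information (\texttt{msinfo32}, under ``Secure Boot State'') or from an elevated PowerShell prompt with the \texttt{Confirm-SecureBootUEFI} cmdlet, which returns \texttt{True} or \texttt{False} (and raises an error on legacy BIOS systems).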
\subsection{Motherboards} \noindent{The motherboard (occasionally shortened to ``mobo'') or mainboard is main PCB of which all components in a PC connect to.} The main standard for motherboard sizes is \textbf{ATX (Advanced Technology Extended)}, which goes back to 1995 when Intel standardised it. Each modern motherboard uses standardised connectors: \begin{enumerate} \item \textbf{20/24 pin power connector (P1).} Standard large power connector that goes from the PSU to the motherboard. The 24 pin connector has an additional 4 pins that connect separately from the 20 pin connector, allowing maximum flexibility with the PSU. \item \textbf{4/6/8 pin CPU power connector (P2).} Standard smaller power connector designed specifically to supply the CPU(s) on the motherboard. \item \textbf{3/4 pin CPU fan.} Standard fan connector for the CPU fan. Usually 4 pins but sometimes 3 instead, there's a notch on the fan connector to ensure it only goes in one way. For extra credit know that 4 is preferred as that allows the fans to utilise \textbf{PWM (Pulse Width Modulation)} which automatically adjusts the speed keeping the computer quieter and slightly more energy efficient. 4 pin fans are backwards compatible with 3 pin connectors but won't use PWM. \item \textbf{3/4 pin Case fan.} Identical to the CPU fan pins, just several of them dotted around the board. Most boards still use 3 pins, having 4 pin connectors as standard is considered modern. If you run out of these pins, you may need an external fan controller. \item \textbf{PCIe (Peripheral Component Interconnect--express) slots.} Current motherboards use the PCI Express (PCIe) standard to connect peripherals. Their width is generally proportional to the power consumption. PCIe supersedes the PCI, AGP and PCI-X standards respectively. \item \textbf{Back I/O ports.} These are the ports that the user can utilise to connect directly to the back of the motherboard. \item \textbf{Front panel headers.} These pins connect directly to the case governing the power and reset buttons, along with the power and hard drive activity LEDs that go alongside them. \item \textbf{CMOS Battery.} This lithium button battery powers the volatile flash memory storing the \textbf{RTC (Real Time Clock)} information. When removed (or if the battery expires) the computer will prompt for the time and date. The full name of the acronym for \textbf{CMOS (Complementary Metal Oxide Semiconductor)} is seldom used these days. \item \textbf{BIOS/UEFI chip(s).} Standard EEPROM chip(s) that store the motherboard's BIOS/UEFI. As mentioned earlier in the BIOS/UEFI section some boards have two or more to safeguard the board against firmware corruption and therefore minimise bricking. \item \textbf{Mounting holes.} These screw holes are designed to allow easy consistent mounting of the motherboard to the case. When mounting the motherboard remember to use stand-off screws to prevent shorting out the motherboard (some modern cases elevate the mounting area so stand-offs aren't needed). \end{enumerate} \noindent{There are also IDE and PCI connectors, that look as follows:\\} %Small images of IDE and PCI connectors. \begin{figure} \includegraphics[width=\linewidth]{images/png/20170202152238_big2.png} \caption{Motherboard diagram done to CompTIA\textsubscript{®}'s specification. Note that being a more modern board, the manufacturer has omitted many of the legacy connectors such as IDE and PCI. Along with helpfully labelling many of the pins and connectors. 
\\ \textit{(Image courtesy of Gigabyte's GA-Z270X-UD3 specification page, used under fair use.)}} \label{fig:MBDiagram} \end{figure} \noindent{These are the standard motherboard sizes, plus a couple of others. The boards from ATX to Mini-ITX can be found in conventional PCs all over the world.}\\ \begin{tabular}{ |p{3cm}||p{3cm}|p{4.8cm}| } \hline \multicolumn{3}{|c|}{\textbf{Motherboard Sizes}} \\ \hline Name & Size in mm & Usage\\ \hline ATX & 304mm by 244mm & Small servers and large PCs\\ Micro-ATX & 244mm by 244mm & Ordinary workstation PCs\\ Mini-ITX & 170mm by 170mm & HTPCs, small workstation PCs\\ Nano-ITX & 120mm by 120mm & Embedded PCs\\ Pico-ITX & 100mm by 72mm & Carputers, Embedded PCs\\ \hline \end{tabular} \subsubsection{Motherboard Buses} A bus is simply the connection path between components. They usually connect through the North Bridge or South Bridge (see Figure 1). The bus size is defined by width and speed measured in cycles per second using either Megahertz or Gigahertz, with 1MHz being equal to 1 million cycles per second and 1,000MHz = 1GHz. An interesting caveat however is that clock speed isn't necessarily representative of how quickly data flows down the bus. For example; DDR3 SDRAM can transfer 64 times the memory clock speed and by using multiple techniques data.\\ \begin{figure} \includegraphics[width=\linewidth]{images/png/NorthBridge-SouthBridgeDiagram.png} \caption{Motherboard Chipset diagram done to CompTIA\textsubscript{®}'s specification.} \label{fig:NSBridge} \end{figure} \begin{tabular}{ |p{1cm}||p{1cm}|p{1cm}|p{2.5cm}|p{1.5cm}| } \hline \multicolumn{5}{|c|}{\textbf{Current expansion standards (Last 20 years)}} \\ \hline Name & Year & Width & Speed & Type\\ \hline PCI & 1992 & 32/64 & variable & Parallel\\ AGP & 1996 & 32 & Upto 2133MB/s & Parallel\\ PCI-X & 1998 & 64 & 1064MB/s & Parallel\\ PCIe & 2004 & 1-32 & variable & Serial\\ \hline \end{tabular} PCI supports both 32 and 64 bit. \subsection{IDE (Integrated Drive Electronics) and SATA (Serial ATA)} \noindent{In order to connect hard drives, optical drives and old floppy disk/ZIP disk drives to computers computers use one of the two current bus standards:} \begin{itemize} \item \textbf{IDE (Integrated Drive Electronics)} is a parallel bus. Devices work in a master and slave configuration... Parallel buses..., however the catch is that the performance of the entire cable is as fast as its slowest device. It's recommended to separate hard drives and optical drives. \item \textbf{SATA (Serial ATA)} is as its name states a serial bus and is connected via a... \end{itemize} \subsection{RAM} RAM is the temporary volatile memory. The initial two form factors \textbf{SIMM (Single Inline Memory Module)} and \textbf{DIMM (Dual Inline Memory Module)} describe if they have chips on one side only (SIMM) or both (DIMM). Data rate is defined as how much data can go simultaneously down the RAMs' own buses. 
It can be the superseded \textbf{SDR (Single Data Rate)} or the modern \textbf{DDR (Double Data Rate)}.\\

\begin{tabular}{ |p{3cm}||p{3cm}|p{3cm}| }
\hline
\multicolumn{3}{|c|}{\textbf{RAM Types}} \\
\hline
Name & Size in inches & Usage\\
\hline
30-pin SIMM & 3.5'' by 0.75'' & Old PC RAM\\
72-pin SIMM & 4.25'' by 1'' & Old PC RAM\\
168-pin DIMM & 5.375'' by 1'' & Standard PC RAM\\
144-pin SODIMM & 2.625'' by 1'' & Laptop RAM\\
72-pin SODIMM & 2.375'' by 1'' & Laptop RAM\\
\hline
\end{tabular}

\subsection{CPU (Central Processing Unit)}

\subsection{Hard Drives}

\subsubsection{Mechanical Hard Drives (Hard Disk Drives)}

\subsubsection{Solid State Drive (SSD)}

\subsubsection{Hybrid Hard Drives}

\subsubsection{RAID (Redundant Array of Independent Disks)}

\textbf{RAID (Redundant Array of Independent Disks)} (sometimes Inexpensive, but CompTIA\textsubscript{®} prefers \textit{Independent}) is a multiple hard-drive configuration (utilising striping, parity or mirroring) found among server configurations, NAS setups and computer enthusiasts' rigs alike. As its name implies, the primary function of RAID is usually to provide redundancy. Therefore if one hard drive fails, it can be swapped out with minimal damage to the overall system.

While there are many standard RAID configurations (and some that are not standard), these are the main few that both CompTIA\textsubscript{®} and the industry focus on.

\begin{itemize}
\item \textbf{RAID 0 (Disk Striping without a Parity Bit).} The data is written to a striped set of equal space among several disks. This can be seen as aggregating multiple hard drives' capacities together and taking advantage of the maximum disk space. The downside is the lack of fault tolerance due to the lack of a parity bit. This means that if even one drive fails there's a substantial risk of data loss (or expensive data recovery).

\item \textbf{RAID 1 (Data Mirroring).} The data is written to 2 or more hard drives simultaneously. As all drives contain the same data, redundancy is created and a bad drive can be swapped out with no overall loss to the system.

\item \textbf{RAID 5 (Striping with Distributed Parity).} The data is striped across the disks as in RAID 0, but parity information is distributed among them, giving both capacity efficiency and fault tolerance. Each of the stripes places data on $n-1$ disks ($n$ being the total amount of disks available), so, for example, four 2TB drives in a RAID 5 array give $(4-1) \times 2$TB $=$ 6TB of usable space. In the event of a drive failing, the parity information allows the remaining drives to work out what the failed drive contained and rewrite what was lost to a new drive.

\item \textbf{RAID 6 (Double Parity).} The data is written using two parity stripes across the drives. It allows for two drive failures before any data is lost, therefore increasing redundancy.
\end{itemize}

\subsection{Flash Memory}

\subsection{PSU (Power Supply Unit)}

The \textbf{PSU (Power Supply Unit)} provides power to a computer's components. It converts standard alternating current electricity from the wall outlet into direct current.

\begin{itemize}
\item \textbf{Power rating.} A power supply's rating or capacity is the maximum amount of power that it can handle. This rating is measured in watts (or kilowatts), the watt being the standard SI unit for power.

\item \textbf{Rail.} A rail is a single voltage being applied by the PSU. Sometimes PSUs have multiple 12V rails.
\end{itemize}

\section{Networking}

\subsection{Common Types of Network}

\begin{itemize}
\item \textbf{WAN (Wide Area Network).} A network that spans a large area, which could involve anything from a few buildings to an entire country or even the entire world.
An example would be of course the Internet itself, but outside of that an example of a WAN would be several university campuses connected together. Usually a minimum of two routers are required for a WAN. \item \textbf{LAN (Local Area Network).} A network that usually spans only one building or even one room is a LAN. There is usually only one router that serves the clients. An example would be a domestic household. \item \textbf{PAN (Personal Area Network).} A network that usually spans just one or two devices. An example would be using Bluetooth to transfer files or media between two devices. \end{itemize} \subsection{Network Topologies} A network topology is the arrangement of the networking equipment (routers, switches, access points...), client machines (PCs, laptops, smartphones, tablets...), servers (file server, print server, web server...) and devices (printers...). Along with the cabling/wireless connections that go between them. Here are the common types of topology: \begin{itemize} \item \textbf{Bus} -- Considered very cheap and easy to install, but difficult to reconfigure. Also like a series electric circuit (remember old Christmas lights?) if there's one cable break the system goes down. \item \textbf{Star} -- Also inexpensive, but easy to reconfigure. Data can go through different routes, so it can handle a single cable break. \item \textbf{Ring} -- Speed efficient and easy to install, however it's expensive to both implement and reconfigure. \item \textbf{Mesh} -- Best fault tolerance as each component has direct connections to each other. Reconfiguration however is difficult and expensive. \item \textbf{Hybrid} -- Combines all the features of each topology, though its considered complex (less so than Mesh). \end{itemize} %topology diagrams \subsection{Network Cabling} %Twisted Pair, Untwisted Pair, Coaxial..., patch panels, RJ45 and RJ11 standards and the tools. \begin{itemize} \item \textbf{Loopback plug.} Used for performing a loopback test. A test that checks a computer's Ethernet port is functioning correctly and that the pins in the connector respond to network signals. Usually the plug itself is a small male RJ-45 connector with crossed over wiring. \item \textbf{Crimp tool.} Used for cutting, stripping down and terminating (putting connectors on) cabling. Useful if you want to make your own cable of a non-standard size or save money by buying all the components yourself. \item \textbf{Tone tool.} Used for identifying where cables are in a patch panel. \item \textbf{Punchdown tool.} Used for placing cabling into patch panels and punchdown blocks. \end{itemize} \subsubsection{EMI (ElectroMagnetic Interference)} An important factor when planning the cabling is EMI (ElectroMagnetic Interference). When a metal wire has an electric current put through it a magnetic field is generated. This magnetic field can disrupt other cables and devices within the magnetic field's area, particularly if the cabling has a large current going through it such as modern fluorescent lighting. To get around this, ... are used to put network cabling away from interference. Alternatively fibre cabling uses photons rather than electrons to transport data and is unaffected by EMI. \subsection{Wireless Networking (Wi-Fi)} In this modern era of computing wireless networking is more common than ever before... The current standard for wireless networking is Wi-Fi or Wireless Fidelity and is defined by the IEEE 802.11 standard. 
As the standard has progressed there have been several amendments to enable it to work over larger distances, support new devices and enhance overall connectivity.
%a, b, g, n, ac... ranges
\begin{itemize}
\item \textbf{a} -- Has a speed of x Mb/s and range of x metres.
\item \textbf{b} -- Has a speed of x Mb/s and range of x metres.
\item \textbf{g} -- Has a speed of 54 Mb/s and range of 22 metres (75 feet).
\item \textbf{n} -- Has a speed of x Mb/s and range of x metres.
\item \textbf{ac} -- Has a speed of x Mb/s and range of x metres.
\end{itemize}
%frequencies and channels
\subsubsection{SSID (Service Set IDentifier)}
The \textbf{SSID (Service Set IDentifier)} is the name of the access point that is broadcast to devices within range. This can allow easy connectivity. For security and convenience purposes it is recommended you change the SSID of all your access points via their settings pages in your web browser. You can also hide the SSID from being publicly broadcast; most modern operating systems allow you to manually type its name when connecting.
\subsubsection{Wireless Security}
\begin{itemize}
\item \textbf{WEP (Wired Equivalent Privacy).} The most primitive and notoriously insecure means of encrypting a wireless access point. It can easily be bypassed, even with strong passwords, using packet-sniffing tools such as \texttt{aircrack-ng}. AVOID!
\item \textbf{WPA (Wi-Fi Protected Access).}
\item \textbf{WPA2 (Wi-Fi Protected Access 2).} Currently the strongest and most recommended way to secure an access point. Unless the password is incredibly weak it should be sufficient.
\end{itemize}
With these wireless security types you can decide on the actual encryption method.
\begin{itemize}
\item \textbf{TKIP (Temporal Key Integrity Protocol).} Old standard, used...
\item \textbf{AES (Advanced Encryption Standard).} AES is often used in many fields for encryption and is absolutely recommended along with WPA2 for locking a wireless access point down.
\end{itemize}
\subsection{MAC (Media Access Control) Filtering}
\noindent{Sometimes filtering by \textbf{MAC (Media Access Control)} address might be the best way to secure a particular access point.} When enabled, the access point will only accept connections from a whitelist of MAC addresses and refuse the rest (similarly to a firewall). MAC filtering however isn't foolproof and there are ways to spoof a MAC address, although the attacker would have to know the precise MAC address of a machine that is allowed through.
\subsection{Useful Command Prompt Networking Commands}
\noindent{When networking it's very useful to know the following commands for troubleshooting. This list is Windows-oriented for both Command Prompt and PowerShell (though some commands such as \texttt{ping} work the same on most shells); there is an exhaustive list of commands, their associated arguments and operating system support in the appendix.} Here the IP addresses used are merely for demonstrative purposes. For commands that use or require a global IP address, there is \texttt{8.8.8.8} (which is a Google DNS server), and \texttt{192.168.0.2} for commands that require a local one.
%Command Prompt screenshots
\begin{itemize}
\item \texttt{ping}, e.g. (\texttt{ping 8.8.8.8}) -- \texttt{ping} is a command that is usually used to test a data connection (client or server) or find an IP address. A data packet is sent from the client to the destination defined by the IP address requested; \texttt{ping} then logs the replies and reports the round-trip time back to the user.
\item \texttt{tracert}, e.g. (\texttt{tracert 8.8.8.8}) -- \texttt{tracert} or \textit{traceroute} is a command that is used to query what path the packets are taking, within a maximum of 30 hops (a hop being a node the packets go through).
\item \texttt{nbtstat}, e.g. (\texttt{nbtstat 192.168.0.2}) -- \texttt{nbtstat} or \textit{NetBIOS stats} is a command that is used to show... As NetBIOS is a predecessor to modern TCP/IP networking, it is often not found in modern versions of Windows, nor is its practical use really recommended in this day and age.
%fix ^^^
\item \texttt{netstat}, e.g. (\texttt{netstat -a}) -- \texttt{netstat} or \textit{network stats} is a command that shows all the active network connections that machine has. This can be useful for finding out if a machine has malware, if the machine's user is using unauthorised software, or to assist with firewall configuration. In the example above all connections will be shown to the user.
\item \texttt{nslookup}, e.g. (\texttt{nslookup 192.168.0.2}) -- \texttt{nslookup} or \textit{name server lookup} is a command that helps diagnose DNS issues... shows the computer's name and global IP address.
\end{itemize}
\subsection{Protocols and Port Numbers}
\noindent{A protocol is the means by which different computers are able to communicate with each other over a network.} There are thousands of protocols out there that can do many different things over a network. CompTIA\textsubscript{®} insists that you learn these protocols and the default ports associated with them. For old protocols that are often not used anymore I have denoted them as archaic and suggested alternatives in their respective descriptions.
\begin{itemize}
\item \textbf{AFP (Apple Filing Protocol).} Apple's own archaic protocol for transferring files. Modern Macs use SMB2 instead.
\item \textbf{FTP (File Transfer Protocol).} Standard protocol used to transfer files between one computer and another. FTP by itself is insecure and it is recommended to use \textbf{SFTP (Secure FTP)} in a production environment or when wanting to transfer files over a WAN.
\item \textbf{SSH (Secure SHell).} Protocol that provides a commandline shell such as Command Prompt or Bash over a network, but unlike Telnet it's secure by default using public-key cryptography.
\item \textbf{Telnet.} An archaic and insecure means of accessing a commandline shell over a network. It has been superseded by SSH, but can still be used for legacy purposes.
\item \textbf{SMTP (Simple Mail Transfer Protocol).} Standard protocol for sending e-mail from a client to a server or from a server to a server.
\item \textbf{DNS (Domain Name System).} Standard protocol for converting a domain (such as \texttt{example.com}) into an IP address (such as \texttt{93.184.216.34}). This process can be easily demonstrated using the \texttt{ping} command in your commandline shell of choice.
\item \textbf{HTTP (HyperText Transfer Protocol).} Standard protocol for retrieving and updating web pages. Still used on many web pages, particularly ones that just provide public information, but many modern sites (particularly large companies) tend to use HTTPS as standard because it provides security.
\item \textbf{HTTPS (HyperText Transfer Protocol Secure).} Has identical functionality to HTTP, but retrieves and updates the pages over an encrypted connection based on public-private key cryptography. This is essential for websites handling personal information such as names, addresses and bank details.
\item \textbf{DHCP (Dynamic Host Configuration Protocol).} Standard protocol for dynamically (meaning IPs that change) assigning IP addresses to devices on a network. A DHCP server is recommended for large networks, but in a domestic setting this tends to be handled by the router itself.
\item \textbf{SMB (Server Message Block) and CIFS (Common Internet File System).} Standard protocol for accessing shared folders on a local network device. CIFS is an older dialect of SMB, and modern versions of SMB are the recommended choice. For extra credit, know that GNU/Linux users tend to use the free, open-source implementation of this protocol called Samba.
\item \textbf{SNMP (Simple Network Management Protocol).} Standard protocol for collecting information from network connected devices. When set up, the SNMP agents (devices that utilise SNMP) will feed data back to the networking hardware requesting it. This protocol is useful for diagnosing issues with a network.
\item \textbf{LDAP (Lightweight Directory Access Protocol).} Standard protocol for directory server programs such as Microsoft's Active Directory or Red Hat's 389 Directory Server to communicate with client machines.
\item \textbf{RDP (Remote Desktop Protocol).} Standard protocol mostly used by Windows machines that allows remote access to the desktop of a client machine. It can be useful for remote management or for assisting in a technical issue a user might have. For extra credit, know that the popular alternative is VNC.
\item \textbf{POP3 (Post Office Protocol).} An archaic (but still used) protocol for receiving e-mail that merely downloads it onto the client machine. It's acceptable to use if there is only one planned client and no need to synchronise between multiple e-mail clients such as a user's smartphone and laptop.
\item \textbf{IMAP4 (Internet Mail Access Protocol).} Standard protocol for receiving e-mail. Unlike its predecessor POP3, this protocol does more than merely download the mail off a server: it also synchronises the user's mailbox with the server, making it easier for the user to access their mail on multiple devices.
\end{itemize}
\begin{tabular}{ |p{3cm}||p{2cm}|p{5cm}| }
\hline
\multicolumn{3}{|c|}{\textbf{Protocols and Port Numbers}} \\
\hline
Service & Protocol & Default Port\\
\hline
AFP & TCP & 427, 548 \\
FTP & TCP & 20, 21\\
SSH & TCP & 22\\ %SSH is cool like Madness Day, 22nd September
Telnet & TCP & 23\\
SMTP & TCP & 25\\
DNS & TCP/UDP & 53\\ %Deck of cards + a Joker
HTTP & TCP & 80 \\
HTTPS & TCP & 443 \\
DHCP & UDP & 67, 68\\
SMB/CIFS & TCP & 445 (TCP), 137--139 (NetBIOS)\\
SNMP & UDP & 161 \\
LDAP & TCP & 389\\
RDP & TCP & 3389\\
POP3 & TCP & 110\\
IMAP4 & TCP & 143\\
\hline
\end{tabular}
\subsection{Port Forwarding and Port Triggering}
\subsection{OSI (Open Systems Interconnection) Model}
In order to better define and illustrate the communications that computers make between each other, the ISO (International Organization for Standardization) developed the OSI (Open Systems Interconnection) Model. Learning the OSI Model is essential for a network engineer, but also really useful for any kind of IT professional or enthusiast.
\begin{etaremune}
\item \textbf{Application Layer} --
\item \textbf{Presentation Layer} --
\item \textbf{Session Layer} --
\item \textbf{Transport Layer} --
\item \textbf{Network Layer} --
\item \textbf{Data Link Layer} --
\item \textbf{Physical Layer} -- Covers data in its rawest forms, such as bits and the electric signals that carry them.
\end{etaremune}
%A popular mnemonic is...
\noindent{This table breaks down the OSI Model.}\\ \begin{tabular}{ |p{1cm}||p{2.8cm}|p{3.8cm}|p{6cm}| } \hline \multicolumn{4}{|c|}{\textbf{OSI Model}} \\ \hline Type & Data Unit & Layer & Function\\ \hline Host & Data & 7 -- Application Layer & High-level APIs, including resource sharing, remote file access\\ \hline Host & Data & 6 -- Presentation Layer & Translation of data between a networking service and an application; including character encoding, data compression and encryption/decryption.\\ \hline Host & Data & 5 -- Session Layer & Managing communication sessions, i.e. continuous exchange of information in the form of multiple back-and-forth transmissions between two nodes.\\ \hline Host & Segments & 4 -- Transport Layer & Reliable transmission of data segments between points on a network, including segmentation, acknowledgement and multiplexing.\\ \hline Media & Packet/Datagram & 3 -- Network Layer & Structuring and managing a multi-node network, including addressing, routing and traffic control.\\ \hline Media & Frames/Bits & 2 -- Data Link Layer & Reliable transmission of data frames between two nodes connected by a physical layer.\\ \hline Media & Bits & 1 -- Physical Layer & Transmission and reception of raw bit streams over a physical medium.\\ \hline \end{tabular}\\ \chapter{902} \section{Booting and installation} The BIOS/UEFI of any given machine usually supports mulitple boot sources. Here are some examples with a respective scenario. \begin{itemize} \item \textbf{USB} -- \item \textbf{CD-ROM} -- To install an older operating system (less than 900MB), mostly obsolete as the last Windows version to use CD-ROMs was... Alternatively to run a live CD of an older GNU/Linux distro such as Ubuntu 10.04. \item \textbf{DVD} -- To install a more modern operating system (less than 4GB), now officially obsolete as Windows 10 and most GNU/Linux distros insist on USB. \item \textbf{PXE (Preboot eXecution Environment)} -- To run a Windows or GNU/Linux machine without booting from a hard disk. An example would be a cashier's till. \item \textbf{Solid state/flash drives} -- \item \textbf{Netboot} -- \item \textbf{External/hot swappable drive} -- \item \textbf{Internal hard drive (partition)} -- \end{itemize} \section{Operating Systems} %Definition of an OS \subsection{32-bit and 64-bit} \subsection{Introduction to Windows} \subsubsection{Applications} Terminal emulators \begin{itemize} \item \textbf{Command Prompt} (\texttt{cmd.exe}) -- Basic terminal emulator with support for running commands and batch files. \item \textbf{PowerShell} (\texttt{powershell.exe}) -- Advanced terminal emulator with support for running commands, batch files and PowerShell scripts. Planned to replace Command Prompt in the future. \end{itemize} \ \subsubsection{Useful Windows commands} %Add examples of these commands. \begin{itemize} \item \texttt{bootrec} -- \texttt{bootrec} or \textit{boot recovery} is used to troubleshoot issues with Windows startup such as the \textbf{MBR (Master Boot Record)}, boot sector and \textbf{BCD (Boot Configuration Data)} store. \item \texttt{cd} -- \texttt{cd} or \textit{change directory} is used to change the directory (folder). \item \texttt{chkdsk} -- \texttt{chkdsk} or \textit{check disk} is used for checking the hard disk for corruption, bad sectors and other maladies. \item \texttt{copy} -- Copies files... \item \texttt{del} -- \texttt{del} or \textit{delete} is used to delete a file or directory. 
\item \texttt{dir} -- \texttt{dir} or \textit{directory} is used to display the contents of the directory.
\item \texttt{diskpart} -- \texttt{diskpart} or \textit{disk partition} is used to partition the disk.
\item \texttt{diskmon} -- \texttt{diskmon} or \textit{disk monitor} is used to log and display hard drive activity.
\item \texttt{defrag} -- \texttt{defrag} or \textit{defragment} removes fragmentation from the hard drive (see hard drives section for more information).
\item \texttt{exit} -- Closes the Command Prompt/PowerShell window.
\item \texttt{expand} -- Extracts the contents of a .cab file. Useful for manually installing Windows Update files.
\item \texttt{format} -- Formats a disk or partition with a chosen file system, erasing its existing contents.
\item \texttt{fsutil} -- \texttt{fsutil} or \textit{file system utilities} is used for managing FAT32 and NTFS.
\item \texttt{gpresult} -- \texttt{gpresult} or \textit{group policy result} is used to display the \textbf{RSoP (Resultant Set of Policy)} for the remote user and the local machine.
\item \texttt{gpupdate} -- \texttt{gpupdate} or \textit{group policy update} refreshes the local and Active Directory Group Policy settings of a given machine. Replaces the deprecated \texttt{secedit /refreshpolicy}.
\item \texttt{help} -- Displays information on how to use the command (equivalent to the UNIX \texttt{man} command).
\item \texttt{md} -- \texttt{md} or \textit{make directory} is used to create a new directory.
\item \texttt{ps} -- \texttt{ps} or \textit{process status} is used to list the current processes (PowerShell only, where it is an alias for \texttt{Get-Process}).
\item \texttt{taskkill} -- \texttt{taskkill} is used to end a task.
\item \texttt{tasklist} -- \texttt{tasklist} is used to display a list of tasks running.
\item \texttt{rd} -- \texttt{rd} or \textit{remove directory} deletes a directory (and, with the \texttt{/s} switch, its contents).
\item \texttt{robocopy} -- \texttt{robocopy} or \textit{robust file copy} is used to copy entire directories and their contents from one place to another.
\item \texttt{sfc} (example: \texttt{sfc /scannow}) -- \texttt{sfc} or \textit{system file check} is a command used to scan the operating system for corrupted files and then replace them (requires admin privileges).
\item \texttt{shutdown} -- Shuts down the computer.
\item \texttt{xcopy} -- \texttt{xcopy} or \textit{extended copy} has the same function as \texttt{robocopy}, albeit with fewer features. The command has been superseded by \texttt{robocopy} and therefore should only be used on Windows versions prior to Vista and Server 2008 (XP and older).
\end{itemize}
\subsubsection{Key Boot Files}
When Windows 7 boots, these are the files that load and their functions.
\begin{itemize}
\item \textbf{BOOTMGR (Windows Boot Manager)} -- Program that bootstraps (loads) the operating system.
\item \textbf{BCD (Boot Configuration Data)} -- Holds the information about any Windows versions on the user's hard drive.
\item \textbf{WINLOAD.exe} -- This program boots Windows 7.
\item \textbf{WINRESUME.exe} -- Program that handles continuing a previous session in the event of hibernation.
\item \textbf{NTOSKRNL.exe (NTOS Kernel)} -- This is the Windows kernel.
\end{itemize}
\subsubsection{Administrative Tools}
Administrative Tools or Admin Tools is a section of Control Panel that offers the following functionality:
\begin{itemize}
\item \textbf{Component Services} -- utility used to configure COM components and COM+.
\item \textbf{Computer Management} -- \item \textbf{Data Sources (ODBC)} -- \item \textbf{Event Viewer} -- utility used to view the event log, which is useful for diagnosing a fault with the machine or an error that had an unclear message. \item \textbf{iSCSI Initiator} -- \item \textbf{Performance Monitor} -- utility used to monitor and log the usage of system resources (CPU, RAM, GPU, Ethernet... etc.), it is more informative than the Performance tab in Windows Task Manager. \item \textbf{Services} -- \item \textbf{System Configuration} -- \item \textbf{Task Scheduler} -- utility used to configure the computer to do a particular task (or set of tasks) at a particular time (for example, one could ask the machine to do disk defragmentation at noon every Wednesday.) \item \textbf{Windows Firewall with Advanced Security} -- \item \textbf{Windows Memory Diagnostic} -- \item \textbf{Windows PowerShell modules} -- utility used for managing cmdlets or modules for PowerShell. \end{itemize} \subsubsection{Windows Task Manager (\texttt{taskmgr.exe})} Windows Task Manager or simply Task Manager is a system utility program that handles task management, system monitoring and startup controls. Windows Task Manager has the following tabs: \begin{itemize} \item \textbf{Applications} -- This tab handles user applications that are currently running. \item \textbf{Processes} -- This tab handles the applications and other programs running in memory. Always be careful when forcefully ending a process (though it is often the quickest way to close a program that has crashed). \item \textbf{Services} -- This tab handles services, which are programs that run in the background (Windows equivalent to a UNIX daemon). \item \textbf{Performance} -- This tab handles the performance statistics for the CPU and RAM. It also provides other information such as system uptime. \item \textbf{Networking} -- This tab handles the traffic statistics being used by the \textbf{NIC (Network Interface Card)}. \item \textbf{Users} -- This tab handles the current users who are logged in. \end{itemize} \subsubsection{System Configuration (\texttt{msconfig})} System Configuration (\texttt{msconfig}) is a tool mostly used for adjusting boot settings. \begin{itemize} \item \textbf{General} -- This tab handles basic booting options and has three radio-buttons allowing the user to decide if they want a \textit{Normal Startup} (loading all device drivers and services), \textit{Diagnostic Startup} (loading basic drivers and services) and \textit{Selective Startup} which allows the user to toggle 3 checkboxes (Load system services, load startup items and use original boot configuration). \item \textbf{Boot} -- This tab handles the configuration settings for Windows Boot Manager. First the user selects the Windows operating system and then they can configure the settings for the next boot. There is also an option to make all boot settings permanent. \item \textbf{Services} -- This tab handles which Windows services should start at startup. This feature was removed in Windows 8--10 and instead recommends Task Manager (\texttt{taskmgr.exe}) or Services (\texttt{services.msc}). \item \textbf{Startup} -- This tab handles applications and system utilities that open/initialise on startup. \item \textbf{Tools} -- This tab provides a nifty little launcher for opening other Windows configuration/administration tools such About Windows (\texttt{winver.exe}), Command Prompt (\texttt{cmd.exe}) and Task Manager (\texttt{taskmgr.exe}). 
\end{itemize}
\subsubsection{Backup utilities}
\begin{itemize}
\item \textbf{WBAdmin}
\item \textbf{NTBackup}
\end{itemize}
\subsubsection{Active Directory}
RSoP (Resultant Set of Policy)
\section{Introduction to GNU/Linux}
Throughout a technician's career, interaction with GNU/Linux is inevitable. To introduce the topic I have started with some questions that first-time engineers would mostly ask.
\subsection{What is GNU/Linux?}
The first point of confusion might be the name itself, as throughout this book I have referred to GNU/Linux. However I have reasoning behind this and I'll explain why.
\subsection{What is Linux?}
Linux by itself is an operating system kernel. A kernel is the core part of an operating system that is responsible for... tasks. There are several types of kernel; the Linux kernel is monolithic, meaning most tasks are carried out by the kernel. However a lot of people refer to any operating system that uses the Linux kernel as ``Linux'' even if it lacks GNU components. The mascot for the Linux kernel is Tux the penguin.
%insert diagram
\subsection{What is GNU?}
GNU (pronounced \textit{Ger-New}) is a set of operating system components developed by Richard Stallman and other collaborators. The original initiative was to make an entire GNU operating system which could be licensed under Stallman's free software philosophy.
\subsection{What differentiates GNU/Linux from Windows and MacOS?}
Windows and MacOS are packaged operating systems which bundle all their components (kernel, drivers, desktop, application software, utility software...) into one single disk. When you buy a copy of Windows, you are acquiring a whole standard package that you can then install on your hardware and utilise.
%As GNU plays a huge in role in most
With GNU/Linux everything but the kernel can be swapped out. Many different companies, corporations and individuals release their own GNU/Linux versions, called distributions or distros, which can be used on a variety of hardware to either act as an alternative to Windows and MacOS or to provide other functionality entirely.
\subsection{What GNU/Linux distros do you recommend?}
There are lots of distros out there, probably hundreds (no I haven't stopped to count them) floating around the internet. Here are some common distros that I'd recommend you try out; all are free to download unless otherwise noted.\\
\textbf{Desktop:} These distros can be installed and can substitute your Windows or MacOS computer.
\begin{itemize}
\item \textit{Debian} --
\item \textit{Ubuntu} --
\item \textit{Fedora} --
\end{itemize}
\textbf{Server:} These distros are designed for servers and often are favoured for their stability.
\begin{itemize}
\item \textit{Debian} --
\item \textit{Ubuntu} --
\item \textit{RHEL (Red Hat Enterprise Linux)/CentOS} --
\end{itemize}
\textbf{Speciality:} These distros are designed to be run solely from a USB flash drive for assisting with problems with a computer or the network.
\begin{itemize}
\item \textit{Kali} -- Based on Debian, this distro is designed for penetration testing and includes a multitude of tools to assist its users. If you are building a network, it's worth utilising Kali and seeing if you can make your way in. A good start is trying \texttt{aircrack-ng} as I mentioned in the wireless networking chapter.
\item \textit{Parted Magic} -- This distro is designed primarily to fix hard drive partitions (via \texttt{GParted}), however it's very useful for cloning a hard drive's contents (via \texttt{clonezilla}), checking a hard drive's health (via \texttt{smartctl}) and performing other tasks including password recovery. The distro currently costs \textdollar9 (\pounds7.28/\EUR8.38) for a single release and \textdollar49 (\pounds39.65/\EUR45.61) for a year's subscription (as of March 2017) from the official website.
\end{itemize}
\subsubsection{Useful GNU/Linux commands}
Many of these commands are part of both \textbf{POSIX (Portable Operating System Interface)} and the \textbf{SUS (Single UNIX Specification)}. This means they will work on UNIX itself and other UNIX-like operating systems (macOS, BSD, Solaris... etc.).
\begin{itemize}
\item \texttt{awk} -- \texttt{awk} is used to run scripts programmed in the AWK language.
\item \texttt{cd} -- \texttt{cd} or \textit{change directory} is used to change the directory (folder).
\item \texttt{dd} -- \texttt{dd} or \textit{data destroyer} (nickname) is a command used to write data to or erase data from a hard drive/flash drive/memory card. Often used in live USB creation for operating systems.
\item \texttt{exit} -- Closes the terminal emulator's window or logs the user out of a terminal session.
\item \texttt{grep} -- \texttt{grep} or \textit{Globally search a Regular Expression and Print} is used to work with regular expressions (programmable patterns used to assist with searching and replacing text).
\item \texttt{ls} -- \texttt{ls} or \textit{list} is used to list the contents of the directory.
\item \texttt{man} (example: \texttt{man cd}) -- \texttt{man} or \textit{manual} is used to display a help screen for a given command.
\item \texttt{mkdir} -- \texttt{mkdir} or \textit{make directory} is used to make a new directory.
\item \texttt{mv} -- \texttt{mv} or \textit{move} is used to move files and/or rename them.
\item \texttt{ps} -- \texttt{ps} or \textit{process status} is used to list the current processes.
\item \texttt{pwd} -- \texttt{pwd} or \textit{print working directory} is used to display the full file path.
\item \texttt{passwd} -- \texttt{passwd} or \textit{password} is used to change a user's password.
\item \texttt{su} -- \texttt{su} or \textit{superuser} is used to elevate the shell itself to root (administrator).
\item \texttt{sudo} -- \texttt{sudo} or \textit{superuser do} is used to run a command as root.
\item \texttt{uname} (example: \texttt{uname -a}) -- \texttt{uname} or \textit{UNIX name} is used to display the name of the operating system.
\end{itemize}
\section{Virtualisation}
Virtualisation is the process of running one or more operating systems in a secure, emulated environment called a virtual machine.
\subsection{Purpose of virtual machines}
\begin{itemize}
\item \textbf{Isolation} -- Virtual machines are secured, isolated environments.
\item \textbf{Resource requirements} -- These vary with several factors, including the solution used and how much of the CPU/RAM/GPU capacity is allocated to the virtual machine(s).
\item \textbf{Emulator requirements} --
\item \textbf{Security requirements} --
\item \textbf{Network requirements} --
\item \textbf{Hypervisor} -- A hypervisor is the program that manages virtual machines.
\end{itemize}
\subsection{Virtualisation software}
There are several virtualisation solutions; here are the five most common:
\begin{itemize}
\item Hyper-V -- Microsoft's own standard for virtualisation.
\item VMware -- Popular (particularly in the consumer market) for virtualisation.
\item Oracle Virtualbox -- Popular open-source virtualisation software. \item KVM -- A Linux Loadable Kernel Module (LKM) that allows... \item Xen -- \end{itemize} \section{Cloud} \subsection{Public/Private/Hybrid/Community} \subsection{Types of Cloud Services} \begin{itemize} \item \textbf{Infrastructure as a Service (IaaS)} -- The lowest layer, covers cloud infrastructure and virtual machines. OpenStack and Microsoft's Hyper-V are examples. \item \textbf{Platform as a Service (PaaS)} -- This layer covers a vendor providing a development environment (such as an IDE and runtime) to its userbase. \item \textbf{Software as a Service (SaaS)} -- This layer covers a vendor providing software to an audience such as Google Docs or ShareLaTeX. \item \textbf{Storage as a Service (StaaS)} -- This layer covers a vendor providing storage services to users. Google Drive, MEGA and Dropbox are examples. \end{itemize} \subsection{Cloud Features} \begin{itemize} \item \textbf{Rapid Elasticity} -- The ability for a cloud service to scale up or down with ease. \item \textbf{On-demand} -- The ability to make resources available when the user needs it. \item \textbf{Resource Pooling} -- Resource pooling is an IT term used in cloud computing environments to describe a situation in which providers serve multiple clients, customers or "tenants" with provisional and scalable services. %Reword \item \textbf{Measured Service} -- Measured service is a term that IT professionals apply to cloud computing. This is a reference to services where the cloud provider measures or monitors the provision of services for various reasons, including billing, effective use of resources, or overall predictive planning. %Reword \end{itemize} \section{Appendix} \subsection{The 6 steps for troubleshooting} \noindent{CompTIA(R) have a specific methodology they suggest for troubleshooting any computer-related problem} \begin{enumerate} \item \textbf{Identify the problem} \item \textbf{Establish theory of probable cause} \item \textbf{Test the theory} \item \textbf{Establish plan of action} \item \textbf{Verify full system functionality and implement preventative measures if necessary} \item \textbf{Document findings, actions, and outcomes} \end{enumerate} \end{document}
{ "alphanum_fraction": 0.7688764856, "avg_line_length": 67.7791005291, "ext": "tex", "hexsha": "c995d5bf61d36213ff33fa7f848428de2ce57a3c", "lang": "TeX", "max_forks_count": 2, "max_forks_repo_forks_event_max_datetime": "2020-01-21T14:23:23.000Z", "max_forks_repo_forks_event_min_datetime": "2019-01-29T13:49:38.000Z", "max_forks_repo_head_hexsha": "c84daebbad850cc413ed7a5d710d02c3564f331f", "max_forks_repo_licenses": [ "Unlicense" ], "max_forks_repo_name": "inferno986return/CompTIA-A-Notes", "max_forks_repo_path": "src/main.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "c84daebbad850cc413ed7a5d710d02c3564f331f", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Unlicense" ], "max_issues_repo_name": "inferno986return/CompTIA-A-Notes", "max_issues_repo_path": "src/main.tex", "max_line_length": 518, "max_stars_count": null, "max_stars_repo_head_hexsha": "c84daebbad850cc413ed7a5d710d02c3564f331f", "max_stars_repo_licenses": [ "Unlicense" ], "max_stars_repo_name": "inferno986return/CompTIA-A-Notes", "max_stars_repo_path": "src/main.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 12487, "size": 51241 }
\section{Kinematic ages}
\subsection{Calculating velocity dispersions}
\label{sec:velocity_dispersion}

A kinematic age can be calculated from the velocity dispersion, \ie\ standard deviation, of a group of stars. These velocity dispersions can then be converted into an age using an AVR \citep[\eg][]{holmberg2009, yu2018}. Kinematic ages represent the {\it average age} of a group of stars and are most informative when stars are grouped by age. If a group of stars have similar ages, their kinematic age will be close to the age of each individual. On the other hand, the kinematic age of a group with large age variance will not provide much information about the ages of individual stars. Velocity distributions themselves do not reveal whether a group of stars have similar or different ages, since in either case the velocities are Gaussian-distributed. Fortunately however, we can group \kepler\ stars by age using the implicit assumption that underpins gyrochronology: that stars with the same rotation period and color are the same age.
% We discuss the implications of this assumption and cases where it doesn't
% apply in the Discussion of this paper (section \ref{sec:discussion}).

In this paper, we use the kinematic ages published in \citet{lu2021}. In that work, the kinematic age of each star in our sample was calculated by placing it in a bin with other stars with similar rotation periods, effective temperatures, absolute Gaia magnitudes and Rossby numbers. The kinematic age of each star was estimated by calculating the velocity dispersions of stars with these similar parameters, then using an AVR to calculate a corresponding age \citep{yu2019}. The bin size was optimized using a number of Kepler stars with asteroseismic ages.

We used the \citet{yu2018} AVR to convert velocity dispersion to age. This relation was calibrated using the ages and velocities of red clump stars. They divided their sample into metal rich and poor subsets, and calibrated separate AVRs for each, plus a global AVR. Their AVR is a power law:
\begin{equation}
\sigma_{vz} = \alpha t ^\beta,
\end{equation}
where $\alpha$ and $\beta$ take values (6.38, 0.578) for metal rich stars, (3.89, 1.01) for metal poor stars, and (5.47, 0.765) for all stars. A worked example of inverting this relation to turn a measured velocity dispersion into an age is given at the end of this section.

We used 1.5$\times$ the Median Absolute Deviation (MAD) of velocities, which is a robust approximation to the standard deviation and is less sensitive to outliers. Velocity outliers could be binary stars or could be generated by underestimated parallax or proper motion uncertainties.

Figure \ref{fig:kin_and_clusters} displays the data we used to calibrate our gyrochronology model in \prot-\teff\ space. Kepler field stars are shown as small points, and cluster stars are larger points with black outlines. Points are colored by either their kinematic ages or cluster ages. The left- and right-hand panels have a linear and logarithmic y-axis, respectively.

\begin{figure}
\caption{
The calibration data.
Kepler field stars are shown as small points, and cluster stars are larger
points with black outlines.
Points are colored by either their kinematic ages or cluster ages.
The left- and right-hand panels have a linear and logarithmic y-axis,
respectively.
}
\centering
\includegraphics[width=1\textwidth]{kin_and_clusters_log_lin}
\label{fig:kin_and_clusters}
\end{figure}
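To make the conversion concrete, the \citet{yu2018} relation can be inverted to turn a measured vertical velocity dispersion into a kinematic age. The numbers below are purely illustrative (they are not a measurement from our sample), and the conventional units of $\mathrm{km\,s^{-1}}$ for $\sigma_{vz}$ and Gyr for $t$ are assumed:
\begin{equation}
t = \left(\frac{\sigma_{vz}}{\alpha}\right)^{1/\beta}
  = \left(\frac{20}{5.47}\right)^{1/0.765} \approx 5.4~\mathrm{Gyr},
\end{equation}
where the second expression adopts the global coefficients $(\alpha, \beta) = (5.47, 0.765)$ and an example dispersion of $\sigma_{vz} = 20~\mathrm{km\,s^{-1}}$. As emphasized above, such an age describes the average age of the binned group of stars, not the age of any individual star in the bin.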
{ "alphanum_fraction": 0.7957703927, "avg_line_length": 47.2857142857, "ext": "tex", "hexsha": "a389fa6a7995140c2cfb58f0b8a99b1e3734c295", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "73c11348b32c29ffb0bd3d1d6179df95e89c3121", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "RuthAngus/aviary", "max_forks_repo_path": "paper/ages.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "73c11348b32c29ffb0bd3d1d6179df95e89c3121", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "RuthAngus/aviary", "max_issues_repo_path": "paper/ages.tex", "max_line_length": 78, "max_stars_count": null, "max_stars_repo_head_hexsha": "73c11348b32c29ffb0bd3d1d6179df95e89c3121", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "RuthAngus/aviary", "max_stars_repo_path": "paper/ages.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 782, "size": 3310 }
%!TEX root = thesis.tex \chapter{Future Work} \label{chap:futureWork} The results in this work have identified controlling parameters for ignition and provided a framework for creating models with capabilities to predict various ignition scenarios. Significant additional work is needed to refine the proposed framework and explore different ignition parameters. \begin{itemize} \item The moisture content of the fuel bed was not considered in this work but is a well-studied parameter that significantly affects ignition. Extension of the experiments or a meta-analysis of literature is necessary to determine how moisture content fits into the proposed framework. The change in fuel bed bulk density due to moisture may sufficiently characterize the difference, but that remains to be seen. \item Additional tests with well-characterized smoldering or flaming embers would significantly enhance the knowledge gained in this work. Typical temperatures of physically combusting firebrands are higher than those capable by the apparatus used in this work~\cite{Fateev2017a}. The higher temperatures may influence the ignition propensity. However, combusting embers are susceptible to heat losses and environmental factors where the firebrand surrogates used in this work are mainly independent of these factors. It is unclear how or if the addition of energy sources that react with the environment will shift the parameters identified to be most influential to ignition. A series of highly instrumented and controlled experiments focused on the coupled interactions between a reacting ember, a reacting fuel bed, and changing environmental conditions would provide novel and valuable insight into ignition processes. I applaud the patience and perseverance of those who may undertake this endeavor in the future. \item Relatively limited data is available regarding the thermal properties of porous biomass media. The lack of available information is likely due to the significant variation in solid biomass material properties and the endless configurations (e.g., porosity and particle orientation) in the natural environment. A series of targeted studies determining the thermal properties of common fuel bed materials would likely increase the understanding of how ignition changes between different materials or even similar materials under various packing conditions. \item In addition to the limited data available for thermal properties of fuel bed materials, the chemical composition of materials and the influence of chemical changes on the pyrolysis and subsequent ignition would provide valuable insight into the differences in ignition between materials and environmental conditions. \end{itemize}
{ "alphanum_fraction": 0.8034641216, "avg_line_length": 176.8125, "ext": "tex", "hexsha": "b629df55ba0de0ecdc0a256e422bd7cbd37aa3ad", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "8b122d3a272cd7db593231442a6c4dc75baeb454", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "derekb63/OSU-thesis", "max_forks_repo_path": "futureWork.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "8b122d3a272cd7db593231442a6c4dc75baeb454", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "derekb63/OSU-thesis", "max_issues_repo_path": "futureWork.tex", "max_line_length": 1032, "max_stars_count": null, "max_stars_repo_head_hexsha": "8b122d3a272cd7db593231442a6c4dc75baeb454", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "derekb63/OSU-thesis", "max_stars_repo_path": "futureWork.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 481, "size": 2829 }
\section{Conclusion} \label{sec:snic:conclude} We propose network disaggregation and consolidation by building SuperNIC, a new networking device specifically for a disaggregated datacenter. Our FPGA prototype demonstrates the performance and cost benefits of \snic. Our experience also reveals many new challenges in a new networking design space that could guide future researchers. \section{Acknowledgments} Chapter 5, in part, has been submitted for publication of the material as it may appear in Yizhou Shan, Will Lin, Ryan Kosta, Arvind Krishnamurthy, Yiying Zhang, ``Disaggregating and Consolidating Network Functionalities with SuperNIC'', \textit{arXiv, 2022}. The dissertation author was the primary investigator and author of this paper.
{ "alphanum_fraction": 0.8226666667, "avg_line_length": 83.3333333333, "ext": "tex", "hexsha": "f777e706bb461eccbb70b06b71a6303cd9fda8bf", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "859886a5c8524aa73d7d0784d5d695ec60ff1634", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "lastweek/2022-UCSD-Thesis", "max_forks_repo_path": "snic/conclude.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "859886a5c8524aa73d7d0784d5d695ec60ff1634", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "lastweek/2022-UCSD-Thesis", "max_issues_repo_path": "snic/conclude.tex", "max_line_length": 338, "max_stars_count": 12, "max_stars_repo_head_hexsha": "859886a5c8524aa73d7d0784d5d695ec60ff1634", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "lastweek/2022-UCSD-Thesis", "max_stars_repo_path": "snic/conclude.tex", "max_stars_repo_stars_event_max_datetime": "2022-03-18T16:28:32.000Z", "max_stars_repo_stars_event_min_datetime": "2022-03-14T03:09:38.000Z", "num_tokens": 163, "size": 750 }
\startcomponent co-en-12 \environment contextref-env \product contextref \chapter[lines,frames]{Lines and frames} \section{Introduction} \TEX\ has an enormous capacity in handling text, but is very weak at handling graphical information. Lines can be handled adequately as long as you use vertical or horizontal lines. However, you can do graphical work with \TEX\ by combining \TEX\ and \METAPOST. In this chapter we introduce a number of commands that relate to drawing straight lines in your text. We will see a very sophisticated command \type {\framed} that can be used in many ways. The parameters of this command are also available in other commands. \section[single lines]{Single lines} \index{lines} \macro{\tex{hairline}} \macro{\tex{thinrule}} \macro{\tex{thinrules}} \macro{\tex{setupthinrules}} \macro{\tex{vl}} \macro{\tex{hl}} The simplest way to draw a line in \CONTEXT\ is: \showsetup{hairline} For example: \startbuffer \hairline In what fairy tale is the wolf cut open and filled with stones? Was it in {Little Red Riding-hood} or in \quote {The wolf and the seven goats}. \hairline \stopbuffer \startexample \typebuffer \stopexample This will become: \startreality \getbuffer \stopreality It does not look good at all. This is caused by the fact that a drawn line gets its own vertical whitespace. In \in{section}[textlines] we will show how to alter this. The effects of the command \type{\hairline} is best illustrated when we visualize \type{\strut}'s. We did so by saying \type {\showstruts} first. \page \message{harde paginaovergang ivm toonstrut} \startreality {\showstruts\hairline} A strut is a character with a maximum height and depth, but no width. The text in this example is surrounded by two strutted lines. {\showstruts\hairline} \stopreality It is also possible to draw a line over the width of the actual paragraph: \showsetup{thinrule} Or more than one lines by: \showsetup{thinrules} For example: \startbuffer \startitemize \item question 1 \par \thinrules[n=2] \item question 2 \par \thinrules[n=2] \stopitemize \stopbuffer \startexample \typebuffer \stopexample If you leave out a \type {\par} (or empty line), the thin rules come after the text. Compare \getbuffer with \startbuffer \startitemize \item question 1 \thinrules[n=2] \item question 2 \thinrules[n=2] \stopitemize \stopbuffer \getbuffer The last example was keyed in as: \typebuffer The parameters are set with: \showsetup{setupthinrules} You can draw thin vertical or horizontal lines with the commands: \showsetup{vl} \showsetup{hl} The argument is optional. To \type{\vl} (\hbox spread .5em {\hss \vl \hss}) you may pass a factor that relates to the actual height of a line and to \type{\hl} (\hbox spread .5em {\hss \hl \hss}) a width that relates to the width of an em. So \type {\vl[2]} produces a rule with a height of two lines. \section[fill in rules]{Fill in rules} \index{lines} \index{questionnaire} \macro{\tex{fillinline}} \macro{\tex{fillinrules}} \macro{\tex{setupfillinline}} \macro{\tex{setupfillinrules}} On behalf of questionnaires there is the command: \showsetup{fillinline} With the accompanying setup command: \showsetup{setupfillinlines} The example: \startbuffer \fillinline[n=2,width=2cm]{name} \par \fillinline[n=2,width=2cm]{address} \par \stopbuffer \startexample \typebuffer \stopexample Leads to the next list: \startreality \getbuffer \stopreality An alternative is wanting the fill||in rule at the end of a paragraph. 
Then you use the commands: \showsetup{fillinrules} \showsetup{setupfillinrules} The next example will show the implications: \startbuffer \fillinline[width=3cm] Consumers in this shopping mall are frequently confronted with questionnaires. Our hypothesis is that consumers rather shop somewhere else than answer these kind of questionnaires. Do you agree with this? \stopbuffer \startexample \typebuffer \stopexample In this example we could of course have offered some alternatives for answering this question. By setting the width to \type {broad}, we get \startreality \getbuffer\par \stopreality The next set of examples demonstrate how we can influence the layout. \startbuffer \fillinrules[n=2,width=fit]{first} \fillinrules[n=2,width=broad]{first} \fillinrules[n=2,width=3cm]{first} \fillinrules[n=2,width=fit,distance=.5em,separator=:]{first} \fillinrules[n=2,width=broad,distance=.5em]{first}{last} \stopbuffer \typebuffer \getbuffer \section[textlines]{Text lines} \macro{\tex{textrule}} \macro{\tex{setuptextruleen}} A text line is drawn just before and/or after a paragraph. The upper line may also contain text. The command is: \showsetup{textrule} An example: \startbuffer \textrule[top]{Instruments} Some artists mention the instruments that they use during the production of their \kap{CD}. In Peter Gabriel's \quote {Digging in the dust} he used the {\em diembe}, {\em tama} and {\em surdu}. The information on another song mentions the {\em doudouk}. Other \quote {unknown} instruments are used on his \kap{cd} \quote {Passion}. \textrule \stopbuffer \startexample \typebuffer \stopexample This will result in: \getbuffer The behaviour of textlines is set up with the command below. With the parameter \type{width} you set the length of the line in front of the text. \showsetup{setuptextrules} These is also a \type {\start}||\type {\stop} alternative. This one also honors the \type {bodyfont} parameter. \showsetup{starttextrule} \section[underline,overline,overstrike]{Underline} \index{underline} \index{overstrike} \macro{\tex{underbar}} \macro{\tex{underbars}} \macro{\tex{overstrike}} \macro{\tex{overstrikes}} Underlining text is not such an ideal method to banner your text. Nevertheless we introduced this feature in \CONTEXT. Here is how it \underbar{works}. We use: \showsetup{underbar} \startbuffer \underbar {A disadvantage of this command is that words can \underbar {no} longer be hyphenated. This is a nasty side||effect. But we do support \underbar {nested} underlining.} \underbars {The spaces in the last paragraph were also underlined. If we do not want that in this paragraph we use:} \stopbuffer \getbuffer \showsetup{underbars} From the input we can see that the hyphen results from the compound word. \typebuffer The counterpart of these commands are: \showsetup{overbar} \showsetup{overbars} You may wonder for what reasons we introduced these commands. 
The reasons are mainly financial: \startbuffer \starttabulate[|l|r|] \NC product 1 \NC 1.420 \NC \NR \NC product 2 \NC 3.182 \NC \NR \NC total \NC \overbar{4.602} \NC \NR \stoptabulate \stopbuffer \startreality \getbuffer \stopreality This financial overview is made with: \startexample \typebuffer \stopexample The number of parameters in these commands is limited: \showsetup{setupunderbar} The alternatives are: {\setupunderbar [alternative=a]\underbar {alternative~a}, \setupunderbar [alternative=b]\underbar {alternative~b}, \setupunderbar [alternative=c]\underbar {alternative~c}} while another line thickness results in: {\setupunderbar [rulethickness=1pt]\underbar {1pt~line}, \setupunderbar [rulethickness=2pt]\underbar {2pt~line}}. A part of the text can be \overstrike {striked} with the command: \showsetup{overstrike} This command supports no nesting. Single \overstrikes {words are striked} with: \showsetup{overstrikes} \section[framing,stp:framed]{Framing} \index{framing} \index{frames} \macro{\tex{setupframedin}} \macro{\tex{framed}} \macro{\tex{inframed}} Texts can be framed with the command: \type{\framed}. In its most simple form the command looks like this: \startbuffer \framed{A button in an interactive document is a framed text with specific characteristics.} \stopbuffer \startexample \typebuffer \stopexample The becomes: \startlinecorrection \getbuffer \stoplinecorrection The complete definition of this command is: \showsetup{framed} You may notice that all arguments are optional. \startbuffer \framed [height=broad] {A framed text always needs special attention as far as the spacing is concerned.} \stopbuffer \startexample \typebuffer \stopexample Here is the output of the previous source code: \startlinecorrection \getbuffer \stoplinecorrection For the height, the values \type {fit} and \type {broad} have the same results. So: \startbuffer \hbox {\framed[height=broad]{Is this the spacing we want?} \hskip1em \framed[height=fit] {Or isn't it?}} \stopbuffer \typebuffer will give us: \startlinecorrection \getbuffer \stoplinecorrection To obtain a comparable layout between framed and non||framed framing can be set on and off. \startlinecorrection \hbox{\framed[width=2cm,frame=on]{yes} \framed[width=2cm,frame=off]{no} \framed[width=2cm,frame=on]{yes}} \hbox{\framed[width=2cm,frame=off]{no} \framed[width=2cm,frame=on]{yes} \framed[width=2cm,frame=off]{no}} \stoplinecorrection The rulethickness is set with the command \type {\setuprulethickness} (see \in{section}[rulethickness]). A framed text is typeset \quote {on top of} the baseline. When you want real alignment you can use the command \type{\inframed}. \startbuffer to \framed{frame} or to be \inframed{framed} \stopbuffer \startexample \typebuffer \stopexample or: \startreality \getbuffer \stopreality It is possible to draw parts of the frame. In that case you have to specify the separate sides of the frame with \type {leftframe=on} and the alike. We will now show some alternatives of the command \type {\framed}. Please notice the influence of \type {offset}. When no value is given, the offset is determined by the height and depth of the \type {\strut}, that virtual character with a maximum height and depth with no width. When exact positioning is needed within a frame you set \type {offset} at \type {none} (see also \in {tables} [tab:strut 1], \in [tab:strut 2] \in {and} [tab:strut 3]). Setting the \type {offset} to \type {none} or \type {overlay}, will also disable the strut. 
\def\toonframed[#1]% {\leavevmode\framed[#1]{\tttf#1}\par} \startpacked \toonframed[width=fit] \toonframed[width=broad] \toonframed[width=8cm,height=1.5em] \toonframed[offset=5pt] \toonframed[offset=0pt] \toonframed[offset=none] \toonframed[offset=overlay] \toonframed[width=8cm,height=1.5em,offset=0pt] \toonframed[width=8cm,height=1.5em,offset=none] \stoppacked The commands \type {\lbox} (ragged left), \type {\cbox} (ragged center) and \type {\rbox} (ragged right) can be combined with \type {\framed}: \startbuffer[examp-1] \framed [width=.2\hsize,height=3cm] {\lbox to 2.5cm{\hsize2.5cm left\\of the\\middle}} \stopbuffer \startbuffer[examp-2] \framed [width=.2\hsize,height=3cm] {\cbox to 2.5cm{\hsize2.5cm just\\in the\\middle}} \stopbuffer \startbuffer[examp-3] \framed [width=.2\hsize,height=3cm] {\rbox to 2.5cm{\hsize2.5cm right\\of the\\middle}} \stopbuffer \startlinecorrection \startcombination[3] {\getbuffer[examp-1]} {\type{\lbox}} {\getbuffer[examp-2]} {\type{\cbox}} {\getbuffer[examp-3]} {\type{\rbox}} \stopcombination \stoplinecorrection The second text is typed as follows: \startexample \typebuffer[examp-2] \stopexample There is a more convenient way to align a text, since we have the parameters \type {align} and \type{top} and \type{bottom}. In the next one shows the influence of \type {top} and \type {bottom} (the second case is the default). \startbuffer \setupframed[width=.2\hsize,height=3cm,align=middle] \startcombination[4] {\framed[bottom=\vss,top=\vss]{just\\in the\\middle}} {\type{top=\vss}\crlf\type{bottom=\vss}} {\framed[bottom=\vss,top=] {just\\in the\\middle}} {\type{top=} \crlf\type{bottom=\vss}} {\framed[bottom=,top=\vss] {just\\in the\\middle}} {\type{top=\vss}\crlf\type{top=}} {\framed[bottom=,top=] {just\\in the\\middle}} {\type{top=} \crlf\type{bottom=}} \stopcombination \stopbuffer \typebuffer \startlinecorrection \getbuffer \stoplinecorrection In the background of a framed text you can place a screen or a coloured background by setting \type {background} at \type {color} or \type {screen}. Don't forget to activate the the colour mechanism by saying (\type {\setupcolors[state=start]}). \start \setupframed[width=5cm,height=1cm] \startlinecorrection \startcombination[2*2] {\framed [background=screen] {\tfb In the}} {\tttf background=screen} {\framed [background=screen,backgroundscreen=0.7] {\tfb dark}} {\tttf background=screen \endgraf backgroundscreen=0.7} {\framed [background=color] {\tfb all cats}} {\tttf background=color} {\framed [background=color,backgroundcolor=red] {\tfb are grey.}} {\tttf background=color \endgraf backgroundcolor=red} \stopcombination \stoplinecorrection \stop There is also an option to enlarge a frame or the background by setting the \type {frameoffset} and|/|or \type {backgroundoffset}. These do not influence the dimensions. Next to screens and colours you can also use your own kind of backgrounds. This mechanism is described in \in {section} [overlays]. The command \type{\framed} itself can be an argument of \type{\framed}. We will obtain a framed frame. \startbuffer[examp-1] \framed [width=3cm,height=3cm] {\framed[width=2.5cm,height=2.5cm]{hello world}} \stopbuffer \startexample \typebuffer[examp-1] \stopexample In that case the second frame is somewhat larger than expected. This is caused by the fact that the first framed has a strut. This strut is placed automatically to enable typesetting one framed text next to another. 
We suppress \type{\strut} with: \startbuffer[examp-2] \framed [width=3cm,height=3cm,strut=no] {\framed[width=2.5cm,height=2.5cm]{hello world}} \stopbuffer \startexample \typebuffer[examp-2] \stopexample When both examples are placed close to one another we see the difference: \startlinecorrection \startcombination {\showstruts\getbuffer[examp-1]} {\type{strut=yes}} {\showstruts\getbuffer[examp-2]} {\type{strut=no}} \stopcombination \stoplinecorrection A \type {\hairline} is normally drawn over the complete width of a text (\type{\hsize}). Within a frame the line is drawn from the left to the right of the framed box. Consequently the code: \startbuffer \framed[width=8cm,align=middle] {when you read between the lines \hairline you may see what effort it takes \hairline to write a macropackage} \stopbuffer \typebuffer produces the following output: \startlinecorrection \getbuffer \stoplinecorrection When no width is specified only the vertical lines are displayed. \startbuffer \framed {their opinions \hairline differ \hairline considerably} \stopbuffer \startlinecorrection \getbuffer \stoplinecorrection Which was obtained with: \startexample \typebuffer \stopexample The default setup of \type{\framed} can be changed with the command: \showsetup{setupframed} The command \type{\framed} is used within many other commands. The combined use of \type{offset} and \type{strut} may be very confusing. It really pays off to spend some time playing with these macros and parameters, since you will meet \type {\framed} in many other commands. Also, the parameters \type{width} and \type{height} are very important for framing texts. For that reason we summarize the consequences of their settings in \in {table} [tab:strut 1], \in [tab:strut 2] \in {and} [tab:strut 3]. \startbuffer[table] \starttable[|c|c|c|c|c|c|] \HL \VL \VL \VL \FOUR{\tt offset} \VL\SR \DC \DC \DL[4] \DR \VL \VL \VL \tt .25ex \VL \tt 0pt \VL \tt none \VL \tt overlay \VL\SR \HL \VL \LOW{\tt strut} \VL \tt yes \VL \o[yes,.25ex] \VL \o[yes,0pt] \VL \o[yes,none] \VL \o[yes,overlay] \VL\SR \DC \DL[1] \DC\DC\DC\DR \VL \VL \tt no \VL \o[no,.25ex] \VL \o[no,0pt] \VL \o[no,none] \VL \o[no,overlay] \VL\SR \HL \stoptable \stopbuffer \placetable [here][tab:strut 1] {The influence of \type{strut} and \type{offset} in \type{\framed} (1).} {\def\o[#1,#2]{\framed[strut=#1,offset=#2]{}} \getbuffer[table]} \placetable [here][tab:strut 2] {The influence of \type{strut} and \type{offset} in \type{\framed} (2).} {\def\o[#1,#2]{\framed[strut=#1,offset=#2]{\TeX}} \getbuffer[table]} \startbuffer[table] \starttable[|c|c|c|c|] \HL \VL \VL \VL \TWO{\tt width} \VL\SR \DC \DC \DL[2] \DR \VL \VL \VL \tt fit \VL \tt broad (\string\hsize=4cm) \VL\SR \HL \VL \LOW{\tt height} \VL \tt fit \VL \o[fit,fit] \VL \hsize=4cm \o[fit,broad] \VL\SR \DC \DL[1] \DC\DR \VL \VL \tt broad \VL \o[broad,fit] \VL \hsize=4cm \o[broad,broad] \VL\SR \HL \stoptable \stopbuffer \placetable [here][tab:strut 3] {The influence of \type{height} and \type{width} in \type{\framed}.} {\def\o[#1,#2]{\framed[height=#1,width=#2]{xxxx}} \getbuffer[table]} \startbuffer \placefigure [left] {none} {\framed[align=middle]{happy\\birthday\\to you}} \stopbuffer \getbuffer At first sight it is not so obvious that \type {\framed} can determine the width of a paragraph by itself. When we set the parameter \type {align} the paragraph is first typeset and then framed. This feature is valuable when typesetting title pages. In the example left of this text, linebreaks are forced by \type {\\}, but this is not mandatory.
This example was coded as follows: \startexample \typebuffer \stopexample The parameter \type {offset} needs some special attention. By default it is set at \type {.25ex}, based on the currently selected font. The next examples will illustrate this: \startbuffer \hbox{\bf \framed{test} \sl \framed{test} \tfa \framed{test}} \hbox{\framed{\bf test} \framed{\sl test} \framed{\tfa test}} \stopbuffer \startexample \typebuffer \stopexample The value of \type{1ex} outside \type{\framed} determines the offset. This suits our purpose well. \startlinecorrection \getbuffer \stoplinecorrection The differences are very subtle. The distance between the framed boxes depends on the actual font size, the dimensions of the frame, the offset, and the strut. \TEX\ can only draw straight lines. Curves are drawn with small line pieces; this affects the size of \DVI||files considerably and will cause long processing times. Curves in \CONTEXT\ are implemented by means of \POSTSCRIPT. There are two parameters that affect curves: \type{corner} and \type{radius}. When \type{corner} is set at \type{round}, round curves are drawn. \startlinecorrection \framed[corner=round]{Don't be too edgy.} \stoplinecorrection It is also possible to draw circles by setting \type{radius} at half the width or height. But do not use this command for drawing, it is meant for framing text. Use \METAPOST\ instead. Technically speaking the background, the frame and the text are separate components of a framed text. First the background is set, then the text and at the last instance the frame. The curved corner of a frame belongs to the frame and is not influenced by the text. As long as the radius is smaller than the offset no problems will occur. \section[framed texts]{Framed texts} \index{framing} \index{frames} \macro{\tex{defineframedtext}} \macro{\tex{setupframedtexts}} \macro{\tex{startframedtext}} When you feel the urge to put a frame around or a background behind a paragraph there is the command: \showsetup{startframedtext} An application may look like this: \startbuffer \startframedtext[left] From an experiment that was conducted by C. van Noort (1993) it was shown that the use of intermezzos as an attention enhancer is not very effective. \stopframedtext \stopbuffer \startexample \typebuffer \stopexample \getbuffer % no introductory sentence before this This can be set up with: \showsetup{setupframedtexts} Framed texts can be combined with the place block mechanism, as can be seen in \in {intermezzo} [int:demo 1]. \startbuffer \placeintermezzo [here][int:demo 1] {An example of an intermezzo.} \startframedtext For millions of years mankind lived just like animals. Then something happened, which unleashed the power of our imagination. We learned to talk. \blank \rightaligned{--- The Division Bell / Pink Floyd} \stopframedtext \stopbuffer \startexample \typebuffer \stopexample In this case the location of the framed text (between \setchars) is left out. \getbuffer You can also draw a partial frame. The following setup produces \in {intermezzo} [int:demo 2]. \startexample \starttyping \setupframedtexts[frame=off,topframe=on,leftframe=on] \stoptyping \stopexample \start \setupframedtexts[frame=off,topframe=on,leftframe=on] \placeintermezzo [here][int:demo 2] {An example of an intermezzo.} \startframedtext Why are the world leaders not moved by songs like {\em Wozu sind Kriege da?} by Udo Lindenberg. I was, and now I wonder why wars go on and on. \stopframedtext \stop You can also use a background.
When the background is active it looks better to omit the frame. \start \setupframedtexts[frame=off,background=screen] \placeintermezzo [here][] {An example of an intermezzo with background.} \startframedtext An intermezzo like this will draw more attention, but the readability is far from optimal. However, you can read it. This intermezzo was set up with: \starttyping \setupframedtexts[frame=off,background=screen] \stoptyping \stopframedtext \stop \in {Intermezzo} [int:color] demonstrates how to use some color: \startbuffer \setupframedtexts [background=screen, frame=off, rightframe=on, framecolor=darkgreen, rulethickness=3pt] \placeintermezzo [here][int:color] {An example of an intermezzo with a trick.} \startframedtext The trick is really very simple. But the fun is gone when Tom, Dick and Harry use it too. \stopframedtext \stopbuffer \typebuffer \getbuffer So, in order to get a partial frame, we have to set the whole \type {frame} to \type {off}. This is an example of a situation where we can get a bit more readable source code when we say: \startbuffer \startbuffer \startframedtext ... \stopframedtext \stopbuffer \placeintermezzo [here][int:color] {An example of an intermezzo with a trick.}{\getbuffer} \stopbuffer \typebuffer You do not want to set up a framed text every time you need it, so there is the following command: \showsetup{defineframedtext} \startbuffer \defineframedtext [musicfragment] [frame=off, rightframe=on, leftframe=on] \placeintermezzo [here][] {An example of a predefined framed text.} \startmusicfragment Imagine that there are fragments of music in your interactive document. You will not be able to read undisturbed. \stopmusicfragment \stopbuffer The definition: \startexample \typebuffer \stopexample results in: \getbuffer \section[margin rules]{Margin rules} \index{margin+lines} \macro{\tex{startmarginrule}} \macro{\tex{marginrule}} \macro{\tex{setupmarginrules}} To add some sort of flags to paragraphs you can draw vertical lines in the margin. This can be used to indicate that the paragraph was altered since the last version. The commands are: \showsetup{startmarginrule} \showsetup{marginrule} The first command is used around paragraphs, the second within a paragraph. By specifying a level you can suppress a margin rule. This is done by setting the \quote {global} level higher than the \quote {local} level. \showsetup{setupmarginrules} In the example below we show an application of the use of margin rules. \startbuffer \startmarginrule The sound of a duck is a good demonstration of how different people listen to a sound. Everywhere in Europe the sound is equal. But in every country it is described differently: kwaak||kwaak (Netherlands), couin||couin (French), gick||gack (German), rap||rap (Danish) and mech||mech (Spanish). If you speak these words aloud you will notice that \marginrule[4]{in spite of the} consonants the sound is really very well described. And what about a cow, does it say boe, mboe or mmmmmm? \stopmarginrule \stopbuffer \startexample \typebuffer \stopexample Or:\footnote{G.C. Molewijk, Spellingsverandering van zin naar onzin (1992).} \getbuffer If we had set \type {\setupmarginrules[level=2]} we would have obtained a margin rule in the middle of the paragraph. In this example we also see that the thickness of the line is adapted to the level. You can undo this feature with \type{\setupmarginrules[thickness=1]}.
\section[black rules]{Black rules} \index{black rules} \macro{\tex{blackrule}} \macro{\tex{blackrules}} \macro{\tex{setupblackrules}} Little black boxes |<|we call them black rules|>| (\blackrule) can be drawn by \type{\blackrule}: \showsetup{blackrule} When the setup is left out, the default setup is used. \showsetup{setupblackrules} The height, depth and width of a black rule are in accordance with the usual height, depth and width of \TEX. When we use the key \type {max} instead of a real value the dimensions of \TEX's \type{\strutbox} are used. When we set all three dimensions to \type {max} we get: \blackrule [width=max, height=max, depth=max]. \inleft{\blackrule}Black rules may have different purposes. You can use them as identifiers of sections or subsections. This paragraph is tagged by a black rule with default dimensions: \type{\inleft{\blackrule}}. A series of black rules can be typeset by \type{\blackrules}: \showsetup{blackrules} \inleft{\blackrules}There are two versions. Version \type{a} sets \type{n} black rules next to each other with an equal specified width. Version~\type{b} divides the specified width over the number of rules. This paragraph is tagged with \type {\inleft{\blackrules}}. The setup after \type {\blackrule} and \type {\blackrules} is optional. \section[grids]{Grids} \index{grids} \index{squares} \macro{\tex{grid}} We can make squared paper (a sort of grid) with the command: \showsetup{grid} The default setup produces: \startlinecorrection \grid \stoplinecorrection It is used in the background when defining interactive areas in a figure, and for the sake of completeness it is described in this chapter. \stopcomponent
\documentclass{warpdoc} \newlength\lengthfigure % declare a figure width unit \setlength\lengthfigure{0.158\textwidth} % make the figure width unit scale with the textwidth \usepackage{psfrag} % use it to substitute a string in a eps figure \usepackage{subfigure} \usepackage{rotating} \usepackage{pstricks} \usepackage[innercaption]{sidecap} % the cute space-saving side captions \usepackage{scalefnt} \usepackage{amsbsy} \usepackage{amsmath} \usepackage{bm} \numberwithin{equation}{section} %%%%%%%%%%%%%=--NEW COMMANDS BEGINS--=%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \newcommand{\alb}{\vspace{0.2cm}\\} % array line break \newcommand{\rhos}{\rho} \newcommand{\Cv}{{c_{v}}} \newcommand{\Cp}{{c_{p}}} \newcommand{\Sct}{{{\rm Sc}_{\rm t}}} \newcommand{\Prt}{{{\rm Pr}_{\rm t}}} \newcommand{\nd}{{{n}_{\rm d}}} \newcommand{\ns}{{{n}_{\rm s}}} \newcommand{\nn}{{{n}_{\rm n}}} \newcommand{\ndm}{{\bar{n}_{\rm d}}} \newcommand{\nsm}{{\bar{n}_{\rm s}}} \newcommand{\turb}{_{\rm t}} \newcommand{\mut}{{\mu\turb}} \newcommand{\mfa}{\scriptscriptstyle} \newcommand{\mfb}{\scriptstyle} \newcommand{\mfc}{\textstyle} \newcommand{\mfd}{\displaystyle} \newcommand{\hlinex}{\vspace{-0.34cm}~~\\ \hline \vspace{-0.31cm}~~\\} \newcommand{\hlinextop}{\vspace{-0.46cm}~~\\ \hline \hline \vspace{-0.32cm}~~\\} \newcommand{\hlinexbot}{\vspace{-0.37cm}~~\\ \hline \hline \vspace{-0.50cm}~~\\} \newcommand{\tablespacing}{\vspace{-0.4cm}} \newcommand{\fontxfig}{\footnotesize\scalefont{0.918}} \newcommand{\fontgnu}{\footnotesize\scalefont{0.896}} \newcommand{\ordi}{{\rm d}} \newcommand{\Acs}{A_{\rm cs}} \newcommand{\mdot}{\dot{m}} \newcommand{\bigfrac}{\mfd\frac} \newcommand\frameeqn[1]{\fbox{$#1$}} \renewcommand{\fontsizetable}{\footnotesize\scalefont{1.0}} \renewcommand{\fontsizefigure}{\footnotesize} \renewcommand{\vec}[1]{\bm{#1}} \setcounter{tocdepth}{3} \let\citen\cite %%%%%%%%%%%%%=--NEW COMMANDS BEGINS--=%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \setcounter{tocdepth}{3} %%%%%%%%%%%%%=--NEW COMMANDS ENDS--=%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \author{ Bernard Parent } \email{ [email protected] } \department{ Institute for Aerospace Studies } \institution{ University of Toronto } \title{ Performance Parameters } \date{ June 2001 } %\setlength\nomenclaturelabelwidth{0.13\hsize} % optional, default is 0.03\hsize %\setlength\nomenclaturecolumnsep{0.09\hsize} % optional, default is 0.06\hsize \nomenclature{ \begin{nomenclaturelist}{Roman symbols} \item[$a$] speed of sound \end{nomenclaturelist} } \abstract{ abstract } \begin{document} \pagestyle{headings} \pagenumbering{arabic} \setcounter{page}{1} %% \maketitle \makewarpdoctitle % \makeabstract \tableofcontents % \makenomenclature %% \listoftables %% \listoffigures \section{Stagnation Pressure} The stagnation pressure is defined as the pressure obtained from the integration of the governing equations (in differential Euler form) till the velocity vanishes along a streamline. Defining $s$ as the coordinate following a streamline, we can write the continuity, momentum and energy equations at steady-state as: % \begin{displaymath} \begin{array}{c@{~~~~~}l} \bigfrac{\ordi }{\ordi s}\rho u \Acs =0 & {\rm continuity}\alb \bigfrac{\ordi }{\ordi s}\rho u^2 \Acs+\bigfrac{\ordi }{\ordi s} P \Acs =-P \bigfrac{\ordi \Acs}{\ordi s} & {\rm momentum}\alb \bigfrac{\ordi }{\ordi s}\rho u \Acs h_{\rm t} =0 & {\rm energy}\alb \end{array} \end{displaymath} % with $h_{\rm t}$ the total enthalpy ($h_{\rm t}\equiv h + u^2/2$). 
Subtracting the continuity from the momentum and energy gives us the primitive form, % \begin{displaymath} \begin{array}{c@{~~~~~}l} \rho u \bigfrac{\ordi u}{\ordi s} =-\bigfrac{\ordi P}{\ordi s} & {\rm momentum}\alb \bigfrac{\ordi h_{\rm t}}{\ordi s} =0 & {\rm energy} \end{array} \end{displaymath} % which we seek to integrate from state 1 to state 2 (subsequently referred to by the subscripts 1 and 2 respectively). The equations are written in quasi-1D formulation to reflect a possible change of cross-flow area along the streamline. Interestingly, the primitive form of the momentum equation and of the energy equation is unchanged. \subsection{Incompressible Flow} In the case of an \emph{incompressible flow}, the energy equation is not needed to integrate the momentum, as $\rho$ is constant and there is no link between $P$ and $T$. Hence, the integration of the momentum equation can be written as, % \begin{equation} \rho \int_{u_1}^{u_2} u \ordi u = - \int_{P_1}^{P_2} \ordi P \end{equation} % which yields % \begin{equation} \frameeqn{ \mfd P_2 = P_1 + \frac{\rho u_1^2}{2} } \end{equation} % which is the Bernoulli equation. \subsection{Perfect Gas} For a thermally and calorically perfect gas the energy equation has to be taken into account as the density is a function of the pressure and temperature, the latter needing the energy equation to be determined, which is readily integrated to: % \begin{equation} \int_{{(h_{\rm t})}_1}^{h_{\rm t}} \ordi h_{\rm t} =0 \end{equation} % or % \begin{equation} h + \bigfrac{u^2}{2} = h_1 + \bigfrac{u_1^2}{2} \end{equation} % From the ideal gas equation of state, we can say, (using the definition $h\equiv \Cp T$) % \begin{equation} \rho=\bigfrac{P}{RT}=\bigfrac{\Cp P}{Rh} =\bigfrac{\Cp P}{R(h_1+u_1^2/2-u^2/2)} \end{equation} % which we substitute back in the momentum equation $ \rho u \ordi u =-\ordi P$ to get: % \begin{equation} 2 \int_{u_1}^{u_2} \bigfrac{u}{(2 h_1+u_1^2-u^2)} \ordi u =-\int_{P_1}^{P_2} \frac{R}{\Cp P}\ordi P \end{equation} % After integration, this becomes: % \begin{equation} -\ln (2 h_1+u_1^2-u_2^2) +\ln (2 h_1+u_1^2-u_1^2) =- \frac{R}{\Cp}\ln (P_2/P_1) \end{equation} % Using $\gamma \equiv \Cp/\Cv$ and $R=\Cp-\Cv$, and rearranging: % \begin{equation} \frac{\gamma}{\gamma-1} \ln ((h_1+u_1^2/2-u_2^2/2)/h_1) =\ln (P_2/P_1) \end{equation} % which is equivalent to (since at stagnation $u_2=0$): % \begin{equation} P_2=P_1 \left[ 1+\frac{u_1^2}{2 h_1} \right]^\frac{\gamma}{\gamma-1} \end{equation} % The enthalpy can be shown to be equal to $h=(\gamma R T)/(\gamma-1)$, which transforms the latter into: % \begin{equation} P_2=P_1 \left[ 1+\frac{\gamma-1}{2} \frac{u_1^2}{\gamma R T_1} \right]^\frac{\gamma}{\gamma-1} \end{equation} % or, % \begin{equation} \frameeqn{ \mfd P_2=P_1 \left[ 1+\frac{\gamma-1}{2} {\rm M}_1^2 \right]^\frac{\gamma}{\gamma-1} } \end{equation} % It is noted that in the above, a constant entropy path has not been \emph{forced} on the integration: rather, the differential Euler equations generate an isentropic process automatically. Indeed, a change in entropy can only occur when an irreversible phenomenon is present, which is not possible in the differential Euler equations. A shockwave (which introduces a change in entropy), is a viscous phenomenon, and is not more a solution to the differential Euler equations than a boundary layer or a shear layer. 
What causes the confusion is the fact that the properties after the shock can be obtained from the Rankine/Hugoniot approach, a technique which bypasses the viscous terms by integrating a control volume in which the shock is located. In that sense, a shockwave could be thought of as being a solution to the integral form of the Euler equations (but definitely not to the differential form). In short, while the properties after the shock can be estimated in the inviscid world, its solution can only be obtained using the full equations of motion including all viscous effects. \subsection{Perfect Gas with Turbulence} When turbulent kinetic energy terms are present in the momentum and energy equations, a different expression for the stagnation pressure exists. From the continuity and TKE equations, we can say: % \begin{equation} k={\rm constant}=k_1 \end{equation} % and the TKE part of the energy equation cancels out, leading to the expression, % \begin{equation} h + \bigfrac{u^2}{2} = h_1 + \bigfrac{u_1^2}{2} \end{equation} % The momentum equation along a streamline can be written as, % \begin{equation} \rho u \ordi u =-\ordi P^\star \end{equation} % recalling that $P^\star=P+\frac{2}{3}\rho k$. From the ideal gas equation of state, we can say, (using the definition $h\equiv \Cp T$) % \begin{equation} \begin{array}{r} \rho=\bigfrac{P}{RT} =\bigfrac{P^\star-\frac{2}{3}\rho k}{RT} = \frac{P^\star}{RT} - \frac{2}{3} \frac{\rho k}{R T} = \frac{P^\star}{RT+\frac{2}{3}k} \alb = \bigfrac{\Cp P^\star}{Rh+\Cp\frac{2}{3}k} = \frac{\Cp P^\star}{R\left(h_1+u_1^2/2-u^2/2\right)+\Cp\frac{2}{3}k_1} \end{array} \end{equation} % which we substitute back in the momentum equation to get: % \begin{equation} \frac{\Cp P^\star}{R\left(h_1+u_1^2/2-u^2/2\right)+\Cp\frac{2}{3}k_1} u \ordi u =-\ordi P^\star \end{equation} % After integration, this becomes: % \begin{equation} -\ln \left(2 h_1+u_1^2-u_2^2+\frac{4}{3}k_1 \frac{\Cp}{R}\right) +\ln \left(2 h_1+u_1^2-u_1^2+\frac{4}{3}k_1 \frac{\Cp}{R}\right) = - \frac{R}{\Cp}\ln \left(\frac{P_2^\star}{P_1^\star}\right) \end{equation} % Using $\gamma \equiv \Cp/\Cv$ and $R=\Cp-\Cv$, and rearranging: % \begin{equation} \frac{\gamma}{\gamma-1} \ln \left( \frac{2 h_1+u_1^2-u_2^2+\frac{4}{3}k_1 \frac{\Cp}{R}} {2 h_1+\frac{4}{3}k_1 \frac{\Cp}{R}} \right) = \ln \left(\frac{P_2^\star}{P_1^\star}\right) \end{equation} % which is equivalent to (since at stagnation $u_2=0$): % \begin{equation} P_2^\star = P_1^\star \left[ 1+ \frac{u_1^2}{2 h_1+\frac{4}{3}k_1 \frac{\Cp}{R}} \right] ^\frac{\gamma}{\gamma-1} \end{equation} % The sound speed can be expressed as $a^2=\frac{2}{3}\gamma k+(\gamma-1) h$, which, upon substitution in the latter equation, would give, % \begin{equation} \frameeqn{ \mfd P_2^\star = P_1^\star \left[ 1+ \frac{\gamma-1}{2} {\rm M}_1^2 \right] ^\frac{\gamma}{\gamma-1} } \end{equation} % with ${\rm M} \equiv u/a$. \subsection{High-Temperature Gas with Turbulence} This subsection will tackle the stagnation pressure for a high-temperature gas (thermally perfect but not calorically perfect). 
Similarly to the previous subsection, we find that along a streamline, % \begin{equation} k=k_1={\rm constant} \end{equation} % For a real gas, a closed form solution for the stagnation pressure is unavailable and one must resort to a numerical integration of the momentum equation, % \begin{equation} \int_{P^\star_1}^{P^\star_2} \ordi P^\star =\int_{u_1}^{u_2} -\rho u \ordi u \end{equation} % where $\rho$ is updated after each small step from: % \begin{equation} \rho=\frac{P^\star}{R T+\frac{2}{3}k} \end{equation} % with $T$ determined from $h$, for which an expression as a function of the velocity alone takes the form, % \begin{equation} h= - \bigfrac{u^2}{2} + h_1 + \bigfrac{u_1^2}{2} \end{equation} % We can further simplify the integration as (noting that $u_2=0$): % \begin{equation} \ln \left( \frac{P^\star_2}{P^\star_1}\right) =-\int_{u_1}^0 \frac{u}{R T+\frac{2}{3}k} \ordi u \end{equation} % or, % \begin{equation} \frameeqn{ \mfd P^\star_2 =P^\star_1 \exp \left[ \int^{u_1}_0 \frac{u}{R T+\frac{2}{3}k} \ordi u \right] } \end{equation} % \section{Stagnation Temperature} Along a streamline the total enthalpy is conserved, % \begin{displaymath} \begin{array}{c@{~~~~~}l} \bigfrac{\ordi h_{\rm t}}{\ordi s} =0 & {\rm energy} \end{array} \end{displaymath} % which is the only equation needed to determine the stagnation temperature. \subsection{Perfect Gas with Turbulence} This subsection will tackle the stagnation temperature for a perfect gas with turbulence. Similarly to the previous subsection, we find that along a streamline, % \begin{equation} k=k_1={\rm constant} \end{equation} % The stagnation temperature $T_2$ is determined from the conservation of total enthalpy: % \begin{equation} h_2+\frac{5}{3}k_1=h_1+\frac{5}{3}k_1+\frac{u_1^2}{2} \end{equation} % or, using the definition $h=\Cp T$ and $a^2=\frac{2}{3}\gamma k + (\gamma-1)h$, % \begin{equation} \frac{T_2}{T_1}= 1+\frac{u_1^2}{2\Cp T_1} = 1+\frac{(\gamma-1) u_1^2}{2 (a_1^2-\frac{2}{3}\gamma k_1)} \end{equation} % since $\Cp T=(a^2-\frac{2}{3}\gamma k)/(\gamma-1)$. Defining ${\rm M}\turb$ as: % \begin{equation} {\rm M}\turb^2 \equiv \frac{2 k}{a^2-\frac{2}{3}\gamma k} \end{equation} % we can rewrite the latter as % \begin{equation} \frac{T_2}{T_1} = 1+\frac{(\gamma-1) u_1^2}{2 a_1^2} \frac{a_1^2}{(a_1^2-\frac{2}{3}\gamma k_1)} = 1+\frac{(\gamma-1)}{2} {\rm M}_1^2 \left(1+\frac{\gamma}{3} {\rm M}\turb^2 \right) \end{equation} % Finally: % \begin{equation} \frameeqn{ T^\circ = T \left[ 1+\mfd\frac{\gamma-1}{2} {\rm M}^2 \left(1+\mfd\frac{\gamma}{3} {\rm M}\turb^2 \right) \right] } \end{equation} % We can rewrite the latter in terms of the stagnation pressure ratio: % \begin{equation} \frameeqn{ \mfd\frac{T^\circ}{T} = \mfd\frac{{P^\circ}^j}{{P^\star}^j} +\mfd\frac{\gamma^2-\gamma}{6} {\rm M}^2 {\rm M}\turb^2 } \end{equation} % with $j=(\gamma-1)/\gamma$. \section{Entropy Along a Particle Path} In this section, we will show that the entropy along a streamline (for the differential Euler equations) cannot vary. The proof is limited to a calorically and thermally perfect gas.
First, starting from a thermodynamic expression of the entropy, % \begin{equation} s = \Cv \ln e - R \ln \rho + {\rm constant} \end{equation} % Taking the partial derivatives on both sides with respect to time and space, one gets: % \begin{eqnarray} \frac{\partial s}{\partial t} &=& \frac{\Cv}{e} \frac{\partial e}{\partial t} -\frac{R}{\rho} \frac{\partial \rho}{\partial t} \alb \frac{\partial s}{\partial x} &=& \frac{\Cv}{e} \frac{\partial e}{\partial x} -\frac{R}{\rho} \frac{\partial \rho}{\partial x} \end{eqnarray} % Combining the latter two equations, % \begin{equation} \rho \frac{\partial s}{\partial t} + \rho u \frac{\partial s}{\partial x} = \frac{\rho \Cv}{e} \frac{\partial e}{\partial t} - R \frac{\partial \rho}{\partial t} + \frac{\rho u \Cv}{e} \frac{\partial e}{\partial x} - u R \frac{\partial \rho}{\partial x} \label{eqn:govs1} \end{equation} % Now, introduce the energy equation: % \begin{equation} \rho \frac{\partial}{\partial t} \left( e + \frac{u^2}{2} \right) + \rho u \frac{\partial}{\partial x} \left( e + \frac{u^2}{2} \right) = - \frac{\partial}{\partial x} u P \end{equation} % or, % \begin{equation} \rho \frac{\partial e}{\partial t} + \rho u \frac{\partial u}{\partial t} + \rho u \frac{\partial e}{\partial x} + \rho u^2 \frac{\partial u}{\partial x} + u \frac{\partial P}{\partial x} + P \frac{\partial u}{\partial x} =0 \end{equation} % Substituting $\partial e / \partial t$ back into Eq.~(\ref{eqn:govs1}), we get: % \begin{equation} \rho \frac{\partial s}{\partial t} + \rho u \frac{\partial s}{\partial x} = - \frac{\rho u \Cv}{e} \frac{\partial u}{\partial t} - \frac{\rho u^2 \Cv}{e} \frac{\partial u}{\partial x} - \frac{u \Cv}{e} \frac{\partial P}{\partial x} - \frac{\Cv P}{e} \frac{\partial u}{\partial x} - R \frac{\partial \rho}{\partial t} - u R \frac{\partial \rho}{\partial x} \label{eqn:govs2} \end{equation} % Now, introduce the momentum equation: % \begin{equation} \rho \frac{\partial u}{\partial t}=-\rho u \frac{\partial u}{\partial x} - \frac{\partial P}{\partial x} \end{equation} % Substitute $\partial u / \partial t$ into Eq.~(\ref{eqn:govs2}), % \begin{equation} \rho \frac{\partial s}{\partial t}+ \rho u \frac{\partial s}{\partial x} = - \rho R \frac{\partial u}{\partial x} - R \frac{\partial \rho}{\partial t} - u R \frac{\partial \rho}{\partial x} \label{eqn:govs3} \end{equation} % Then, introduce the continuity equation, $\partial \rho /\partial t = -\partial \rho u / \partial x$, and Eq.~(\ref{eqn:govs3}) becomes % \begin{equation} \frameeqn{ \mfd\rho \frac{\partial s}{\partial t}+ \rho u \frac{\partial s}{\partial x}=0 } \label{eqn:govs4} \end{equation} % Equation~(\ref{eqn:govs4}) simply states that a flow particle cannot lose or gain entropy in the Euler world. A change in entropy is indeed associated with an irreversible phenomenon, and no irreversible phenomenon is possible in the differential Euler equations of motion. While the latter proof was done in one dimension only, it is easy to see that in multiple dimensions we would arrive at the same conclusion.
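The algebra above can be checked mechanically. The following sketch (in Python with SymPy; the symbol names are chosen only for illustration) substitutes the time derivatives given by the one-dimensional Euler equations into $\rho\,\partial s/\partial t+\rho u\,\partial s/\partial x$, with $P=R\rho e/\Cv$, and simplifies the result to zero:
\begin{verbatim}
import sympy as sp

x, t = sp.symbols('x t')
R, Cv = sp.symbols('R c_v', positive=True)
rho = sp.Function('rho')(x, t)
u = sp.Function('u')(x, t)
e = sp.Function('e')(x, t)

P = R * rho * e / Cv                  # ideal gas law with e = c_v T
s = Cv * sp.log(e) - R * sp.log(rho)  # entropy up to an additive constant

# time derivatives taken from the 1D Euler equations
drho_dt = -sp.diff(rho * u, x)                                  # continuity
du_dt = (-rho * u * sp.diff(u, x) - sp.diff(P, x)) / rho        # momentum
de_dt = (-rho * u * du_dt - rho * u * sp.diff(e + u**2 / 2, x)  # energy
         - sp.diff(u * P, x)) / rho

# entropy advection: rho ds/dt + rho u ds/dx
expr = rho * sp.diff(s, t) + rho * u * sp.diff(s, x)
expr = expr.subs({sp.Derivative(e, t): de_dt,
                  sp.Derivative(rho, t): drho_dt})
print(sp.simplify(expr))  # prints 0
\end{verbatim}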
\section{Thrust Potential} \begin{figure}[t] \fontxfig \psfrag{A}[lb][lb][1][0]{Engine flowfield schematic.} \psfrag{B}[lb][lb][1][0]{Expansion of the flow properties at station $x_2$ to the domain exit.} \psfrag{D}[t][t][1][0]{$x_1$ (inlet)} \psfrag{E}[t][t][1][0]{$x_3$ (outlet)} \psfrag{C}[t][t][1][0]{$x_2$ ($x$-station of interest)} \begin{center} \includegraphics[width=5.0in]{engine.eps} \end{center} \caption{Hypersonic engine schematic, along with the control volume boundaries used to determine the thrust of the engine (top); control volume boundaries around the engine along with the $x$-station under consideration ($x_2$) for the thrust potential (bottom); note that the flow is expanded from station $x_2$ to station $x_3$ along an isentropic path.} \label{fig:engine} \end{figure} % Probably the most accurate manner in which to assess the losses in a part of a flight vehicle is through the thrust potential method, as outlined in Riggins {\it et al.}\cite{jpp:1997:riggins}. The idea is to integrate the momentum flux expanded to a certain state (either ambient pressure or exit area) from the properties at a given streamwise cross-section, and to subtract the integral of the momentum flux at the engine entrance: % \begin{equation} {\rm thrust~potential} = \int_{x=x_3} \left(\rho u^2 + P \right) \ordi A - \int_{x=x_1} \left(\rho u^2 + P \right) \ordi A \end{equation} % For the engine configuration shown in Fig.~\ref{fig:engine}, the thrust can be readily determined through a control volume analysis as the difference between the outlet momentum and the inlet momentum, minus the shear stresses on the top and bottom boundaries of the engine. It is noted that the additive drag on the exterior of the engine is independent of the flowfield characteristics of the engine, and is hence ignored for the determination of the thrust potential. Further, Fig.~\ref{fig:engine} implies that the flow area at the outlet matches the one at the inlet. This is not such a bad assumption since scramjet flows are typically underexpanded\cite{book:1994:pratt} with a pressure at the exit greater than ambient, and the area cannot be made much larger than the inlet area to minimize the drag forces on the external surfaces. We therefore decide to base our definition of the thrust potential on a fixed outlet area, equal to the inlet area. We further specify the pressure to be equal for all streamlines at the outlet, but do not force a specific value. We will now express the thrust potential in concrete terms. The turbulence kinetic energy along a streamline can be taken as a constant if the source terms are neglected: % \begin{equation} k=k_2={\rm constant} \end{equation} % From the momentum equation along a streamline (Euler formulation with turbulence): % \begin{equation} \ordi P^\star = - \rho u \ordi u \label{eqn:thrust:momentum} \end{equation} % and, at each point along the integration, the total enthalpy is identical to the one at station $x_2$ % \begin{equation} h + \frac{u^2}{2} = h_2 + \frac{u_2^2}{2} \end{equation} % Now, first guess an effective pressure at the outlet, $P_3^\star$. Integrate for each streamline Eq.~(\ref{eqn:thrust:momentum}) from $P_2^\star$ to $P_3^\star$ and consequently obtain $\rho_3$ and $u_3$. This can be done analytically in the case of a perfect gas but numerically for a real gas.
From the conservation of mass, the expanded area $\ordi A_3$ of a flow particle occupying the area $\ordi A_2$ then corresponds to: % \begin{equation} \ordi A_3 = \frac{\rho_2 u_2}{\rho_3 u_3} \ordi A_2 \end{equation} % with the area at $x=x_3$ equal to: % \begin{equation} A_3= \int \ordi A_3 \end{equation} % Since $A_3$ is desired to be equal to $A_1$, a Newton-Raphson iteration can be performed to find a better guess to $P_3^\star$. \subsection{Perfect Gas with Turbulence} Recall that for a perfect gas with turbulence, we can say that: % \begin{equation} \frac{\gamma}{\gamma-1} \ln \left( \frac{2 h_2+u_2^2-u_3^2+\frac{4}{3}k_2 \frac{\Cp}{R}} {2 h_2+\frac{4}{3}k_2 \frac{\Cp}{R}} \right) = \ln \left(\frac{P_3^\star}{P_2^\star}\right) \label{eqn:perfectv} \end{equation} % or, % \begin{equation} \mfd u_3 = \left[ 2 h_2+\frac{4}{3}k_2 \frac{\Cp}{R}+u_2^2 -\left( 2 h_2 +\frac{4}{3}k_2 \frac{\Cp}{R} \right) \left(\frac{P_3^\star}{P_2^\star}\right)^\frac{\gamma-1}{\gamma} \right]^\frac{1}{2} \tag{\ref{eqn:perfectv}a} \label{eqn:perfectv-2} \end{equation} % but, using the relationship $h+\frac{2}{3}k\frac{\Cp}{R}=a^2/(\gamma-1)$, % \begin{equation} \frameeqn{ \mfd u_3 = \left[ \frac{2 a_2^2}{\gamma-1}+u_2^2 -\left( \frac{2 a_2^2}{\gamma-1} \right) \left(\frac{P_3^\star}{P_2^\star}\right)^\frac{\gamma-1}{\gamma} \right]^\frac{1}{2} } \tag{\ref{eqn:perfectv}b} \label{eqn:perfectv-final} \end{equation} % And through the energy equation and the equation of state, % \begin{equation} h_2+u_2^2/2-u_3^2/2 +\frac{2}{3}\frac{\Cp}{R} k_2 = \frac{\gamma}{\gamma-1} \frac{P_3^\star}{\rho_3} \label{eqn:perfectrho} \end{equation} % or, % \begin{equation} \left( 2 h_2 +\frac{4}{3}\frac{\Cp}{R} k_2 \right) \left( \frac{P_3^\star}{P_2^\star}\right)^{\frac{\gamma-1}{\gamma}} = \frac{2 \gamma}{\gamma-1} \frac{P_3^\star}{\rho_3} \tag{\ref{eqn:perfectrho}a} \end{equation} % or, % \begin{equation} \left( \frac{P^\star_2}{\rho_2} \right) \left( \frac{P_3^\star}{P_2^\star}\right)^{\frac{\gamma-1}{\gamma}} = \frac{P_3^\star}{\rho_3} \tag{\ref{eqn:perfectrho}b} \end{equation} % and, % \begin{equation} \frameeqn{\rho_3 = \rho_2 \left( \mfd \frac{P_3^\star}{P_2^\star} \right)^\frac{1}{\gamma}} \tag{\ref{eqn:perfectrho}c} \label{eqn:perfectrho-final} \end{equation} % Through conservation of mass, % \begin{equation} \int \frac{\rho_2 u_2^\perp}{\rho_3 u_3} \ordi A_2 = A_3 \label{eqn:perfectP3} \end{equation} % where $u_2^\perp$ is the velocity component perpendicular to $\ordi A_2$. At station $3$, the flow is assumed to be expanded with the streamlines perpendicular to exit area, and the total velocity will correspond to the velocity perpendicular to surface $A_3$. Substituting expressions for $\rho_3$ and $u_3$ in the latter, % \begin{equation} \int \left. {\rho_2 u_2^\perp} \left/ \rho_2 \left( \mfd \frac{P_3^\star}{P_2^\star} \right)^\frac{1}{\gamma} \left[ 2 h_2+\frac{4}{3}k_2 \frac{\Cp}{R}+u_2^2 -\left( 2 h_2 +\frac{4}{3}k_2 \frac{\Cp}{R} \right) \left(\frac{P_3^\star}{P_2^\star}\right)^\frac{\gamma-1}{\gamma} \right]^\frac{1}{2} \right. \ordi A_2 \right.= A_3 \tag{\ref{eqn:perfectP3}a} \end{equation} % which can be recast to, using the sound speed $a^2=\frac{2}{3}\gamma k +(\gamma-1) h$: % \begin{equation} A_3=\mfd \int \left. {\rm M}_2^\perp \left( \mfd \frac{P_2^\star}{P_3^\star} \right)^\frac{1}{\gamma} \left[ \frac{2}{\gamma-1}+{\rm M}_2^2 - \frac{2}{\gamma-1} \left(\frac{P_3^\star}{P_2^\star}\right)^\frac{\gamma-1}{\gamma} \right]^{-\frac{1}{2}} \right. 
\ordi A_2 \tag{\ref{eqn:perfectP3}b} \label{eqn:perfectP3-2} \end{equation} % % \begin{equation} \frameeqn{ A_3=\mfd\int u_2^\perp {P_2^\star}^\frac{1}{\gamma} \left/ u_3 {P_3^\star}^\frac{1}{\gamma} \ordi A_2 \right. } \tag{\ref{eqn:perfectP3}c} \label{eqn:perfectP3-final} \end{equation} % from which $P_3^\star$ and $A_3$ are the only unknowns. For $P_3^\star$ user-specified, $A_3$ can be readily determined. For $A_3$ user-specified, a Newton-Raphson iteration can be used to find $P_3^\star$. Once $P_3^\star$ is determined from Eq.~(\ref{eqn:perfectP3-final}), the thrust potential can be directly integrated at the $x$-station of interest as % \begin{equation} {\rm thrust~potential} = \mfd\int \left(\rho_3 u_3^2 + P_3^\star \right) \frac{\rho_2 u_2^\perp}{\rho_3 u_3} \ordi A_2 - \mfd\int \left(\rho_1 u_1^2 + P_1^\star \right) \ordi A_1 \end{equation} % where $\rho_3$ is substituted from Eq.~(\ref{eqn:perfectrho-final}): % \begin{equation} \frameeqn{ {\rm thrust~potential}= \mfd\int \!\rho_2 u_{3} u_2^\perp + u_2^\perp {P^\star_2}^{\frac{1}{\gamma}} \left/ u_{3}{P_3^\star}^{\frac{1-\gamma}{\gamma}} \right.\ordi A_2 - \mfd\int \!\left(\rho u_1^2 + P_1^\star \right) \ordi A_1 } \end{equation} % where $u_3$ is taken from Eqs.~(\ref{eqn:perfectv-final}), and $P_3^\star$ is found from Eq.~(\ref{eqn:perfectP3-final}) by user-specifying $A_3$. \subsection{High-Temperature Gas with Turbulence} The turbulent kinetic energy can be shown to be constant along a streamline for the source terms neglected: % \begin{equation} k=k_2={\rm constant} \end{equation} % From the momentum equation along a streamline (Euler formulation with turbulence): % \begin{equation} \ordi P^\star = - \rho u \ordi u \end{equation} % and, at each point along the integration, the total enthalpy is identical to the one at station $x_2$ % \begin{equation} h + \frac{u^2}{2} = h_2 + \frac{u_2^2}{2} \end{equation} % which basically gives a relationship between the temperature and the flow speed. The last three equations can be combined with the equation of state to form the following: % \begin{equation} \frameeqn{ \mfd\frac{\ordi u}{\ordi P^\star} = - \frac{RT + \frac{2}{3}k_2}{P^\star u} } \label{eqn:thrust:momentum:final} \end{equation} % If numerically integrated from point 2 to 3, the latter can give $u_3$ for $u_2$, $P^\star_2$ and $P^\star_3$ specified. Now, first guess an effective pressure at the outlet, $P_3^\star$. Integrate for each streamline Eq.~(\ref{eqn:thrust:momentum:final}) from $P_2^\star$ to $P_3^\star$ and obtain consequently $\rho_3$ and $u_3$. Then, from conservation of mass principles, the expanded area $\ordi A_3$ of a flow particle occupying the area $\ordi A_2$ corresponds to: % \begin{equation} \ordi A_3 = \frac{\rho_2 u_2^\perp}{\rho_3 u_3} \ordi A_2 \end{equation} % where the superscript $\perp$ refers to taking the velocity component perpendicular to the area $\ordi A_2$. The total area of the flow at $x=x_3$ equal to: % \begin{equation} \frameeqn{ \mfd A_3= \int \frac{\rho_2 u_2^\perp}{\rho_3 u_3} \ordi A_2 } \end{equation} % Since $A_3$ is desired to be equal to $A_1$, a Newton-Raphson iteration can be performed to find a better guess to $P_3^\star$. 
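To make the procedure concrete, the following sketch (in Python; the variable names, the streamline data structure and the enthalpy-to-temperature map \texttt{T\_of\_h} are hypothetical) integrates Eq.~(\ref{eqn:thrust:momentum:final}) with a simple forward Euler step for each streamline and then solves for the outlet pressure $P_3^\star$ that matches the target area; a bracketing root finder is used in place of the Newton-Raphson iteration mentioned above, only to keep the sketch robust:
\begin{verbatim}
import numpy as np
from scipy.optimize import brentq

R = 287.0  # gas constant in J/(kg K), assumed value

def expand_streamline(u2, h2, k2, P2s, P3s, T_of_h, n=2000):
    # integrate du/dP* = -(R T + 2/3 k)/(P* u) from P2* to P3*,
    # with h + u^2/2 conserved; T_of_h maps enthalpy to temperature
    ht = h2 + 0.5 * u2**2
    u = u2
    Ps_grid = np.linspace(P2s, P3s, n)
    dPs = Ps_grid[1] - Ps_grid[0]
    for Ps in Ps_grid[:-1]:
        T = T_of_h(ht - 0.5 * u**2)
        u -= (R * T + 2.0 / 3.0 * k2) / (Ps * u) * dPs
    T3 = T_of_h(ht - 0.5 * u**2)
    return u, P3s / (R * T3 + 2.0 / 3.0 * k2)  # u3, rho3

def find_P3s(streamlines, A1, P_lo, P_hi, T_of_h):
    # each streamline is a dict with the station-2 quantities
    # u2_perp, rho2, u2, h2, k2, P2s and the area element dA2
    def area_mismatch(P3s):
        A3 = 0.0
        for sl in streamlines:
            u3, rho3 = expand_streamline(sl['u2'], sl['h2'], sl['k2'],
                                         sl['P2s'], P3s, T_of_h)
            A3 += sl['rho2'] * sl['u2_perp'] / (rho3 * u3) * sl['dA2']
        return A3 - A1
    # the bracket [P_lo, P_hi] must enclose the solution
    return brentq(area_mismatch, P_lo, P_hi)
\end{verbatim}
Once $P_3^\star$ is known, the same per-streamline integration provides the $\rho_3$ and $u_3$ that enter the thrust potential integral.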
Finally, once $P_3^\star$ is determined, the thrust potential can be expressed as: % \begin{equation} {\rm thrust~potential} = \mfd\int \left(\rho_3 u_3^2 + P_3^\star \right) \frac{\rho_2 u_2^\perp}{\rho_3 u_3} \ordi A_2 - \mfd\int \left(\rho_1 u_1^2 + P_1^\star \right) \ordi A_1 \end{equation} % or % \begin{equation} \frameeqn{ {\rm thrust~potential} = \mfd\int \frac{\rho_3 u_3^2 + P_3^\star}{\rho_3 u_3} \ordi \dot{m} - \mfd\int \left(\rho_1 u_1^2 + P_1^\star \right) \ordi A_1 } \end{equation} % \subsection{Perfect Gas with Turbulence, Revisited} The relationship for the stagnation pressure was proven previously to be: % \begin{equation} \mfd P^\circ = P^\star \left[ 1+ \frac{\gamma-1}{2} {\rm M}^2 \right] ^\frac{\gamma}{\gamma-1} \end{equation} % The thrust potential is recalled to correspond to: % \begin{equation} \frameeqn{ {\rm thrust~potential}= \mfd\int_2 \! u_{3} + {P^\star}^{\frac{1}{\gamma}} \left/ \rho u_{3}{P_3^\star}^{\frac{1-\gamma}{\gamma}} \right.\ordi \mdot - \mfd\int_1 \!\left(\rho u^2 + P^\star \right) \ordi A } \end{equation} % while an expression for $u_3$ was shown previously to correspond to: % \begin{equation} \mfd u_3 = a \left[ \frac{2 }{\gamma-1}+{\rm M}^2 -\left( \frac{2 }{\gamma-1} \right) \left(\frac{P_3^\star}{P^\star}\right)^\frac{\gamma-1}{\gamma} \right]^\frac{1}{2} \end{equation} % % \begin{equation} \mfd u_3 = a \sqrt{\frac{2 }{\gamma-1}} \left[ 1+\frac{\gamma-1}{2}{\rm M}^2 -\left(\frac{P_3^\star}{P^\star}\right)^\frac{\gamma-1}{\gamma} \right]^\frac{1}{2} \end{equation} % % \begin{equation} \mfd u_3 = a \sqrt{\frac{2 }{\gamma-1}} \left[ \left(\frac{P^\circ}{P^\star} \right)^\frac{\gamma-1}{\gamma} -\left(\frac{P_3^\star}{P^\star}\right)^\frac{\gamma-1}{\gamma} \right]^\frac{1}{2} \end{equation} % say $j=(\gamma-1)/\gamma$, then, % \begin{equation} \frameeqn{ \mfd u_3 = a \sqrt{\frac{2 }{\gamma-1}} \left[ \frac{{P^\circ}^j-{P_3^\star}^j}{{P^\star}^j} \right]^\frac{1}{2} } \end{equation} % Further, let's seek an expression for $\rho a^2$ knowing $a^2=\frac{2}{3}\gamma k+(\gamma-1) h=\frac{2}{3}\gamma k+\gamma R T$: % \begin{equation} \rho a^2 = \frac{P^\star (\frac{2}{3}\gamma k+\gamma R T)}{R T +\frac{2}{3} k} = \gamma P^\star \end{equation} % then, an expression for $\rho u_3$ would look like: % \begin{equation} \mfd \rho u_3 = \frac{\gamma P^\star}{a} \sqrt{\frac{2 }{\gamma-1}} \left[ \frac{{P^\circ}^j-{P_3^\star}^j}{{P^\star}^j} \right]^\frac{1}{2} \end{equation} % % \begin{equation} \frameeqn{ \mfd \rho u_3 = \frac{\gamma}{a} \sqrt{\frac{2 }{\gamma-1}} \left[ {P^\circ}^j-{P_3^\star}^j \right]^\frac{1}{2} {P^\star}^\frac{\gamma+1}{2\gamma} } \end{equation} % Now, we will seek to simplify the first integral of the thrust potential equation using the obtained expressions for $u_3$ and $\rho u_3$: % \begin{equation} \begin{array}{l} \mfd\int_2 \! u_{3} + {P^\star}^{\frac{1}{\gamma}} \left/ \rho u_{3}{P_3^\star}^{\frac{1-\gamma}{\gamma}} \right.\ordi \mdot \alb ~~~~=\mfd\int_2 \! a \sqrt{\frac{2 }{\gamma-1}} \left[ \frac{{P^\circ}^j-{P_3^\star}^j}{{P^\star}^j} \right]^\frac{1}{2} + {P_3^\star}^{\frac{\gamma-1}{\gamma}} {P^\star}^{\frac{1}{\gamma}} \left/ \frac{\gamma}{a} \sqrt{\frac{2 }{\gamma-1}} \left[ {P^\circ}^j-{P_3^\star}^j \right]^\frac{1}{2} {P^\star}^\frac{\gamma+1}{2\gamma} \right.\ordi \mdot \alb ~~~~=\mfd\int_2 \! 
a {P^\star}^{\frac{-j}{2}} \sqrt{\frac{2 }{\gamma-1}} \left[{P^\circ}^j-{P_3^\star}^j\right]^\frac{1}{2} + a {P_3^\star}^{\frac{\gamma-1}{\gamma}} {P^\star}^{\frac{1-\gamma}{2\gamma}} \left/ \sqrt{\frac{2 \gamma^2}{\gamma-1}} \left[ {P^\circ}^j-{P_3^\star}^j \right]^\frac{1}{2} \right.\ordi \mdot \alb ~~~~=\mfd\int_2 \! \frac{ a {P^\star}^{\frac{-j}{2}} }{ \sqrt{\frac{2 \gamma^2}{\gamma-1}} \left[ {P^\circ}^j-{P_3^\star}^j \right]^\frac{1}{2} } \left[ \frac{2 \gamma}{\gamma-1} \left[{P^\circ}^j-{P_3^\star}^j\right] + {P_3^\star}^j \right] \ordi \mdot\alb ~~~~=\mfd\int_2 \! \frac{ a {P^\star}^{\frac{-j}{2}} }{ \sqrt{2 j \gamma} \left[ {P^\circ}^j-{P_3^\star}^j \right]^\frac{1}{2} } \left[ 2 {P^\circ}^j + (j-2) {P_3^\star}^j \right] \ordi \mdot\alb ~~~~=\mfd\int_2 \! \frac{ a \left[ 2 {P^\circ}^j + (j-2) {P_3^\star}^j \right] }{ \left[2 j \gamma {P^\star}^j ( {P^\circ}^j-{P_3^\star}^j) \right]^\frac{1}{2} } \ordi \mdot\alb \end{array} \end{equation} % therefore, % \begin{equation} \frameeqn{ \mfd\int_2 \! u_{3} + {P^\star}^{\frac{1}{\gamma}} \left/ \rho u_{3}{P_3^\star}^{\frac{1-\gamma}{\gamma}} \right.\ordi \mdot =\mfd\int_2 \! \frac{ a \left[ 2 {P^\circ}^j + (j-2) {P_3^\star}^j \right] }{ \left[2 j \gamma {P^\star}^j ( {P^\circ}^j-{P_3^\star}^j) \right]^\frac{1}{2} } \ordi \mdot } \end{equation} % \subsubsection{Special Case for Vanishing Back Pressure} Let's assume that $P_3^\star=0$. Then, % % \begin{equation} \begin{array}{l} \mfd\int_2 \! u_{3} + {P^\star}^{\frac{1}{\gamma}} \left/ \rho u_{3}{P_3^\star}^{\frac{1-\gamma}{\gamma}} \right.\ordi \mdot \alb ~~~~=\mfd\int_2 \! \frac{ a \left[ 2 {P^\circ}^j + (j-2) {P_3^\star}^j \right] }{ \left[2 j \gamma {P^\star}^j ( {P^\circ}^j-{P_3^\star}^j) \right]^\frac{1}{2} } \ordi \mdot =\mfd\int_2 \! \frac{ a \left[ 2 {P^\circ}^j \right] }{ \left[2 j \gamma {P^\star}^j {P^\circ}^j\right]^\frac{1}{2} } \ordi \mdot =\mfd\int_2 \! \left[ \frac{ 2 a^2 {P^\circ}^j }{ j \gamma {P^\star}^j } \right]^{1/2} \ordi \mdot \end{array} \end{equation} % and, from the relationship between the stagnation pressure ratio and the stagnation temperature ratio: % % \begin{align*} \int_2& \! u_{3} + {P^\star}^{\frac{1}{\gamma}} \left/ \rho u_{3}{P_3^\star}^{\frac{1-\gamma}{\gamma}} \right.\ordi \mdot \alb &=\!\mfd\int_2 \! \left[ \frac{2 (\gamma R T + \frac{2}{3} \gamma k)}{j \gamma} \left( \frac{T^\circ}{T} - \frac{\gamma^2-\gamma}{6} {\rm M}^2 {\rm M}^2\turb \right) \right]^{1/2} \ordi \mdot\alb &=\!\mfd \int_2 \! \frac{1}{\sqrt{j}} \left[ \left( 2 R T + \frac{4}{3} k \right) \left( \frac{T^\circ}{T} - \frac{\gamma^2-\gamma}{6} {\rm M}^2 {\rm M}^2\turb \right) \right]^{1/2} \ordi \mdot\alb &=\!\mfd \int_2 \! \frac{1}{\sqrt{j}}\left[ 2 R T^\circ - 2 R T \frac{\gamma^2-\gamma}{6} {\rm M}^2 {\rm M}^2\turb + \frac{4}{3} k \frac{T^\circ}{T} - \frac{4}{3} k \frac{\gamma^2-\gamma}{6} {\rm M}^2 {\rm M}^2\turb \right]^{1/2} \ordi \mdot\alb &=\!\mfd \int_2 \! \frac{1}{\sqrt{j}}\left[ 2 R T^\circ + \frac{4}{3} k \left( -\frac{\gamma-1}{2} {\rm M}^2 + \frac{T^\circ}{T} - \frac{\gamma^2-\gamma}{6} {\rm M}^2 {\rm M}^2\turb \right) \right]^{1/2} \ordi \mdot\alb &=\!\mfd \int_2 \! \frac{1}{\sqrt{j}}\left[ 2 R T^\circ + \frac{4}{3} k \left( -\frac{\gamma-1}{2} {\rm M}^2 +1+\mfd\frac{\gamma-1}{2} {\rm M}^2 \left(1+\mfd\frac{\gamma}{3} {\rm M}\turb^2 \right) - \frac{\gamma^2-\gamma}{6} {\rm M}^2 {\rm M}^2\turb \right) \right]^{1/2} \!\!\!\!\! \ordi \mdot\alb \end{align*} % and finally, % \begin{equation} \frameeqn{ \mfd\int_2 \! 
u_{3} + {P^\star}^{\frac{1}{\gamma}} \left/ \rho u_{3}{P_3^\star}^{\frac{1-\gamma}{\gamma}} \right.\ordi \mdot =\int_2 \! \left[ \frac{2\gamma}{\gamma-1} \left( R T^\circ + \frac{2}{3} k \right) \right]^\frac{1}{2} \ordi \mdot } \end{equation} % which shows clearly that the thrust potential is \emph{only} a function of the stagnation temperature and the turbulence kinetic energy, and \emph{not} of the stagnation pressure, in the particular case of vanishing pressure at the exit. \subsubsection{Expressing the Thrust Potential as a Function of the Stagnation Temperature} Starting from where we left off, % \begin{align*} \mfd\int_2& \! u_{3} + {P^\star}^{\frac{1}{\gamma}} \left/ \rho u_{3}{P_3^\star}^{\frac{1-\gamma}{\gamma}} \right.\ordi \mdot\alb &=\mfd\int_2 \! \frac{ a \left[ 2 {P^\circ}^j + (j-2) {P_3^\star}^j \right] }{ \left[2 j \gamma {P^\star}^j ( {P^\circ}^j-{P_3^\star}^j) \right]^\frac{1}{2} } \ordi \mdot\alb &=\mfd\int_2 \! \left[ 2+ (j-2) \frac{{P_3^\star}^j}{{P^\circ}^j} \right] \left/ \left[\frac{2\gamma j}{a^2} \frac{{P^\star}^j}{{P^\circ}^j} \left( 1-\frac{{P_3^\star}^j}{{P^\circ}^j}\right) \right]^\frac{1}{2} \right. \ordi \mdot\alb &=\mfd\int_2 \! \left[ 2+ (j-2) \frac{{P_3^\star}^j}{{P^\circ}^j} \right] \left/ \left[\frac{2 j}{R T^\circ + \frac{2}{3} k} \left( 1-\frac{{P_3^\star}^j}{{P^\circ}^j}\right) \right]^\frac{1}{2} \right. \ordi \mdot\alb &=\mfd\int_2 \! \left(\frac{2 R T^\circ + \frac{4}{3} k}{j}\right)^\frac{1}{2} \left[ 1+ \left(\frac{j}{2}-1\right) \frac{{P_3^\star}^j}{{P^\circ}^j} \right] \left/ \left( 1-\frac{{P_3^\star}^j}{{P^\circ}^j}\right)^\frac{1}{2} \right. \ordi \mdot \end{align*} % % \begin{figure}[ht] \begin{center} \psfrag{X}[t][t][1][0]{$P^\circ/P_3^\star$} \psfrag{Y}[b][b][1][0]{$\left[ 1+ \left(\frac{j}{2}-1\right) \frac{{P_3^\star}^j}{{P^\circ}^j} \right] \left/ \left( 1-\frac{{P_3^\star}^j}{{P^\circ}^j}\right)^\frac{1}{2} \right.$} \includegraphics[width=3.3in]{pstag_over_p3.eps} \caption{Non-dimensional part of the thrust potential involving the stagnation pressure $P^\circ$ and back pressure, for $\gamma=1.4$; it is reminded that $j=(\gamma-1)/\gamma$.} \end{center} \end{figure} % % \begin{figure}[ht] \begin{center} \psfrag{X}[t][t][1][0]{$RT^\circ$} \psfrag{Y}[b][b][1][0]{$\left(\frac{2 R T^\circ}{j}\right)^\frac{1}{2}$ [Ns/kg]} \includegraphics[width=3.3in]{RTstag.eps} \caption{Part of the thrust potential involving the stagnation temperature $T^\circ$ neglecting turbulence ($k=0$) with $\gamma=1.4$; it is reminded that $j=(\gamma-1)/\gamma$.} \end{center} \end{figure} % \bibliographystyle{warpdoc} \bibliography{all} \end{document}
\chapter{Temporal resolution of pulse detection} \label{ch:timeres} In this chapter we measure the temporal localization precision once the presence of a signal pulse is established. To this end, we simulate events each containing only one signal at a known position. In principle we could use the LNGS data (\autoref{sec:lngsdata}), but we do not know the jitter of the trigger pulse and we may reach a temporal resolution below the sampling period, whereas in the simulation we know the exact temporal location of the signals. \section{Event simulation} Every event is the sum of a \SI1{PE} signal and a noise waveform. We do not add a baseline, so the noise has mean zero and the signals taper down to zero. The signals are negative. We use the same scale as the LNGS data; the scale does not affect the results of this study since we are not simulating digitization. The following paragraphs describe in detail the event simulation procedure. \subsection{Signal generation} \label{sec:toysignal} We generate the signal pulse shape according to the trigger-aligned template from \SI1{PE} laser pulses in Tile~57, see \autoref{sec:cctemplate} and \autoref{fig:template}. The template is sampled at \SI1{GSa/s}, but the simulated events are sampled at \SI{125}{MSa/s}, which is the sampling frequency planned for the DarkSide20k digitizers. The randomly generated signal time is not required to be aligned with either clock. Given the generated temporal position, we round it both down \emph{and} up to the \SI{1}{ns} clock tick. Then we downsample the template by averaging samples in groups of~8. This is done twice: once with the groups aligned to the floor-rounded temporal position, once with the ceiling-rounded one. Finally we interpolate linearly between the two downsampled templates. \autoref{fig:interptempl} shows a series of waveforms generated following this procedure. Here averaging before downsampling has the role of an antialiasing filter. While more refined antialiasing filters exist, a simple average is sufficient for our application. In each simulation event we vary the amplitude of the signal by an additive Gaussian random variable, which has the standard deviation observed in the LNGS data, \SI{2.9}\% of the average \SI1{PE} amplitude. This value is obtained by computing the difference in quadrature between the ``quantile standard deviations'' (see \autoref{sec:fingerplot}) of the \SI{1}{PE} and \SI{0}{PE} peaks in the fingerplot done with a \SI{1.5}{\micro s} average, the same used for the template in \autoref{sec:cctemplate}, and dividing it by the median of the \SI{1}{PE} peak. \begin{figure} \widecenter{\includempl{figinterptempl}} \figcaption{interptempl}{The signal template downsampled from \SI1{GSa/s} to \SI{125}{MSa/s} and translated continuously instead of by discrete steps with linear interpolation.} \end{figure} \subsection{Noise simulation} To study the dependence of the algorithms on noise, we simulate three different noise distributions: Gaussian white noise; noise sampled from the LNGS data; noise sampled from the Proto0 data. \stracka{Eliminare ``A persistence... in Figure 4.3''} The white noise is generated in the simulation. The LNGS noise is sampled from the pre-trigger region of Tile~57 data, the same data used for the signal template, ignoring any event with any sample less than 700 as in \autoref{sec:snrdata}. The Proto0 noise is copied from data collected operating Tile~57 below the breakdown voltage, keeping the whole events without selection.
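For concreteness, the template placement of \autoref{sec:toysignal} (rounding the continuous signal time down and up to the \SI{1}{ns} tick, downsampling by averaging groups of 8 samples for the two alignments, and interpolating linearly between the results) can be sketched as follows; this is a minimal illustration in Python, where the array \texttt{templ\_1ns} and the function name are placeholders:
\begin{verbatim}
import numpy as np

def place_template(templ_1ns, t_start_ns, n_samples, factor=8):
    # place the 1 GSa/s template at the continuous time t_start_ns (>= 0)
    # in an event of n_samples samples taken every `factor` nanoseconds
    def shifted_downsampled(t_int):
        fine = np.zeros(n_samples * factor)
        stop = min(t_int + len(templ_1ns), len(fine))
        fine[t_int:stop] = templ_1ns[:stop - t_int]
        # average groups of `factor` samples aligned to the event clock
        return fine.reshape(n_samples, factor).mean(axis=1)
    t_floor = int(np.floor(t_start_ns))
    frac = t_start_ns - t_floor
    return ((1 - frac) * shifted_downsampled(t_floor)
            + frac * shifted_downsampled(t_floor + 1))
\end{verbatim}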
A persistence plot of the Proto0 data is shown in \autoref{fig:hist2dtile155759}. The spectra are shown in \autoref{sec:spectrum}, the autocorrelations in \autoref{fig:autocorrlngs}. The preprocessing applied on the pre-trigger window of LNGS data does not fully reject \SI1{PE} pulses, and spurious \SI1{PE} pulses may appear in the simulation. In the analysis we will use robust statistics, i.e., quantiles, to deal with outliers caused by this or any other unanticipated feature of the data. When the noise waveforms for multiple simulated events are extracted from the same data event, we skip \SI{1}{\micro s} between each waveform segment in the data, to avoid correlations between the simulated events. We downsample the noise in the same way as the signal, by averaging nearby samples. The noise spectra from both sources (LNGS and Proto0) decrease with frequency, so an antialiasing with an average should suffice. The Proto0 data, as is available to us, is pre-downsampled without antialiasing from \SI{250}{MSa/s} to \SI{125}{MSa/s}. \emph{After} downsampling, the noise obtained from data is normalized to the desired variance. The order matters because downsampling with averaging reduces the variance of the noise, see \autoref{fig:noise}. We normalize the variance separately for each \emph{data} event, also fixing the mean to zero, such that in the simulated events the variance has a realistic variation. \stracka{Eliminare ``we normalize... data event''} \begin{figure} \widecenter{\includempl{fignoise}} \figcaption{noise}{The LNGS and Proto0 noise at the original sampling frequency (normalized to zero mean and unit variance) and downsampled.} \end{figure} \subsection{Event layout} Each event is the sum of a noise waveform and a shorter signal waveform. Before the beginning of the signal there is a noise-only region which is chosen long enough for the filters to be in a stationary state when the signal occurs. The length of this region is set to be \SI{2304}{ns}, i.e., longer than the largest filter-length parameter considered in our study, \SI{2048}{ns}. The simulation is repeated for various raw signal to noise ratios (SNR), calculated as the peak height relative to the baseline of the original \SI1{GSa/s} signal template over the noise standard deviation. The reason we consider the signal amplitude at \SI1{GSa/s} and not at the actual sampling frequency of the simulation is because downsampling reduces the peak height. For convenience, we want to keep the definition of signal height comparable at different sampling frequencies. \marginpar{I should use the peak height, averaged over continuous positioning, at the simulation sampling frequency, instead of at \SI{1}{GSa/s}. Then the comparisons at different sampling frequencies would need to adjust both for the reduction of the noise variance and of the peak height, but the definition of SNR would be the standard one.} Simulations with different raw SNR differ only in the multiplicative constant of the noise, so we use exactly the same noise and signal arrays for every SNR to speed up the code. This means that there is no random variation between results obtained at different SNR (or with different filters), keep this in mind if the smoothness of some curves would seem to suggest that the Monte Carlo error is negligible. \stracka{Eliminare ``keep this... is negligible''} \autoref{fig:toyevent} shows a complete example event. \begin{figure} \widecenter{\includempl{figtoyevent}} \figcaption{toyevent}{A simulation event. 
The dots are the minima of the filter outputs. The minima are searched in the shaded region only; this makes no difference at high enough SNR, as in this example, but in the limit SNR = 0 the minimum fluctuates uniformly: the search range sets the endpoints of this distribution.}
\end{figure}

\section{Temporal localization}
\label{sec:temploc}

To reconstruct the time position of the signals, we run the three filters described in \autoref{sec:filters} (moving average, exponential moving average, cross correlation), and take the minimum (the signals are negative) of the filtered waveform as the location of the signal. We also take the minimum of the unfiltered waveform as a baseline comparison. The minimum of the filter output occurs at a shifted position relative to the signal location, but this is not a problem since the choice of the point of the signal to be taken as reference is arbitrary, and, for each filter, the shift is constant from one event to another. To build the template for the cross correlation filter, we first truncate it as described in \autoref{sec:cctemplate}, and then downsample it in the same way we downsample the signal template and the noise.

\begin{figure}
\widecenter{\includempl{figtoyfilttempl}}
\figcaption{toyfilttempl}{Some cross correlation filter templates for different lengths. It may appear strange that the endpoint on the left has a different height than the endpoint on the right for a given template, since we choose the truncation to maximize the norm; it happens because we downsample \emph{after} truncation.}
\end{figure}

To allow for a localization more precise than the sampling clock bin, we interpolate the minimum sample and its first neighbors with a parabola. We also try upsampling the waveform to \SI{1}{GSa/s} (with sample repetition) prior to filtering to check if it improves performance.

\subsection{Time resolution results}

Assuming the Proto0 noise spectrum, we simulate 1000 events and repeat the time position reconstruction varying the filter, the filter length parameter, and the raw SNR. The signal template position is generated uniformly within one clock bin. \autoref{fig:lochist} shows the histograms of the temporal localization for all filters for a choice of SNR and filter length.

\begin{figure}
\widecenter{\includempl{figlochist}}
\figcaption{lochist}{Histograms of the temporal localization error, i.e.\ the difference between the filter output minimum and the signal template start, translated to have zero median, for a choice of SNR and filter length. The error bars mark the \SI{16}\% and the \SI{84}\% quantiles. As the definition of temporal resolution we take half the distance between those quantiles. The sampling step is \SI{8}{ns}.}
\end{figure}

We see that the distribution of reconstructed signal time positions can be non-Gaussian, so to quantify the resolution we use, instead of the standard deviation, half the distance between the \SI{16}\% and \SI{84}\% quantiles, which is equivalent to a standard deviation for a Gaussian, but gives a meaningful measure of the width of the distribution even when it is highly skewed or has heavy tails. \stracka{No paragraph break} \autoref{fig:rescurve} shows the temporal resolution thus defined for each filter, filter length, and raw SNR. The exponential moving average has a consistently poor performance compared to the other filters. The cross correlation filter is the best one, with performance improving with filter length; at a length of 96 samples (\SI{768}{ns}) it is already practically optimal.
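For reference, the sub-sample localization of \autoref{sec:temploc} and the quantile-based resolution used in these comparisons can be written compactly as a minimal Python/NumPy sketch; it is illustrative only, not the actual analysis code, and the argument names are hypothetical.

\begin{verbatim}
import numpy as np

def localize(filtered, search):
    """Sub-sample localization of a negative pulse in a filtered waveform.

    Take the minimum inside the search slice, then refine it by fitting a
    parabola through the minimum sample and its two nearest neighbours.
    """
    w = filtered[search]
    i = int(np.argmin(w))
    i = min(max(i, 1), len(w) - 2)  # keep the three-point stencil in range
    ym, y0, yp = w[i - 1], w[i], w[i + 1]
    delta = 0.5 * (ym - yp) / (ym - 2 * y0 + yp)  # parabola vertex offset
    return search.start + i + delta

def resolution(errors):
    """Half the distance between the 16% and 84% quantiles of the
    localization errors (equals the standard deviation for a Gaussian)."""
    q16, q84 = np.quantile(errors, [0.16, 0.84])
    return (q84 - q16) / 2
\end{verbatim}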
The moving average can get close to the cross correlation filter by an appropriate choice of the number of samples.

\begin{figure}
\widecenter{\includempl{figrescurve}}
\figcaption{rescurve}{Pulse detection temporal resolution for a range of raw SNR and filter lengths. The shaded region marks the sampling step~\SI{8}{ns}. The right endpoint of the cross correlation filter curves is at~\SI{2.6}{ns}.}
\end{figure}

In the experiment, the online processing of the PDM output will happen in two steps: the digitizers must find the signals, then send them to the front-end processors (FEPs) for further analysis. The computational resources of the digitizers are limited compared to those available in the FEPs. The exponential moving average can be implemented on the digitizers with few logic resources. The cross correlation with 64 samples could probably be performed with the resources available on the digitizers, since a computation with similar complexity was implemented in firmware and run on an evaluation card featuring the same FPGA installed on the DarkSide digitizer boards. The FEPs can and probably should use the best filter, so they would run a long cross correlation filter, since achieving a good temporal resolution may be beneficial for offline analysis. To summarize, out of all the temporal resolution curves the most relevant are:
%
\begin{itemize}
\item the best time resolution we can achieve with the exponential moving average and the moving average;
\item the long cross correlation filters;
\item the 64 samples cross correlation filter.
\end{itemize}
%
We plot these curves together in \autoref{fig:rescomp}, adding the resolution plots obtained in the best configuration when changing the noise spectrum in the simulation, to show the impact of the noise spectrum on the performance. We note in particular that a different noise spectrum makes a large difference at low SNR. Finally, we plot a curve computed with and without upsampling, which shows that upsampling does not significantly improve the performance.

\begin{figure}
\widecenter{\includempl{figrescomp}}
\figcaption{rescomp}{Pulse detection temporal resolution vs.\ SNR for various filters. The shaded region marks the sampling step~\SI{8}{ns}. The hatched band is the interval of SNR observed in Proto0; the vertical line is the SNR in the LNGS data, after downsampling to~\SI{125}{MSa/s}. Where not specified, the noise is from Proto0.}
\end{figure}

\section{Data reduction}

In this section we study the effect of some data reduction strategies at the digitizer level on the time resolution that can be achieved in subsequent data processing stages. We said that the digitizers must find signals in the waveform stream and send them to the FEPs for further processing. Depending on the background rate, the bandwidth of the connection between the digitizers and the FEPs can be a bottleneck. Two possible ways of reducing the amount of transmitted data are keeping only the minimum number of samples around each signal, and reducing the sampling frequency. Both have an effect on the temporal resolution, which we assess in the next paragraphs.

\subsection{Waveform truncation}

We repeat the simulation, as in \autoref{sec:temploc}, but this time we use only a fixed, smaller number of samples in each event to compute the filter output. We call this selection of samples a ``window''. On the window we run only a long cross correlation filter, since that is what would be done on the FEPs. As the past and future boundary condition we use zero.
We evaluate the filter even after the end of the sample window, because the window can be shorter than the filter. In this study we did not attempt any optimization of the left/right balance of the window. The number of samples to be stored \emph{before} the onset of the signal is driven by the requirement to allow a proper baseline subtraction procedure. However, by applying the zero padding just described, we are implicitly assuming that a proper baseline subtraction procedure has been applied prior to running the filter. Thus, we only focus on determining the number of samples that should be saved \emph{after} the onset of the signal, by considering windows that are heavily skewed to the right. Keep in mind that the measure we are looking at, the temporal resolution, does not depend critically on getting the baseline right.

While the length of the window is fixed, its placement is not fixed relative to the true signal location. Instead we use the temporal localization with another filter feasible on the digitizers, calibrated to have the median aligned to the beginning of the signal template. The window then extends for a given number of samples to the left and to the right of this localization. \stracka{No paragraph break} \autoref{fig:windowevent} shows this procedure graphically for a single event. \autoref{fig:windowtempres} shows the temporal resolution versus unfiltered SNR curves for various choices of window length, noise, and filter used to align the window, where for reasons of computation time the latter was computed at a fixed SNR that does not follow the value on the x-axis. \stracka{Delete ``where for reasons... x-axis''}

\begin{figure}
\widecenter{\includempl{figwindowevent}}
\figcaption{windowevent}{Left panel: a simulation event filtered with the exponential moving average. Right panel: the same event filtered with a long cross correlation filter, both using the whole waveform and using only the samples in the shaded window, which is centered using the localization from the filter in the left panel.}
\end{figure}

\begin{figure}
\widecenter{\includempl{figwindowtempres}}
\figcaption{windowtempres}{Pulse detection temporal resolution with a long cross correlation filter applied only on a short window of samples centered using a shorter cross correlation filter (left panels) or an exponential moving average (right panels). The various curves correspond to different window lengths, while the black dots are the resolution without windowing.}
\end{figure}

From \autoref{fig:windowtempres} we conclude that it would be necessary to save at least \SI1{\micro s} of waveform after the onset of the signal to avoid degrading the temporal resolution. On the other hand, we also observe that the performance improves quickly towards the optimum as the window length increases. In the top right panel, i.e., with Proto0 noise and centering with an exponential moving average, the resolution does not converge to the value without windowing as the window length increases. This is due to the standard deviation of the distribution of the window center, \SI{17}{Sa}, not being small enough compared to the left window margin, \SI{32}{Sa}. This means that in a non-negligible fraction of cases, the window does not include the leading edge of the signal. We show this problem intentionally to underline the importance of the left/right balance.

\subsection{Downsampling}

Another way of reducing the data throughput is downsampling.
In \autoref{fig:tempresdowns} we show the temporal resolution achieved with a long cross correlation filter at different sampling frequencies. The downsampling is computed by averaging nearby samples. In other words, we compare applying the cross correlation at the full sampling frequency with first applying an antialiasing filter, downsampling, and then computing the cross correlation with a downsampled template. We observe that downsampling by a factor of 2 from \SI{125}{MSa/s} to \SI{62.5}{MSa/s} maintains almost the same temporal resolution, while going to \SI{31.2}{MSa/s} degrades it visibly.

When downsampling a waveform, the variance of the noise is reduced. At each sampling frequency the simulation sets the SNR looking at the standard deviation of the already downsampled noise, so the SNR scales are off by the factor of the noise amplitude reduction. To make the simulations comparable, we should start from a common ``master simulation'' at \SI{1}{GSa/s}, then downsample it various times. Our code does not implement this and repeats the simulation from scratch at each sampling frequency, renormalizing the downsampled noise to unit variance. To account for this, in \autoref{fig:tempresdowns} we apply a correction factor to the raw SNR before plotting the time resolution results.

\begin{figure}
\widecenter{\includempl{figtempresdowns}}
\figcaption{tempresdowns}{Pulse detection temporal resolution at different sampling frequencies with a cross correlation filter with template length \SI{2048}{ns}. The SNR scale is at \SI{125}{MSa/s}; curves for different sampling frequencies are rescaled horizontally by the factor written in the legend to account for the noise variance reduction with downsampling, as described in the text.}
\end{figure}

We also check whether downsampling is associated with a degradation of the signal-to-noise ratio in the cross correlation filter output. In \autoref{tab:filtsnrdowns} we report the ratio between the SNR after and before filtering. It does not appear to change significantly. \marginpar{Add digitization to the simulation and make a plot like the one for the sampling frequency but varying the number of bits at \SI{125}{MSa/s}. Don't use the number of bits, use the ratio of the signal peak to one digitization step, which is well defined.}

\begin{table}
\centering
\begin{tabular}{c*4S[table-format=1.1]}
\toprule
& \multicolumn4c{SNR after over before filtering} \\
\cmidrule(l){2-5}
Noise & {\SI{1}{GSa/s}} & {\SI{125}{MSa/s}} & {\SI{62.5}{MSa/s}} & {\SI{31.2}{MSa/s}} \\
\midrule
Proto0 & & 3.3 & 3.3 & 3.3 \\
LNGS & 5.6 & 5.5 & 5.7 & 6.0 \\
White & 4.3 & 4.3 & 4.2 & 4.2 \\
\bottomrule
\end{tabular}
\caption{\label{tab:filtsnrdowns} Ratio of the SNR after over before filtering with a cross correlation filter with template length \SI{2048}{ns}. The \SI{125}{MSa/s} column contains the actual SNR ratios of the simulations, while the values for the other sampling frequencies are divided by the noise standard deviation reduction with downsampling relative to \SI{125}{MSa/s} to make them comparable.}
\end{table}
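The correction just described hinges on how much averaging shrinks the noise standard deviation. A minimal Python/NumPy sketch of how such a factor can be estimated from a noise waveform is given below; it is illustrative only (the variable names are hypothetical) and is not the actual analysis code.

\begin{verbatim}
import numpy as np

def downsample(waveform, n):
    """Downsample by averaging groups of n consecutive samples."""
    m = len(waveform) // n
    return waveform[:m * n].reshape(m, n).mean(axis=1)

def std_reduction(noise, n):
    """Factor by which the noise standard deviation shrinks when the
    waveform is downsampled by averaging groups of n samples.  For white
    noise this is close to 1/sqrt(n); for correlated noise (LNGS, Proto0)
    it has to be measured on the data."""
    return downsample(noise, n).std() / noise.std()

# Example: the factor relating the 125 MSa/s SNR scale to a simulation
# run at 62.5 MSa/s would be std_reduction(noise_125, 2), where
# noise_125 is a (hypothetical) noise waveform sampled at 125 MSa/s.
\end{verbatim}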
\documentclass[10pt,twocolumn,letterpaper]{article}

\usepackage{cvpr}
\usepackage{tgbonum}
\usepackage{epsfig}
\usepackage{graphicx}
\usepackage{amsmath}
\usepackage{amssymb}

% Include other packages here, before hyperref.
\usepackage{graphicx}
\usepackage{subcaption}

% If you comment hyperref and then uncomment it, you should delete
% egpaper.aux before re-running latex. (Or just hit 'q' on the first latex
% run, let it finish, and you should be clear).
\usepackage[pagebackref=true,breaklinks=true,letterpaper=true,colorlinks,bookmarks=false]{hyperref}

\cvprfinalcopy % *** Uncomment this line for the final submission

\def\cvprPaperID{****} % *** Enter the CVPR Paper ID here
\def\httilde{\mbox{\tt\raisebox{-.5ex}{\symbol{126}}}}

% Pages are numbered in submission mode, and unnumbered in camera-ready
\ifcvprfinal\pagestyle{empty}\fi

\graphicspath{{results/}}
\newlength{\imagewidth}

\begin{document}

%%%%%%%%% TITLE
\title{Super-Resolution with GANs}

\author{Nathanael Bosch\\
{\tt\small [email protected]}
\and
Thomas Grassinger\\
{\tt\small [email protected]}
\and
Jonas Kipfstuhl\\
{\tt\small [email protected]}
\and
Pierre Springer\\
{\tt\small [email protected]}
%\and
%Team Member 5\\
%{\tt\small [email protected]}
}

\maketitle
%\thispagestyle{empty}

\section{Introduction}
We call super-resolution (SR) the task of estimating a high-resolution (HR) image from its low-resolution (LR) counterpart. Recent work with optimization-based methods largely focuses on minimizing the mean squared reconstruction error, which results in high peak signal-to-noise ratios (PSNR) but very smooth pictures. The authors of~\cite{LedigChristian2016PSIS} propose using a generative adversarial network (GAN) with an improved loss function. We reimplemented the paper and investigated the role of the discriminator and the proposed loss function.

\section{GANs}
GANs consist of two different networks, a generator network and a discriminator network. The idea is that the generator estimates a super-resolved image from its LR version, with the goal of producing images so similar to real ones that the discriminator network fails to distinguish them. Therefore we optimize the discriminator network $D_{\Theta_D}$ in an alternating manner along with the generator network $G_{\Theta_G}$ to solve the adversarial min-max problem:
\begin{align*}
\min_{\Theta_G} \max_{\Theta_D} \; &\mathbb{E}_{I^{HR} \sim p_{\text{train}}(I^{HR})} [\log D_{\Theta_D}(I^{HR})] \\
+ \, &\mathbb{E}_{I^{LR} \sim p_G(I^{LR})} [\log (1-D_{\Theta_D}(G_{\Theta_G}(I^{LR})))]
\end{align*}
We define the perceptual loss $l^{SR}$ as a weighted sum of content loss components and a discriminative (adversarial) loss component:
\begin{equation*}
l^{SR}=\alpha l^{SR}_{MSE} + \beta l^{SR}_{VGG1619/i,j} + \gamma l^{SR}_{D}
\end{equation*}
More precisely, the content loss components are defined as follows:
\begin{align*}
l^{SR}_{MSE} &= \frac{1}{r^2WH}\sum_{x=1}^{rW}\sum_{y=1}^{rH}(I^{HR}_{x,y}-G_{\Theta_G}(I^{LR})_{x,y})^2 \\
l^{SR}_{VGG1619/i,j}&=\frac{1}{W_{i,j}H_{i,j}} \sum_{x=1}^{W_{i,j}}\sum_{y=1}^{H_{i,j}}(\Phi_{i,j}(I^{HR})_{x,y}-\Phi_{i,j}(G_{\Theta_G}(I^{LR}))_{x,y})^2
\end{align*}
with downsampling factor $r$, image dimensions $W,H$, and dimensions $W_{i,j},H_{i,j}$ of the feature maps $\Phi_{i,j}$.
Finally, the discriminative loss is defined as follows:
\begin{equation*}
l^{SR}_{D}=\sum_{n=1}^N -\log D_{\Theta_D}(G_{\Theta_G}(I^{LR}))
\end{equation*}

\section{Setup}
\label{sec:setup}
% what we used for our work
% \subsection{Dataset}
% \label{sec:data}
For training we used the PASCAL VOC Dataset\cite{pascal-voc-2012} with more than 10,000 images as well as the NTIRE Dataset\cite{Agustsson_2017_CVPR_Workshops} with 800 images.
% our datasets
% \subsection{Networks}
% \label{sec:nets}
We used pretrained VGG networks in different configurations. The best results were obtained when we used a VGG1619 network. We also considered a VGG16 network; although faster to train, it did not yield results of equal quality.
% the Networks, e.g. VGG16, VGG19, VGG16/19

\section{Results}
\label{sec:results}
% our results => main section
% discriminator may be omitted
% better nets work better
% ...
% images
We investigated the loss proposed by the authors of~\cite{LedigChristian2016PSIS} by training multiple networks with only parts of the loss. The results can be seen in figure~\ref{fig:comp}.

\begin{figure*}[h]
  \centering
  \subcaptionbox{VGG1619 perceptual, adversarial, image loss}[0.2\linewidth]{%
    \includegraphics[width=0.15\linewidth, keepaspectratio]{vgg1619_p_a_i}
  }
  \subcaptionbox{VGG1619 perceptual, adversarial loss}[0.2\linewidth]{%
    \includegraphics[width=0.15\linewidth, keepaspectratio]{vgg1619_p_a}
  }
  \subcaptionbox{VGG1619 perceptual, image loss}[0.2\linewidth]{%
    \includegraphics[width=0.15\linewidth, keepaspectratio]{vgg1619_p_i}
  }
  \subcaptionbox{VGG1619 perceptual loss}[0.2\linewidth]{%
    \includegraphics[width=0.15\linewidth, keepaspectratio]{vgg1619_p}
  }
  \subcaptionbox{VGG1619 adversarial, image loss}[0.2\linewidth]{%
    \includegraphics[width=0.15\linewidth, keepaspectratio]{vgg1619_a_i}
  }
  \subcaptionbox{VGG1619 image loss}[0.2\linewidth]{%
    \includegraphics[width=0.15\linewidth, keepaspectratio]{vgg1619_i}
  }
  \subcaptionbox{VGG19 perceptual, adversarial, image loss}[0.2\linewidth]{%
    \includegraphics[width=0.15\linewidth, keepaspectratio]{vgg19_p_a_i}
  }
  \subcaptionbox{VGG16 perceptual, adversarial, image loss}[0.2\linewidth]{%
    \includegraphics[width=0.15\linewidth, keepaspectratio]{vgg16_p_a_i}
  }
  \caption{Comparison of several loss configurations}
  \label{fig:comp}
\end{figure*}

The curves of the PSNR and SSIM (structural similarity) values during training may be seen in figure~\ref{fig:plots}.
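For concreteness, the weighted perceptual loss defined above can be assembled as in the following sketch (PyTorch-style Python). The weights and the feature extractor \texttt{phi} are placeholders rather than the configuration used in our experiments.

\begin{verbatim}
import torch
import torch.nn.functional as F

def perceptual_loss(sr, hr, d_sr, phi, alpha=1.0, beta=1.0, gamma=1e-3):
    """Weighted sum of image (MSE), VGG-feature and adversarial terms.

    sr, hr : super-resolved and ground-truth images, shape (N, C, H, W)
    d_sr   : discriminator output D(G(I_LR)) for the SR batch, in (0, 1)
    phi    : frozen feature extractor (e.g. a truncated VGG network)
    """
    l_mse = F.mse_loss(sr, hr)                # pixel-space content loss
    l_vgg = F.mse_loss(phi(sr), phi(hr))      # feature-space content loss
    l_adv = -torch.log(d_sr + 1e-8).mean()    # generator adversarial loss
    return alpha * l_mse + beta * l_vgg + gamma * l_adv
\end{verbatim}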
\begin{figure*}[h]
  \centering
  \captionsetup{}
  \subcaptionbox{}[0.4\linewidth]{%
    \includegraphics[width=0.4\linewidth, keepaspectratio]{IMG_20180206_113139_679.jpg}
  }
  \subcaptionbox{}[0.4\linewidth]{%
    \includegraphics[width=0.4\linewidth, keepaspectratio]{IMG_20180206_113143_300.jpg}
  }
  \subcaptionbox{}[0.4\linewidth]{%
    \includegraphics[width=0.4\linewidth, keepaspectratio]{IMG_20180206_113147_469.jpg}
  }
  \subcaptionbox{}[0.4\linewidth]{%
    \includegraphics[width=0.4\linewidth, keepaspectratio]{IMG_20180206_113150_955.jpg}
  }
  \subcaptionbox{}[0.4\linewidth]{%
    \includegraphics[width=0.4\linewidth, keepaspectratio]{IMG_20180206_113156_419.jpg}
  }
  \subcaptionbox{}[0.4\linewidth]{%
    \includegraphics[width=0.4\linewidth, keepaspectratio]{IMG_20180206_113200_011.jpg}
  }
  \subcaptionbox{}[0.4\linewidth]{%
    \includegraphics[width=0.4\linewidth, keepaspectratio]{IMG_20180206_113202_817.jpg}
  }
  \subcaptionbox{}[0.4\linewidth]{%
    \includegraphics[width=0.4\linewidth, keepaspectratio]{IMG_20180206_113206_502.jpg}
  }
  \caption{Curves of metrics during training}
  \label{fig:plots}
\end{figure*}

We observed that those widely used metrics are not sufficient for our purposes. While they no longer changed during training, the visual appearance still improved. This observation was also made by the authors of~\cite{LedigChristian2016PSIS}.

\section{Conclusion}
\label{sec:conclusion}
In our work we achieved very good results using the proposed loss and the residual network. During our analysis we found the perceptual loss to be crucial for achieving high performance, but when used on its own it led to noticeable artifacts in the output images. The removal of the discriminator had no impact on the resulting image quality in our work, as the generator performs very well with the other parts of the loss. This leads to considerable difficulties in training the GAN.
% something about what we learned

\appendix
%%% Appendix
% include images
% include graphs

{
\nocite{*} % also those references without \cite{·}
\small
\bibliographystyle{ieee}
\bibliography{bib}
}

\end{document}
\documentclass[output=paper]{langscibook} \ChapterDOI{10.5281/zenodo.3520575} \title{Tone, orthographies, and phonological depth in African languages} \author{Michael Cahill\affiliation{SIL international}} \abstract{Marking of tone in African orthographies has historically been a challenge, not only for linguistic and analytical reasons, but also because most designers of these orthographies have been educated in non-tonal languages. After a review of lexical vs. grammatical tone, this paper examines various strategies that have been used for marking both lexical and grammatical tone in several East and West African languages, as well as cases in which tone is not marked. The question of the desired phonological depth of an orthography is discussed, especially when applied to tonal processes. Many phonologists do not apply theory more recent than Chomsky and \citet{Halle1968} to orthographies. However, the more recent bifurcation of rules into lexical and postlexical provides a psycholinguistically supported phonological level at which tone marking can be based: the output of the lexical level. Experimental evidence supports this \textsc{lexical} level as more readable than either a \textsc{phonemic} or a \textsc{deep} level. A tonal typology of languages also guides what types of languages more predictably would need lexical tone marking. Recommendations for orthographical implementation are given in the conclusion.} \begin{document} \maketitle \section{Introduction}\label{sec:Introduction:1} Marking of tone in African orthographies was considered problematic even before the 1928 Rejaf Language Conference, where permission was rather grudgingly given to mark tones in Sudanese languages when absolutely necessary: “For tonal representations, the consensus was that only high tones should be marked, with an acute accent, and only if necessary for a particular language” \citep{Miner2003}. One reason for this rather tepid approval was that most developers of orthographies either were Europeans or were educated in European languages, which of course are not tonal. The result was that many writing systems for African languages avoided tone marking, and tone was often not studied in any depth. Matters improved only somewhat two years after Rejaf with a cross-continental proclamation: \begin{quote} In books for Africans, tones, generally speaking, need only be marked when they have a grammatical function, or when they serve to distinguish words alike in every other respect; and even then they may be sometimes omitted when the context makes it quite clear which word is intended. As a rule, it will suffice to mark the high or the low tone only. (\citealt[14, referring to Rejaf and 12 other documents]{IIALC1930}) \end{quote} This guidance sounds strikingly modern, both in what it says and does not say. Note that this statement specifies books “for Africans,” not for foreigners, so it primarily has local literacies in mind. It laudably distinguishes grammatical from lexical tone, and for the latter, advocates what is called “selective tone marking” today -- marking tone only on minimal pairs, and even then, only when they are words likely to be confused in context. Tone marking is still considered a challenge today. It is not uncommon for orthography developers to not mark tone at all, either for principled reasons, or because they cannot deal with it, or because they do not consider it important (see \citealt{Cahill2000} for a critique of omitting all tone markings). 
This paper begins (\sectref{sec:LexicalVgrammaticalTone:2}) with a review of the distinction between lexical and grammatical tone. \sectref{sec:HowToneMarked:3} examines methods that have been used to represent both lexical and grammatical tone (or not) in various African orthographies. In \sectref{sec:PhonTheoryOrtho:4}, I examine two major topics for assisting decisions in tone marking: the appropriate phonological level for orthographies, and a two-fold typological division of African languages. I close in \sectref{sec:Conclusion:5} with some recommendations for representing tone in African orthographies, and a brief re-examination of the selective tone marking issue. \section{Lexical vs. grammatical tone: Review} \label{sec:LexicalVgrammaticalTone:2} Lexical tone is a difference in pitch that distinguishes one \textit{lexeme} from another. Samples of this are given in \REF{ex:LexicalToneDifferencesNouns:1}.\footnote{I follow a common notation for tone transcriptions that indicates tone levels with various diacritics: á = high, à = low, ā = mid, â = falling, ǎ = rising, and ꜝá = downstepped high. An unmarked tone is generally mid in a 3-level system. Unless indicated by other labeling, phonetic transcriptions are enclosed in square brackets [a], while orthographic representations are in angle brackets 〈a〉. ISO codes for languages are noted in the usual square brackets, e.g., [kma] for Kɔnni in \REF{ex:LexicalToneDifferencesNouns:1}.} \ea Lexical tone differences in nouns\\ \label{ex:LexicalToneDifferencesNouns:1} \ea \langinfo{Kɔnni}{}{\citealt[306]{Cahill2007}}\smallskip\\ %Kɔnni [kma] (Ghana) \citep[306]{Cahill2007}\\ \begin{tabularx}{\linewidth}{@{}XXX@{}} \textit{kpááŋ} & \textit{kpá}\,ꜝ\textit{áŋ} & \textit{kpàáŋ}\\ ‘oil’ & ‘guinea~fowl’ & ‘back~of~head’ \\ \end{tabularx} % % % \gll \textit{kpááŋ} \textit{kpáꜝ}\textit{áŋ} \textit{kpàáŋ}\\ % % % ‘oil’ ‘guinea~fowl’ ‘back~of~head’ \\ \ex Mono [mnh] (D. R. Congo) \citep[198]{Olson2005}\smallskip\\ % \gll {\textit{áwá}} {\textit{\=aw\=a}} {\textit{àwà}}\\ % ‘diarrhea’ ‘road’ ‘fear’ \\ \begin{tabularx}{\linewidth}{@{}XXX@{}} \textit{áwá} & {\textit{\=aw\=a}} & {\textit{àwà}}\\ ‘diarrhea’ & ‘road’ & ‘fear’ \\ \end{tabularx} \z\z Grammatical tone, on the other hand, distinguishes one \textit{grammatical category} from another. There are many grammatical categories which can be thus distinguished. Some of the more common ones are given in \xxref{tab:PersonByGrammaticalTone:2}{tab:OtherRelations:7} and \tabref{ex:SingPluralGrammaticalTone:3}. Not every person distinction is differentiated by tone in these or other languages; it is typically only two pronouns of the set that are so distinguished. \ea Person distinguished by grammatical tone\label{tab:PersonByGrammaticalTone:2} \ea Jur Modo [bex] (Sudan) \citep[80]{Persson2004}\smallskip\\ \begin{tabularx}{\linewidth}{@{}XXX@{}} \textit{nì} & \textit{ní} & \\ ‘her’ & ‘their’ & \\ \end{tabularx} \ex Lyele [lee] (Burkina Faso) \citep[57]{Kutsch2014}\smallskip\\ \begin{tabularx}{\linewidth}{@{}XXX@{}} \textit{ń} & \textit{ǹ} & \\ \textsc{2sg} & \textsc{3sg} & \\ \end{tabularx} \z \z In some languages (e.g., Tarok in \tabref{ex:SingPluralGrammaticalTone:3}), tone distinguishes singulars from plurals in only a subset of nouns, while in others (e.g., Koro Waci, Ndrulo), tone change is the normal method of making plurals from singular nouns. It appears that in the majority of languages which exhibit tone change to mark plural nouns, the plural nouns are in some way higher toned than the singular. 
However, this is not universal, as will be seen in Karaboro in section \sectref{sec:HowToneMarked:MarkGrammatical:3}. \begin{table} \begin{tabularx}{\textwidth}{lXXX} \lsptoprule & Singular & Plural & Gloss \\\midrule \multicolumn{4}{l}{a. Ndrulo [led] (Uganda) \citep[60]{Kutsch2014}}\\ & \textit{vìnì} & \textit{víní} & ‘his sister/s’ \\ & \textit{djānì} & \textit{djání} & ‘his father/s’ \\ \multicolumn{4}{l}{b. Koro Waci [bqv] (Nigeria) (Rachelle Wenger, p.c.)\ia{Wenger, Rachelle@Wenger, Rachelle}}\\ & \textit{\`{ɪ}s\'{ʊ}r}& \textit{\'{ɪ}s\'{ʊ}r} & ‘he-goat/s’ \\ & \textit{\`{ɪ}t\'{ɔ}m\`{ɪ}} & \textit{\'{ɪ}t\'{ɔ}m\'{ɪ}} & {‘work/s’} \\ & \textit{ìbǔr} & \textit{íbûr} & ‘slime/s’ \\ \multicolumn{4}{l}{c. Tarok [yer] (Nigeria) \citep[90–91]{Longtau2008}}\\ & \textit{ìfàng} & \textit{īfáng} & ‘fingers/s’ \\ & \textit{ìnà} & \textit{īnà} & ‘cow/s’ \\ & \textit{ǹtúng} & \textit{\={n}túng} & ‘hyena/s’ \\ \lspbottomrule \end{tabularx} \caption{Singular/plural nouns distinguished by grammatical tone\label{ex:SingPluralGrammaticalTone:3}} \end{table} Though verbal aspect may be the most common grammatical category distinguished by tone, as in \REF{tab:VerbAspectByGrammaticalTone:4}, other categories are not rare. \REF{tab:LocativeByGrammaticalTone:5} shows an example of tone distinguishing a locative from the bare noun, \REF{tab:SubjObjRelationsByTone:6} exemplifies the syntactic subject/object feature distinguished solely by tone, and \REF{tab:OtherRelations:7} exhibits a miscellany of language-specific grammatical relations distinguished by tone. \ea Verbal aspect distinguished by grammatical tone\label{tab:VerbAspectByGrammaticalTone:4}\\Mbembe [mfn] (Nigeria) \citep{Barnwell1969}\smallskip\\ \begin{tabularx}{\linewidth}{@{}XXX@{}} \textit{\`{ɔ}k\^{ɔ}n} ‘you sang’ & \textit{\'{ɔ}k\'{ɔ}n} {‘you should sing’} & \textit{móchí} ‘he will eat’ \\ \textit{\'{ɔ}k\`{ɔ}n} {‘you have sung’} & \textit{\'{ɔ}k}\,ꜝ\textit{\'{ɔ}n} ‘if you sing’ & \textit{mòchí} ‘he will not eat’\\ \end{tabularx} \z \ea Locative distinguished by grammatical tone\label{tab:LocativeByGrammaticalTone:5}\\Fur [fvr] (Sudan) \citep[61]{Kutsch2014}\smallskip\\ \begin{tabularx}{\linewidth}{@{}XXX@{}} \textit{bàrù} ‘country’ & \textit{bàrú} ‘in the country’ & \\ \textit{d\'{ɔ}ŋá} {‘hand’} & \textit{d\'{ɔ}ŋà} ‘in the hand’ & \\ \textit{\`{ʊ}t\'{ʊ}} ‘fire’ & \textit{\v{ʊ}t\`{ʊ}} ‘in the fire’ & \\ \end{tabularx} \z \ea Subject/object relations distinguished by grammatical tone\label{tab:SubjObjRelationsByTone:6}\\ Sabaot [spy] (Uganda) \citep[66]{Kutsch2014}\smallskip\\ \textit{kɪbakaac kwààn} ‘his father left him’\\ \textit{kɪbakaac kwáán} ‘he left his father’\\ \z \ea Other relations\label{tab:OtherRelations:7}\\ Lugungu [rub] (Uganda) \citep[10]{Moe1999}\smallskip\\ \textit{mulogo muhandú} ‘an old witch’ \\ \textit{múlógó muhandú} ‘the witch is old’ \\ \textit{múlógô muhandú} ‘the witch, (she) is old’ \\ \z \section{How tone is marked} \label{sec:HowToneMarked:3} Local orthography developers and outside linguists have developed astonishingly varied and sometimes creative ways of marking tone in languages. In contrast, some languages do not mark tone at all, even if they are distinctly tonal, and I start with these. \subsection{No tone marking} \label{sec:HowToneMarked:NoToneMark:1} Here I look at a few languages with no orthographic tone marking at all. Interestingly, sometimes tone marking appears to be crucial to reading, and in other cases less so. 
The consensus among linguists I have spoken to is that the common way of writing Hausa in \REF{tab:cahill:VariousVerbalAspects:8} (there are other systems) is quite difficult to read. This is especially due to the fact that the \textit{grammatical} tone, as in the example, is not marked, and there are many situations where this ambiguity is impossible to resolve by the context. \ea Various verbal aspects\label{tab:cahill:VariousVerbalAspects:8}\\ Hausa [hau] (Nigeria) \citep{Harley2012}\smallskip\\ \begin{tabularx}{\linewidth}{@{}XXX@{}} [jáá tàfí] & [jáà tàfí] & [jà tàfí]\\ $\langle$ya tafi$\rangle$ & $\langle$ya tafi$\rangle$ & $\langle$ya tafi$\rangle$\\ ‘he went’ & ‘he may go’ & ‘he should go’\\ \end{tabularx} \z Kumam [kdi] (Uganda) has both lexical and grammatical tone: \textit{abe} can mean either ‘an egg’ or ‘a lie,’ while \textit{ebedo} can mean either ‘he lives’ or ‘he lived.’ However, tone is not marked at all in Kumam, and 60\% of people surveyed agreed that it is more difficult to read the Kumam Bible than Bibles in other languages \citep{Edonyu2015}. Kɔnni [kma] (Ghana) orthography does not mark tone. However, in contrast to the above languages, this seems not to make a significant difference in readability (my personal observation). In this language minimal pairs are few, so there is a fairly small functional load for lexical tone. Furthermore, there is very little grammatical tone in the language. People are able to read aloud fluently. \subsection{Marking lexical tone} \label{sec:HowToneMarked:MarkLexical:2} Lexical tone, if it is marked, is marked by diacritics more frequently than not. Rangi [lag] of Tanzania, for example, marks lexical High tone, but only on nouns (e.g., \textit{ikúfa} ‘bone’, \citealt{Stegen2005}). Similarly, Akoose marks High tone (e.g., \textit{edíb} [èdíb] ‘river’) and contours (e.g., \textit{kɔ̂d} [kɔ̂d] ‘age’), but leaves Low unmarked \citep[13]{Hedinger2011}. In a few cases, tone has been marked by punctuation marks before each word, especially in Côte d’Ivoire (e.g., \citealt{Bolli1978}). Examples of the punctuation marks used are displayed in \tabref{tab:cahill:LexicalToneNotationCoteLang:9}. \begin{table} \begin{tabularx}{\textwidth}{XXXXXXXX} \lsptoprule extra high & high & mid & low & extra low & mid-low falling & low-high rising & high-low falling\\\midrule ″CV & ′CV & CV & {}-CV & =CV & CV- & {}-CV’ & ‘CV-\\ \lspbottomrule \end{tabularx} \caption{Lexical tone notation for Côte d’Ivoire languages \citep[58]{Kutsch2014}\label{tab:cahill:LexicalToneNotationCoteLang:9}} \end{table} This system can handle up to five tone levels, necessary in some languages of Côte d’Ivoire. This is exemplified as follows in Attié, which has four contrastive levels of tone (but does not have extra low). \ea \label{ex:AttieContrastiveTone:1} \langinfo{Attié}{}{Matthew 6:30a}\\ \glt 'Pɛte {\textquotedbl}yi {\textquotedbl}fa, 'fa {\textquotedbl}kan'a 'lö {\textquotedbl}a -bë ko fon- 'tshɛn'a tɔ, 'eyipian -Zö -wɔ' sɛn 'e hɛn dzhi ko \ldots \z \subsection{Marking grammatical tone} \label{sec:HowToneMarked:MarkGrammatical:3} Different languages have used a wide variety of strategies for indicating grammatical tone. One strategy is using diacritics, and often these mark a phonetic tone which instantiates a particular grammatical category, as in \REF{tab:DiacriticPhoneticsMeaning:10}, with the Daffo variety of Lis Ma Ron. 
\ea Diacritic showing both phonetics and meaning\label{tab:DiacriticPhoneticsMeaning:10}\\ Lis Ma Ron [cla] (Nigeria) \citep{Harley2012}\smallskip\\ \begin{tabularx}{\linewidth}{@{}XXX@{}} \textit{á} & \textit{à} & \\ ‘you (male)’ & `he' & \\ \end{tabularx} \z Akoose exhibits a somewhat unusual pattern in that the singular and plural nouns for class 9/10 are identical, but the distinction is made by tone on the agreement prefix of the following \textit{verb}: %\begin{stylelsTableHeading} %Table : Displaced diacritics in Akoose \citep[13]{Hedinger2011} %\end{stylelsTableHeading} %\begin{table} %\begin{tabularx}{\textwidth}{XXXX} % \lsptoprule % & phonetic & orthography & gloss\\ % Akoose [bss] & [ngù: \textbf{è}délé] & \textit{nguu} \textbf{\textit{e}}\textit{délé} & ‘the pig is heavy’\\ % (Cameroon) & [ngù: \textbf{é}délé] & \textit{nguu} \textbf{\textit{é}}\textit{délé} & ‘the pigs are heavy’\\ % \lspbottomrule % \end{tabularx} %\end{table} \protectedex{ \begin{exe} \ex \label{ex:DisplacedDiacriticsAkoose:2} \langinfo{Akoose}{}{\citealt[13]{Hedinger2011}} \begin{xlist} \ex \gll [ngù: \textbf{è}délé]\\ \textit{nguu} \textbf{\textit{e}}\textit{délé}\\ \glt ‘the pig is heavy’\\ \ex \gll [ngù: \textbf{é}délé]\\ \textit{nguu} \textbf{\textit{é}}\textit{délé}\\ \glt ‘the pigs are heavy’\\ \end{xlist} \end{exe} } Some languages indicate grammatical tone by letters which are otherwise unused. For example, Gangam [gng] (Togo) marks grammatical tone, not phonetically, but with other symbols to indicate the \textit{meaning}. The imperfective is marked with the letter 〈h〉 and the perfective with an apostrophe 〈’〉 (See \citealt{HigdonEtAl2000}, also \citealt{Roberts2013} for more examples. Phonetic transcription is from Jean Reimer p.c.\ia{Reimer, Jean@Reimer, Jean}). \begin{exe} \ex \label{ex:ToneByUnusedLetters:3} \langinfo{Gangam}{}{\citealt{HigdonEtAl2000}} \begin{xlist} \ex {\itshape N bɛnge' [bɛ́ŋge] Miganganm ya kaanm.} \glt `I learned to read Gangam.' \ex {\itshape N laan bɛngeh [bɛ̄ŋgé] Miganganm ya kaanm nɛ.} \glt `I am learning to read Gangam.' \end{xlist} \end{exe} Similarly, Etung \REF{tab:EtungPronouns} uses 〈h〉 to differentiate pronouns which differ only by tone. \ea Pronouns in Etung [etu] (Nigeria, \citealt{Harley2012})\label{tab:EtungPronouns}\setlength{\multicolsep}{0pt} \begin{multicols}{2}\ea\gll\relax [á]\\ $\langle$ah$\rangle$ \\\glt `they' \ex\gll\relax [à]\\ $\langle$a$\rangle$ \\\glt `he' \z\end{multicols}\z Other languages double some letters to differentiate pronouns which differ only by tone \REF{tab:JurModoLetterDoubling}. \ea Pronouns in Jur Modo [bex] (Sudan, \citealt{Persson2004})\label{tab:JurModoLetterDoubling}\setlength{\multicolsep}{0pt} \begin{multicols}{2}\ea \gll\relax [nì] \\ $\langle$nï$\rangle$ \\\glt ‘her’ \ex \gll\relax [ní] \\ $\langle$nnï$\rangle$ \\\glt ‘their’ \z\end{multicols}\z A number of languages indicate various grammatical tone functions by means of punctuation or other non-alphabetic marks. Karaboro, as displayed in \REF{tab:KaraboroPlurals}, uses a word-final hyphen to indicate plurals (in those cases which are not indicated by a segmental marker), which all happen to end in a low tone. 
\ea Plurals in Karaboro [xrb] (Burkina Faso, SIL 2009, as cited in \citealt{Roberts2013})\label{tab:KaraboroPlurals}\setlength{\multicolsep}{0pt} \begin{multicols}{3} \ea\gll\relax\ob kāī, kāì\cb \\ $\langle$kai, kai-$\rangle$ \\\glt ‘affair, affairs’ \ex\gll\relax\ob gjɔɔ, gjɔɔ\cb \\ $\langle$jɔɔ, jɔɔ-$\rangle$ \\\glt ‘net, nets’ \ex\gll\relax\ob sààpjé, sàápjè\cb \\ $\langle$saapye, saapye-$\rangle$ \\\glt ‘rabbit, rabbits’ \z\end{multicols}\z The old Ejagham orthography, now changed to a different system, used punctuation extensively to indicate various verbal aspectual forms \REF{tab:cahill:fiaghamOrtho:13}. % % Table : Old Ejagham orthography [etu] (Nigeria \& Cameroon)\\ % % (\citealt{Bird1999b}, corrected by John Watters pc) \begin{table} \begin{tabularx}{\textwidth}{llXl} \lsptoprule Orthographic Rule & Phonetic & Orthography & Gloss\\\midrule colon = \textsc{perfect} & [émè] & $\langle$e:me$\rangle$ & ‘we have swallowed’\\ space = \textsc{perfective} & [èmê] & $\langle$e me$\rangle$ & ‘we swallowed’\\ apostrophe = \textsc{hortative} & [éme] & $\langle$e’me$\rangle$ & ‘let us swallow’\\ hyphen = \textsc{conditional} & [émě] & $\langle$e-me$\rangle$ & ‘when we swallow’\\ no symbol = \textsc{noun} & [èmè] & $\langle$eme$\rangle$ & ‘neck’\\ \lspbottomrule \end{tabularx} \caption{Old Ejagham orthography [etu] (Nigeria \& Cameroon)\\ (\citealt{Bird1999b}, corrected by John Watters, p.c.\ia{Watters, John@Watters, John})\label{tab:cahill:fiaghamOrtho:13}} \end{table} The Bokyi orthography (\tabref{tab:cahill:BokyiOrtho:14}) uses a system that appears rather unusual to most readers in its employment of a variety of non-alphabetic symbols, but it is currently in use. \begin{table} \begin{tabularx}{\textwidth}{XXX} \lsptoprule Phonetic & Orthography & Gloss\\\midrule \ob ǹtsè\cb & $\langle$nce$\rangle$ & ‘going’\\ \ob ǹtsâ\cb & $\langle$n-ca$\rangle$ & ‘I go’\\ \ob ńtsè\cb & $\langle$n/ce$\rangle$ & ‘I went’ \\ \ob \={n}ńtsè\cb & $\langle$nn/ce$\rangle$ & ‘I have gone’\\ \ob ńtʃì ǹ-tsâ\cb & $\langle$n/chi n-ca$\rangle$ & ‘I will go’\\ \ob \={n}ńtséē\cb & $\langle$n*-ce*$\rangle$ & ‘I don't go’\\ \ob ǹdátsèē\cb & $\langle$n*da/ce*$\rangle$ & ‘I didn't go’\\ \ob \={m}ḿbátʃì ǹtsáā\cb & $\langle$n*ba/chi n-ca*$\rangle$ & ‘I will not go’ \\ \lspbottomrule \end{tabularx} \caption{Bokyi [bky] (Nigeria) orthography (Harley, p.c.)\ia{Harley, Matthew@Harley, Matthew}\label{tab:cahill:BokyiOrtho:14}} \end{table} The Bungu language is one of the more complex illustrations of grammatical tone marking. It uses both diacritics and punctuation marks to indicate the interaction of person and aspect in the verbal system. Many words are segmentally identical, and vowel length is not contrastive, putting a greater load on tone. At this point, no lexical tone is marked (though the orthography is still being adjusted), and \tabref{tab:BunguGrammaticalToneMarking} does not give the entire picture of grammatical tone. Other complexities exist as well, such as tone marking of objects. 
\begin{table}
\begin{tabular}{lll}
\lsptoprule
Orthography & Phonetic & Gloss \\\midrule
$\langle$w\"{a}kala$\rangle$ & [wàkála] & ‘you bought (recent)’ \\
$\langle$wákala$\rangle$ & [wákála] & ‘he bought (recent)’ \\
$\langle$waakala$\rangle$ & [waːkála] & ‘they bought (recent)’ \\
$\langle$wakala$\rangle$ & [wakála] & ‘they will buy’ \\
$\langle$\^{}wakala$\rangle$ & [w\v{a}kala] & ‘they are buying’ \\
$\langle$:w\"{a}kala$\rangle$ & [wákala] & {‘you have already bought’}\\
$\langle$:wákala$\rangle$ & [wakála] & {‘he has already bought’}\\
$\langle$:waakala$\rangle$ & [wǎːkala] & {‘they have already bought’}\\
$\langle$:nakala$\rangle$ & [nákala] & {‘I have already bought’}\\
$\langle$\^{}nakala$\rangle$ & [nǎkala] & {‘I am buying’}\\
\lspbottomrule
\end{tabular}
\caption{Bungu [wun] (Tanzania, \citealt{Katterhenrich2016}) \textmd{(Low tone is unmarked)}. Key: Colon: \textsc{completive}; Caret: \textsc{progressive}; Umlaut: \textsc{2sg.subj.past}; Accent: \textsc{3sg.subj.past}; Double vowel: \textsc{3pl.subj.past}.}
\label{tab:BunguGrammaticalToneMarking}
\end{table}

\subsection{Marking both lexical and grammatical tone with diacritics}
\label{sec:HowToneMarked:LexicalGrammatical:4}

Zinza [zin] (Echizinza) marks both lexical and grammatical tone, with accent marks for high, rising, and falling (see \ref{tab:ZinzaAccentMarking}).

\ea Marking both lexical and grammatical tone with accent in Zinza \citep{Matthews2010}\label{tab:ZinzaAccentMarking}
\ea lexical tone\smallskip\\
\begin{tabularx}{\linewidth}{@{}XXX@{}}
\textit{enzóka} & \textit{omuyǎnda} & \\
‘snake’ & ‘child, youth’& \\
\end{tabularx}
\ex grammatical tone\smallskip\\
\begin{tabularx}{\linewidth}{@{}XXX@{}}
\textit{aleeba} & \textit{aléeba} & \\
‘he looked’ & ‘he (habitually) looks’ & \\
\end{tabularx}
\z
\z

\section{Phonological theory and orthography}
\label{sec:PhonTheoryOrtho:4}

The above discussion has assumed that tones are completely stable, i.e., that underlying tones and surface tones are the same. The question of when or if to mark the results of \textit{tone rules} offers more challenges. For example, in a Bantu language, if a prefixal High tone spreads for three syllables, does one mark the initial prefix syllable alone, or the result of the spreading rule? Or, in West Africa, if underlying tones in a word are /HLH/, but surface as [HꜝHH], what is the appropriate marking? The major question that involves both of these situations is: what \textit{depth} of phonological representation should be the basis for marking tone? This section addresses those questions.

Tone studies have advanced in the decades since the 1928 Rejaf conference, especially with Autosegmental Phonology \citep{Goldsmith1976} and Lexical Phonology \citep{Pulleyblank1986}. However, as \citet{Snider2014} notes, many people do not apply phonological theory more recent than \citet{Halle1968} to orthographies. Rather, the main distinctions that most orthographers have in mind are “deep” vs. “shallow” orthographies. However, as we will see, there are other options.

A shallow orthography is close to or identical with the surface pronunciation, after most or all of the rules have applied. This has certain consequences and raises the following issues.
\begin{itemize}
\item The same word will appear with different tone marks \textit{depending on its context}. A “constant word image” (useful for quick word recognition) is not maintained.
\item It tends to be cumbersome and hard to read.
\citet{Bird1999a} showed that an exhaustive shallow tone marking was actually less readable than no marking in Dschang. \item How are multiple downsteps represented, when the tone can have several decreasing phonetic levels? \end{itemize} A deep orthography represents the sounds before the rules have applied. Very broadly, this is what linguists think of as the “underlying form.” This also has certain consequences. A deep orthography has certain characteristics. \begin{itemize} \item It retains a constant word image, aiding quicker visual recognition of a word; \item It can sometimes be adapted better across dialects, since dialectal differences can be attributed to varying rule application; \item It can be significantly different than any person’s actual pronunciation, including pronunciation in isolation. \end{itemize} If a particular language has few tone processes, there will be little or no difference between a shallow and deep orthography. The above does not exhaust the possibilities; \citet{Bird1999b} and \citet{Roberts2013} give a number of other variations on marking tone. \subsection{Lexical phonology as a useful framework} \label{sec:PhonTheoryOrtho:LexicalPhonology:1} I have mentioned “rules,” but what kind of rules do I mean? There is a rich history of types of rules and their interactions, and one would expect that a narrowing of types of rules would likely be helpful in determining tone orthographies. And so it is. Lexical Phonology (e.g., \citealt{Pulleyblank1986}) is now disfavored as a comprehensive phonological theory, but the notion of \textit{lexical} vs. \textit{postlexical} processes is still invoked in contemporary theories such as Stratal Optimality Theory \citep{Kiparsky2000,Goldsmith2014}. I argue that Lexical Phonology offers a level of psycholinguistic realism that is helpful in determining which level to refer to in deriving orthographic representations. In Lexical Phonology, the output of the lexical level is the \textit{psychologically real} level. This level is similar to but not precisely the same as the “phonemic level” of earlier theories. Following \citet{Snider2014}, I propose that this is the most appropriate phonological level for orthography in general. Specifically for this paper, it is proposed that this level is the most fruitful level in applying the results of tone rules to an orthography. \citet{Snider2014} is a major advocate of the above. One does not need to adopt the entire theory of Lexical Phonology to profit from its main benefits. The main question in dealing with a phonological rule that may make a difference in orthographic representation is whether a rule is lexical or postlexical. Several diagnostic questions can be fruitfully applied to determine this, which I have adapted with minor modification from \citet{Snider2014}. These questions are: \begin{itemize} \item Are there lexical exceptions to the process? \item Does a given process lack phonetic motivation? \item Does the process have to apply across a \textit{morpheme} boundary? (not a word boundary) \end{itemize} If one or more answers to the above are “yes,” then the rule is a lexical rule; write the output of that rule. Other diagnostic questions: \begin{itemize} \item Is the new sound the rule produces a \textit{non}{}-contrastive sound in the language? \item When a given process has applied, do native speakers think that the sound that results is the same as the sound that underwent the process? \item Does the process apply across \textit{word} boundaries? 
\end{itemize} If one or more answers to the above are “yes”, then the rule is postlexical; write the sound at the level \textit{before} the rule applies. Also, if there is no apparent reason to categorize a rule as lexical, Snider advises assuming it is postlexical. \begin{quote} The above questions are a starting point for tentative decisions that should be held somewhat loosely; all orthographic decisions need to be actually tested.\footnote{\citet[342--343]{Gudschinsky1958} gives an interesting example of a Mazatec man (Mexico) who was quite aware of the results of tone processes \textit{within} words (lexical rules), but insisted that the tones of two particular \textit{phrases} were different, though they were phonetically tonally identical (result of \textit{postlexical} rules).} \end{quote} The experimental evidence from Kabiye (\citealt{Roberts2016neither}) on two tone processes which were marked differentially in test orthographies supports this. The authors tested what they termed the \textit{Lexical Orthography Hypothesis}, that is, that the lexical level (i.e., the output of the lexical phonology) offers the most promising level of phonological depth upon which to base a phonographic tone orthography that marks tone exhaustively.\footnote{“Exhaustive tone marking” is marking the tone on every syllable. There is reason to believe that this is not the most effective way to mark tone, but it was adopted for the purposes of having a more controlled experiment.} They tested 97 tenth-graders with orthographies that represented two tonal processes in three different ways. The rules were: \begin{description} \item[Lexical rule of L-spread:] in the Kabiye verb, the L tone of a prefix spreads rightwards onto a H verb root until it is blocked by a singly linked H tone. This is shown to be lexical because it applies only across a specific morpheme boundary and is limited to within a word. Results of this rule are illustrated in \tabref{tab:cahill:19}. \item[Postlexical rule of HLH plateauing:] a singly linked L between two H tones delinks, and the second H spreads left and has a downstepped register. This is shown to be postlexical by the fact that it applies across word boundaries as well as within words. Results of this rule are illustrated in \tabref{tab:cahill:20}. \end{description} \noindent The researchers tested 3 orthographies: \begin{description} \item[Phonemic:] the pronunciation minus application of any allophonic processes \item[Lexical:] (output of lexical phonology) the phonemic level minus application of any postlexical processes \item[Deep:] (input of lexical phonology) the lexical level minus application of any lexical processes, a morphographic representation. \end{description} \noindent Examples of the orthographic output of these different systems are shown in Tables~\ref{tab:cahill:19} and~\ref{tab:cahill:20}. 
\begin{table}[p]
\begin{tabularx}{\textwidth}{lp{2cm}Ql}
\lsptoprule
Speech & Deep\newline orthography & Lexical and phonemic orthographies & Gloss\\\midrule
{[wélésí-∅]} & 〈wélési〉 & 〈wélésí〉 & ‘listen!’\\
listen-\textsc{imp} & & & \\
{[e-welesí-na]} & 〈ewélésína〉 & 〈ewelesína〉 & ‘he listened’\\
\textsc{3sg-nc1}-listen-\textsc{com} & & & \\
{[te-welesí-na]} & 〈tewélésína〉 & 〈tewelesína〉 & ‘didn’t listen’\\
\textsc{neg}-listen-\textsc{com} & & & \\
\lspbottomrule
\end{tabularx}
\caption{Low-spread and Kabiye orthographies}
\label{tab:cahill:19}
\end{table}
%\begin{tabularx}{\textwidth}{XXXX}
%\lsptoprule
%Speech & deep orthography & lexical and phonemic orthographies & gloss\\
%[wélésí-Ø] \\
%listen-IMP & 〈wélési〉 & 〈wélésí〉 & ‘listen!’\\
%[e-welesí-na] \\
%3sgNC1-listen-COM & 〈ewélésína〉 & 〈ewelesína〉 & ‘he listened’\\
%{[te-welesí-na]}
%NEG-listen-COM & 〈tewélésína〉 & 〈tewelesína〉 & ‘didn’t listen’\\
%\lspbottomrule
%\end{tabularx}
\begin{table}[p]
\begin{tabularx}{\textwidth}{QQQl}
\lsptoprule
Speech & Deep and lexical orthographies & Phonemic orthography & Gloss\\\midrule
{[s\'{ɛ}-tʊ]} thanks-\textsc{nc9} & 〈s\'{ɛ}tʋ〉 & 〈s\'{ɛ}tʋ〉 & ‘thanks’\\
{[f\'{ɛ}y\'{ɪ}]} there\_is\_no & 〈f\'{ɛ}y\'{ɩ}〉 & 〈f\'{ɛ}y\'{ɩ}〉 & ‘there is not’\\
{[s\'{ɛ}ꜝt\'{ʊ} f\'{ɛ}y\'{ɪ}]} thanks-\textsc{nc9} there\_is\_no & 〈s\'{ɛ}tʋ f\'{ɛ}y\'{ɩ}〉 & 〈s\'{ɛ}’t\'{ʋ} f\'{ɛ}y\'{ɩ}〉 & ‘don’t mention it!’\\
\lspbottomrule
\end{tabularx}
\caption{HLH plateauing and Kabiye orthographies}
\label{tab:cahill:20}
\end{table}
\begin{table}[p]
\begin{tabularx}{\textwidth}{llQ}
\lsptoprule
Orthography & Lexical L tone spreading & Post-lexical HLH plateauing\\\midrule
Phonemic & {Written as pronounced} (easier) & {Written as pronounced} (harder)\\\tablevspace
{Lexical} & & {Written without post-lexical processes} (easier)\\\tablevspace
{Deep} & {Written morphographically} (harder) & \\
\lspbottomrule
\end{tabularx}
\caption{Expected results from three experimental orthographies}
\label{tab:cahill:21}
\end{table}
Note that because of the specific processes chosen, the results of the Low-Spread rule distinguish a Deep orthography from the others. Is the Deep or the Lexical/Phonemic representation better? The results of the HLH Plateauing rule distinguish the Phonemic orthography from the others; is the Phonemic or the Deep/Lexical representation better? \tabref{tab:cahill:21} shows the expected results if the Lexical Orthography Hypothesis is correct.

Note that the experiment focused on the oft-neglected domain of \textit{writing} as well as reading. The reader is referred to the paper for full results, but on the whole, the Lexical Orthography Hypothesis was supported. Lexical and Phonemic orthographies worked better in dealing with one tone process, and Lexical and Deep orthographies worked better in dealing with the other tone process. So the Lexical orthography fared well in both processes, while the others did worse in one process or the other.
Specifically, those writing the Lexical orthography:
\begin{itemize}
\item scored fewer errors writing an appropriate accent on a vowel than those writing the Deep orthography;
\item scored fewer errors writing post-lexical non-automatic downstep than those writing the Phonemic orthography;
\item experienced less degradation of performance on a later test than those writing the Deep and Phonemic orthographies;
\item were more absorbed with the task of writing accents correctly than those writing the Deep and Phonemic orthographies (though this often caused them to write long vowels incorrectly).
\end{itemize}
One caveat for the experiment is that there is no universal agreement among researchers about what the underlying (deep) tones of Kabiye actually are. Also, as mentioned before, this experiment focuses on lexical tone, exhaustively marked. Other less exhaustive methods of tone marking were not explored.
\subsection{Language typology as a useful guide}
\label{sec:PhonTheoryOrtho:LanguageTypo:2}
Besides the largely theoretical insights of the Lexical Orthography Hypothesis, another promising tool for deciding how to mark tone is a more typological one. \citet{Kutsch2014} proposes two main types of tone languages. In her terminology, these are “stable tone languages” and “movable tone languages.”
\textit{Stable tone languages} are those in which tone rules do not change an underlying tone. They tend to have a cluster of properties:
\begin{itemize}
\item These languages tend to have shorter words, and more tone levels.
\item Tone generally has a heavy functional load, both lexically and grammatically.
\item Grammatical tone can be looked at as tone replacement.
\item Writing tone on every syllable is possible and straightforward.
\item Teaching phonetic tone awareness is (relatively) easier, and a constant word image can be maintained.
\end{itemize}
Ndrulo and Attié, cited earlier, are examples of stable tone languages.
\textit{Movable tone languages} are those in which the tones change according to the context, due to a variety of tone sandhi rules. These also tend to have a cluster of properties which differ from the stable tone languages:
\begin{itemize}
\item These languages tend to have longer words and fewer tone levels.
\item They generally have a lighter load for lexical tone, but often a heavy functional load for grammatical tone.
\item Thus it may be less important to mark lexical tone, but it is important that \textit{grammatical} tone distinctions be differentiated.
\item Teaching tone awareness could focus on grammatical notions rather than phonetics.
\end{itemize}
Sabaot, Lugungu, and many Bantu languages are examples of movable tone languages.
Of course, these language types are prototypical. Many languages do not fall neatly into these categories. However, this can serve as a general first approximation and guide to the type of orthographic tone marking that may prove fruitful.
\section{Conclusions and recommendations}\label{sec:Conclusion:5}
I conclude this paper with several recommendations -- some definite and others more tentative -- and an open question on “selective tone marking.”
\subsection{Recommendations}
\label{sec:Conclusions:Recommendations:1}
Some practices in orthography development have been confirmed enough by experienced people that I can definitely recommend these.
\begin{enumerate}[leftmargin=*]
\item First, work with the community! The emphasis in this paper has been on usability of the orthography, based on linguistic factors.
However, if, for any reason, the language community does not \textit{want} to use a particular orthography, linguistic perfection becomes irrelevant. Various sociopolitical factors that can be relevant in different situations are discussed in \citet{Cahill2014}.
\item All decisions on marking tone need to be tested. Unforeseen factors, including incomplete analysis, may result in one’s orthography not being as useable as anticipated. Whether the testing be formal or informal, one needs to check it with people who use the language (see \citealt{Karan2014} for details).
\item If it is decided to mark lexical tone in the orthography, mark the output of the lexical level, as discussed in \sectref{sec:PhonTheoryOrtho:LexicalPhonology:1}.
\item When marking grammatical tone of whatever sort, prioritize marking the \textit{meaning}, not the phonetics (in \citeauthor{Roberts2013}’s \citeyear{Roberts2013} term, “semiographically”). Readers and writers have meaning “in their heads” more than they do the abstract sound. Also, a particular grammatical meaning such as “recent past” may have several phonetic implementations. Figuring these out is a challenging task, but one which, as far as orthography goes, is unnecessary.
\item Consider how to \textit{teach} the orthography. Even if speakers know their language is tonal, they often do not have a high awareness of the specifics of tone, let alone how to represent this. Each tone mark should be taught in a separate lesson, just as any consonant or vowel. Also, lexical and grammatical tone should be taught separately.
\begin{quote}
\ldots\ a tone orthography needs to be accompanied by a well thought-\linebreak through methodology for awareness raising of tonal contrasts and for teaching people to read with the symbols chosen to mark tone in a language. \citep[52]{Kutsch2014}
\end{quote}\largerpage[2]
\item Make the orthography compatible with electronic devices -- phones, tablets, internet, and computers in general. A Unicode-compatible orthography\footnote{Unicode is the international standard for encoding text in electronic data. Major software assumes user input uses Unicode characters and not a custom font. “Unicode-compatible” in our context means first, that only Unicode characters are used, and second, that they are used in accordance with their defined set of properties. One of those properties is whether it is treated as a “word-forming” character. The usual equals sign ({\ttfamily\char"003D}, Unicode U+003D) is not word-forming, but a shortened equals sign ({\ttfamily\char"A78A}, Unicode U+A78A) has been defined as word-forming.} will be very helpful in the long run. Non-alphabetic symbols (e.g., {\ttfamily * = +}) are appealing for marking grammatical tone, but a warning here is appropriate. The advantages of these marks are that they are already present on the keyboard, they can be written in line with the other characters rather than going back to add a diacritic, and they can mark an easily recognized \textit{meaning} rather than the harder to process phonetics. However, the Unicode \textit{characteristics} of these symbols mean that many programs will not treat them as part of the word, but will split them off from the usual consonants and vowels. Publishing can potentially be hindered if this issue is neglected.
\item Finally, consider the writer as well as the reader. Active literacy in a language involves simplicity of writing as well as reading.
\end{enumerate}
The following are additional factors to consider as possibilities in orthography design, though I do not suggest them as firmly as the above recommendations. These seem reasonable, but have not been proven through practical experience to the extent that the definite recommendations above were.
When extra symbols are needed, consider writing them \textit{in line} with other letters, rather than as accents above the letter (e.g., \textit{\^{}}\textit{baba}, not \textit{bába}). These are easier to write, since the pencil or pen does not have to be lifted to a separate tier (think of writing an English word like \textit{constitution}, which requires dotting 〈i〉s and crossing 〈t〉s). More testing and experience is needed, but this may also prove easier to read.
\begin{quote}
Once the initial strangeness of such symbols in the orthography [Bokyi, see \tabref{tab:cahill:BokyiOrtho:14}] has been overcome, and their function is understood, teams learn to use them quite quickly and can get quite excited about them. But teaching phonetic tone-marking using accents is always a struggle here, and very, very few ever master it. (Harley, p.c.)\ia{Harley, Matthew@Harley, Matthew}
\end{quote}
If both grammatical and lexical tone are to be marked, mark them with different systems. Testing in Togo (Kabiyé language), Roberts marked lexical tone with accents, and grammatical tone with other characters. Roberts comments that readers seemed to “feel” the grammar more than the sound system.
\subsection{A closing question}
\label{sec:Conclusion:ClosingQuestion:2}
One convention that has been fairly widely practiced, but also has been opposed for theoretical reasons, is “selective tone marking.” Selective tone marking applies tone marking only to one word of a minimal tone pair, leaving the other unmarked. Thus if a language has two words [bóbò] and [bóbó], with different meanings, they could be written as 〈bóbo〉 and 〈bobo〉. Selective tone marking thus contrasts with marking tone more extensively or exhaustively.
\citet[16]{Wiesemann1989} and \citet[132--133]{Longacre1953} assert that selective tone marking should be avoided. Wiesemann gives the following reason for rejecting selective tone marking:
\begin{quote}
It should be mentioned here that a system which marks tone where it is minimally different in individual words is not a good system. In such a system, for each individual word one must learn whether it carries a tone mark or not. To mark low tones only on words where there is a minimal tone pair makes the teaching of tone a matter of memory, rather than a matter of rules linked to pronunciation.
\end{quote}
\citet[133]{Longacre1953} adds the point that selective tone marking “presupposes that one has already made a list of all the words in the language to see which ones are minimal pairs. Such a claim is pretentious since most newly written languages do not have good dictionaries.” Thus two reasons for avoiding selective tone marking are 1) the memory load of having to know all the individual words which must be marked and 2) the improbability of the orthography designer knowing all such word pairs (or triplets, or more) that need to be marked. However, dictionaries that include a large percentage of lexemes in a language are easier to produce now than in past years (\url{http://www.rapidwords.net/}).
Also, the preference for rules rather than memorization a) is possibly a relic of Western education, with its bias against rote memorization, and b) ignores the fact that much of our successful (!) English orthography also depends on memorization rather than rules, as the examples in \tabref{tab:VariableEnglishPronunciation} show. \begin{table} \begin{tabular}{llll} \lsptoprule Spelling of 〈ough〉 words & Phonetics & Spelling of 〈ear〉 words & Phonetics\\ \midrule \textit{cough} & [ɑf] & \textit{hear} & [iɹ]\\ \textit{though} & [o] & \textit{heard} & [ɚ]\\ \textit{through} & [u] & \textit{heart} & [ɑɹ]\\ \lspbottomrule \end{tabular} \caption{Variable English pronunciation of same spellings} \label{tab:VariableEnglishPronunciation} \end{table} English orthography is far from being an ideal model, but if such a widely-used orthography can depend so much on memorization, then the argument based on memory loses its force. So a better case can probably be made for selective tone marking than previous scholars have argued. \section*{Acknowledgments} This paper was first presented at the Academic Forum of the Graduate Institute of Applied Linguistics, Dallas, before its presentation at the 48\textsuperscript{th} Annual Conference on African Languages at Indiana University. I am grateful for comments and interesting discussion from both audiences. I also acknowledge valuable comments from two reviewers in preparation for the ACAL proceedings. {\sloppy\printbibliography[heading=subbibliography,notkeyword=this]} \end{document}
\subsection{Trivial bundles and the torus}
\subsubsection{Trivial bundles}
\subsubsection{The torus}
\(S^1 \times S^1\)
%
% Introduction to MPM
% Jim Guilkey/Biswajit Banerjee
% 03/25/2003
%
\documentclass[10pt]{article}
\usepackage[pdftex]{graphicx}
%\usepackage[dvips]{graphicx,epsfig}
\usepackage{times}
\usepackage{amsfonts}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{amstext}
\topmargin 0.5in
\headsep 0pt
\headheight 0pt
\oddsidemargin 0pt
\evensidemargin 0pt
\textheight 8.6in
\textwidth 6.5in
\columnsep 20pt
\columnseprule 0.5pt
\setlength{\parskip}{0.5em}
\raggedright
\newcommand{\tn}[1]{\mbox{\bf{#1}}}
\newcommand{\sig}{\mbox{\boldmath $\sigma \!\!$ \unboldmath}}
\title{An Introduction to the Material Point Method}
\author{ \\ J.E. Guilkey \\ Department of Mechanical Engineering \\ University of Utah \\ Salt Lake City, Utah 84112 \\ \\ }
\date{}
\begin{document}
\maketitle
\tableofcontents
\section{Introduction}
The Material Point Method (MPM) as described by Sulsky et al. \cite{sulskycmame,sulskycpc} is a particle method for structural mechanics simulations. Solid objects are represented by a collection of particles, or ``material points." Each of these particles carries with it information for that part of the solid object that it represents. This includes the mass, volume, position, velocity and stress of that material. MPM differs from other so-called ``mesh-free" particle methods in that, while each object is primarily represented by a collection of particles, a computational mesh is also an important part of the calculation. Particles do not interact with each other directly; rather, the particle information is interpolated to the grid, where the equations of motion are integrated forward in time. This time-advanced solution is then used to update the particle state. An example of two disks initially approaching each other represented by material points on an overlying mesh is shown in Figure \ref{fig-disks_init}.
\begin{figure}[h]
\hspace{1.5in}
\scalebox{0.5}{\includegraphics{Figures/disks.pdf}}
%\epsfysize=3.5in
%\epsfbox{Figures/disks.eps}
\caption{\label{fig-disks_init} Initial particle representation of two colliding disks on an overlying mesh.}
\end{figure}
The method usually uses a regular structured grid as a computational mesh. While this grid, in principle, deforms as the material that it is representing deforms, at the end of each timestep, it is reset to its original undeformed position, in effect providing a new computational grid for each timestep. The use of a regular structured grid for each time step has a number of computational advantages. Computation of spatial gradients is simplified. Mesh entanglement, which can plague fully Lagrangian techniques, such as the Finite Element Method (FEM), is avoided. MPM has also been successful in solving problems involving contact between colliding objects, having an advantage over FEM in that the use of the regular grid eliminates the need for doing costly searches for contact surfaces\cite{bard}.
The choice of MPM over FEM as the C-SAFE structural mechanics method was only in small part due to the above-mentioned criteria. The primary motivation was the ability to use MPM together with a multimaterial CFD algorithm for solving tightly coupled fluid-structure interaction problems. This capability was first demonstrated in the CFDLIB codes from Los Alamos by Bryan Kashiwa and co-workers. There, as in Uintah, MPM serves as the Lagrangian description of the solid material in a multimaterial CFD code.
Certain elements of the solution procedure are based in the Eulerian CFD algorithm, including intermaterial heat and momentum transfer as well as satisfaction of a multimaterial equation of state. The use of a Lagrangian method such as MPM to advance the solution of the solid material eliminates the diffusion typically associated with Eulerian methods.
\section{Algorithm}
While a more detailed description of MPM can be found in \cite{sulskycpc}, the algorithm is laid out here. The equations of motion are cast in the form:
\begin{eqnarray}
\tn{M}_g \cdot \tn{a}_g &=& \tn{Fext}_g - \tn{Fint}_g \label{newton2}
\end{eqnarray}
where $\tn{M}_g$ is the mass matrix, $\tn{a}_g$ is the acceleration vector, $\tn{Fext}_g$ is the external force vector (sum of the body forces and tractions), and $\tn{Fint}_g$ is the internal force vector resulting from the divergence of the material stresses. In general, $\tn{M}_g$ is a large, sparse matrix. In practice, and in what follows here, a ``lumped" mass matrix is used, which only has entries on the diagonal, and is thus represented as a column matrix.
The solution procedure begins by interpolating the particle state to the grid, to form $\tn{M}_g$, $\tn{Fext}_g$, and to get a velocity on the grid $\tn{v}_g$. These quantities are calculated at each grid node by the following equations:
\begin{eqnarray}
\tn{M}_i &=& \sum_{p} S_{ip} m_p \label{expinterpolatemass}
\end{eqnarray}
\begin{eqnarray}
\tn{v}_i &=& \frac{\sum\limits_{p} S_{ip} m_p \tn{v}_p}{\tn{M}_i} \label{expinterpolatevel}
\end{eqnarray}
\begin{eqnarray}
\tn{Fext}_i &=& \sum_{p} S_{ip} \tn{Fext}_p. \label{expinterpolateFext}
\end{eqnarray}
$m_p$ is the particle mass, $\tn{v}_p$ is the particle velocity, and $\tn{Fext}_p$ is the external force on the particle. The external force on the particle is generally an applied load of some type. In Equation \ref{expinterpolatevel}, the numerator is the nodal momentum, which is then divided by the nodal mass to get a velocity. $S_{ip}$ is a ``shape function" for the $i$th node evaluated at $\tn{x}_p$. Traditionally, the shape functions are multiplicative combinations of one dimensional tent functions, as shown in Figure \ref{fig-Sip}. The shape functions serve to distance-weight the contribution of each particle to the grid nodes.
\begin{figure}[bh]
\hspace{1.75in}
\scalebox{0.7}{\includegraphics{Figures/Sip.pdf}}
%\epsfysize=2.5in
%\epsfbox{Figures/Sip.eps}
\caption{\label{fig-Sip} One dimensional linear shape function, $S(x)$.}
\end{figure}
At this point, a velocity gradient, $\nabla \tn{v}_p$, is computed at each particle using the grid velocities $\tn{v}_g$:
\begin{eqnarray}
\nabla \tn{v}_p &=& \sum_i \tn{G}_{ip} \tn{v}_i \label{velgrad}
\end{eqnarray}
\noindent where $\tn{G}_{ip}$ is the gradient of the $i$th node's shape function, evaluated at $\tn{x}_p$. A one dimensional example of $\tn{G}_{ip}$ is shown in Figure \ref{fig-Gip}. Note that in going to multiple dimensions, the $\tn{G}_{ip}$ are found by taking gradients of the multidimensional $S_{ip}$ NOT by forming multiplicative combinations of the one-dimensional $\tn{G}_{ip}$.
\begin{figure}[h]
\hspace{1.75in}
\scalebox{0.7}{\includegraphics{Figures/Gip.pdf}}
%\epsfysize=2.5in
%\epsfbox{Figures/Gip.eps}
\caption{\label{fig-Gip} One dimensional linear shape function derivative, $G(x)$.}
\end{figure}
This velocity gradient is used as input to a constitutive model (stress-strain relationship) which is evaluated at each particle. The specifics of this calculation are dependent on the constitutive model.
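Before turning to the constitutive model, it may help to see the grid-interpolation steps above written out. The following sketch is an illustrative one-dimensional example in Python, assuming linear tent shape functions on a uniform grid; it is not taken from any particular MPM code. It evaluates $S_{ip}$ and $G_{ip}$ and forms the nodal masses and velocities of Equations \ref{expinterpolatemass} and \ref{expinterpolatevel}, as well as the particle velocity gradient of Equation \ref{velgrad}.
\begin{verbatim}
import numpy as np

def shape_and_gradient(xp, xi, dx):
    # 1D linear (tent) shape function S and its gradient G for the node at xi.
    r = (xp - xi) / dx
    if abs(r) >= 1.0:
        return 0.0, 0.0
    S = 1.0 - abs(r)
    G = (-1.0 if r > 0.0 else 1.0) / dx  # dS/dx evaluated at the particle
    return S, G

def particles_to_grid(x_p, m_p, v_p, x_i, dx):
    # Interpolate particle mass and momentum to the nodes, then divide to
    # obtain the nodal velocities.
    M_i = np.zeros(len(x_i))
    mom_i = np.zeros(len(x_i))
    for xp, mp, vp in zip(x_p, m_p, v_p):
        for i, xi in enumerate(x_i):
            S, _ = shape_and_gradient(xp, xi, dx)
            M_i[i] += S * mp
            mom_i[i] += S * mp * vp
    v_i = np.where(M_i > 0.0, mom_i / np.maximum(M_i, 1.0e-30), 0.0)
    return M_i, v_i

def velocity_gradient(xp, x_i, v_i, dx):
    # Velocity gradient at a particle from the nodal velocities.
    return sum(shape_and_gradient(xp, xi, dx)[1] * vi
               for xi, vi in zip(x_i, v_i))
\end{verbatim}
In a real implementation, only the few nodes whose shape functions overlap a given particle would be visited, rather than looping over every node of the grid as this sketch does.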
An example of a simple elastic material model is described in the appendix. The result of this calculation is the Cauchy stress at each particle, $\sig_p$. With this, the internal force due to the divergence of the stress is calculated:
\begin{eqnarray}
\tn{Fint}_i &=& \sum_{p} \tn{G}_{ip} \sig_p v_p
\end{eqnarray}
\noindent where $v_p$ is the particle volume. The internal force can be thought of as the force that holds a material together. For a given deformation, this force is larger for stiffer materials.
Everything is now available to solve Equation \ref{newton2} for $\tn{a}_g$. With that, the forward Euler method is used for all time integrations. A convective grid velocity $\tn{v}^L_g$ is computed:
\begin{eqnarray}
\tn{v}^L_g &=& \tn{v}_g + \tn{a}_g dt
\end{eqnarray}
While the following calculation is never carried out, in principle, the nodes of the grid also move with that convective velocity:
\begin{eqnarray}
\tn{x}^L_g &=& \tn{x}_g + \tn{v}^L_g dt \label{expdefgrid}
\end{eqnarray}
During this part of the computation, the particles move with the deforming grid. Their position and velocity are explicitly updated by:
\begin{eqnarray}
\tn{v}_p (t + dt) &=& \tn{v}_p (t) + \sum_{i} S_{ip} \tn{a}_i dt
\end{eqnarray}
\begin{eqnarray}
\tn{x}_p (t + dt) &=& \tn{x}_p (t) + \sum_{i} S_{ip} \tn{v}^L_i dt
\end{eqnarray}
This completes one timestep. Note that not carrying out the calculation in \ref{expdefgrid} explicitly has the effect of resetting the deformed grid to its undeformed position at the end of the timestep cycle.
As with all explicit time integration methods, a timestep size limit must be enforced such that $dt < dx/(|\tn{v}_p| + c)$ for all particles, where $dx$ is the computational grid spacing and $c$ is the speed at which stress waves propagate through the material. Failure to adhere to this condition will cause the solution to become unstable and blow up. The material wavespeed depends on the material model used, as well as on the particular parameters chosen for that model. Specifics of calculating the wavespeed are given in the appendix.
\bibliographystyle{unsrt}
\bibliography{Bibliography}
\appendix
\section{Hyperelastic Material Models}
The subject of modeling the response of materials to deformation is one that has filled numerous textbooks. Therefore, rather than attempt to condense these volumes, here the reader will simply be given a simple material response model. Other more complex material response models can be interchanged in the framework discussed above quite readily.
The author has come to prefer a class of models known as hyperelastic models. What this means is that the stress response of these materials is derived from a strain energy function. A strain energy function gives a relationship between the state of deformation that a material is in, and the amount of stored strain energy that this material has. This is akin to the familiar relationship for the stored energy in a spring, $W=\frac{1}{2} k dx^2$, where $k$ is the spring constant, and $dx$ is the distance that the spring has been compressed or extended.
One such strain energy function is given by:
\begin{eqnarray}
W &=& \frac{\lambda}{4}(J^2-1) - \left(\frac{\lambda}{2}+\mu\right) \ln J + \frac{\mu}{2}\left(\mathrm{trace}(\tn{F}^T\tn{F}) - 3\right)
\end{eqnarray}
from which the following relationship for the stress can be derived:
\begin{eqnarray}
\sig &=& \frac{\lambda}{2}(J-\frac{1}{J})\tn{I} + \mu (\tn{F}\tn{F}^T - \tn{I}) \label{stress}
\end{eqnarray}
where $\lambda$ and $\mu$ are material constants, while $J$ and $\tn{F}$ describe the state of deformation. These will be defined shortly.
In the Algorithm section, the calculation of the velocity gradient, $\nabla \tn{v}_p$, is given in Equation \ref{velgrad}. Starting from there, we can then compute an increment in the deformation gradient, $\tn{F}(dt)$ by:
\begin{eqnarray}
\tn{F}(dt) &=& \nabla \tn{v}_p dt + \tn{I}.
\end{eqnarray}
This increment in the deformation gradient can then be used to compute a new total deformation gradient using:
\begin{eqnarray}
\tn{F}(t+dt) &=& \tn{F}(dt) \tn{F}(t).
\end{eqnarray}
Note that the initial (t=0) deformation gradient is simply the identity, i.e. $\tn{F}(0) = \tn{I}$. Now with the deformation gradient, one can compute $J$ by:
\begin{eqnarray}
J &=& det(\tn{F}(t+dt)).
\end{eqnarray}
Note that $J$ represents the volumetric part of the deformation. Specifically, it is the ratio of the current volume of an element of material to its original volume. Similarly, we can define an increment in $J$ as:
\begin{eqnarray}
J_{inc} &=& det(\tn{F}(dt))
\end{eqnarray}
which is the ratio of the current volume of an element of material to its volume at the previous timestep. Thus we can write:
\begin{eqnarray}
v_p(t+dt) &=& J_{inc} v_p(t).
\end{eqnarray}
Elastic material properties are frequently given in terms of bulk and shear moduli, or $\kappa$ and $\mu$. The shear modulus is sometimes denoted by $G$. The shear modulus $\mu$ appears in Equation \ref{stress} above. $\lambda$ can be computed from $\kappa$ and $\mu$ by:
\begin{eqnarray}
\lambda &=& \kappa - \frac{2}{3}\mu.
\end{eqnarray}
Lastly, based on material properties $\lambda$ and $\mu$, a material wavespeed can be computed:
\begin{eqnarray}
c^2 &=& (\lambda + 3 \mu)\frac{v_p}{m_p}.
\end{eqnarray}
This wavespeed can be used in computing the timestep size as described above.
\section{Other Material Models In the UCF}
Other material models implemented into the Uintah Computational Framework (UCF) have been chosen for three purposes:
\begin{itemize}
\item To verify the accuracy of the material point method (MPM) and to validate the coupling between the computational fluid dynamics code (ICE) and MPM.
\item To model the elastic-plastic deformation of the steel container and the consequent damage in the regimes of both high and low strain rates and high and low temperatures.
\item To model the polymer bonded explosive contained in the container under various strain rates and temperatures.
\end{itemize}
\subsection{Material models for the validation of MPM}
The models that have been implemented for the verification of MPM are:
\begin{itemize}
\item Isotropic hypoelastic model using the Jaumann rate of stress.
\begin{enumerate}
\item MPM predictions have been compared with exact results for thick cylinders under internal pressure for small strains, three-point beam bending, etc.
\item MPM predictions for the strain/stress contours for a set of disks in contact have been found to match experimental results.
\end{enumerate}
\item Isotropic hyperelastic material models for Mooney-Rivlin rubber and a modified compressible Neo-Hookean material.
Isotropic strain hardening plasticity for the Neo-Hookean material.
\begin{enumerate}
\item A billet compression problem has been simulated using MPM and the results have been found to closely match finite element simulations.
\item MPM simulations for a thick cylinder under internal pressure with plastic deformation (perfect plasticity) compare well with the exact solution.
\end{enumerate}
\end{itemize}
\subsection{Material models for the container}
The material model for the steel container is used to determine the state of stress in the container for an applied deformation rate and deformation gradient at each material point. The strain rates can vary from $10^{-3}$/s to $10^6$/s and temperatures in the container can vary from 250 K to 1000 K. Plasticity dominates the deformation of the container during the expansion of the explosive gases inside. At high strain rates, the volumetric response of the container is best obtained using an equation of state. After the plastic strain in the container has reached a threshold value, a damage/erosion model is required to rupture the container.
Two plasticity models with strain rate and temperature dependency are the Johnson-Cook and the Mechanical Threshold Stress (MTS) models. The volumetric response is calculated using a modified Mie-Gruneisen equation of state. A damage model that ties in well with the Johnson-Cook plasticity model is the Johnson-Cook damage model. The erosion algorithm either removes the contribution of the mass of the material point or forces the material point to undergo no tension or shear under further loading. The stress update at each material point is performed using either of the two methods discussed below.
\begin{itemize}
\item Isotropic Hypoelastic-plastic material model using an additive decomposition of the rate of deformation tensor.
\begin{enumerate}
\item The rate of deformation tensor at a material point is calculated using the grid velocities.
\item An incremental update of the left stretch and the rate of rotation tensors is calculated.
\item The stress and the rate of deformation are rotated into the material coordinates.
\item A trial elastic deviatoric stress state is calculated.
\item The flow stress is calculated using the plasticity model and compared with the von Mises yield condition.
\item If the stress state is elastic, an update of the stress is computed using the Mie-Gruneisen equation of state or the isotropic hypoelastic constitutive equation.
\item If the stress state is plastic, all of the strain rate is considered to be plastic, and an elastic correction along with a radial return step moves the stress state to the yield surface. The hydrostatic part of the stress is calculated using the equation of state or the hypoelastic constitutive equation.
\item A scalar damage parameter is calculated and used to determine whether material points are to be eroded or not.
\item Stresses and deformation rates are rotated back to the laboratory coordinates.
\end{enumerate}
\item Isotropic Hyperelastic-plastic material model using a multiplicative decomposition of the deformation gradient.
\begin{enumerate}
\item The velocity gradient at a material point is calculated using the grid velocities.
\item An incremental update of the deformation gradient and the left Cauchy-Green tensor is calculated.
\item A trial elastic deviatoric stress state is calculated assuming a compressible Neo-Hookean elastic model.
\item The flow stress is calculated using the plasticity model and compared with the von Mises yield condition.
\item If the stress state is elastic, an update of the stress is computed using the Mie-Gruneisen equation of state or the compressible Neo-Hookean constitutive equation.
\item If the stress state is plastic, all of the strain rate is considered to be plastic, and an elastic correction along with a radial return step moves the stress state to the yield surface. The hydrostatic part of the stress state is calculated using the Mie-Gruneisen equation of state or the Neo-Hookean model.
\item A scalar damage parameter is calculated and used to determine whether material points are to be eroded or not.
\end{enumerate}
\end{itemize}
The implementations have been tested against Taylor impact test data for 4340 steel and HY 100 steel as well as one-dimensional problems which have been compared with experimental stress-strain data. At large tensile strains, material points tend to separate from the main body. This issue is currently being explored and solutions are being sought in the framework of MPM.
\subsection{Material models for the explosive}
The explosive is modeled using the ViscoSCRAM constitutive model. Since large deformations or strains are not expected in the explosive, a small strain formulation has been implemented into the UCF. The model consists of five generalized Maxwell elements arranged in parallel, crack growth, friction at the crack interfaces and heating due to friction and reactions at the crack surfaces. The implementation has been verified with experimental data and found to be accurate.
\end{document}
\section{Results}
\label{sec:04-results}
\subsection{Qualitative results}
The easiest and most effective way to show the results of a global motion estimation pipeline is to use qualitative information, since the reader can interpret it directly. One of the operations that intuitively shows the implications of global motion estimation is camera motion compensation. In short, if the video is recorded by a camera which is moving, we will see the two types of motion explained in \cref{sec:01-intro}: apparent and real. The aim of camera-motion compensation is to detect and remove the apparent motion, which is caused only by the (ego)motion of the camera.
To show the results obtained, we report in \cref{fig:qualitative-results} an example in which the reader can observe:
\begin{itemize}
\item the previous frame in \cref{fig:prev-frame};
\item the next frame in \cref{fig:curr-frame};
\item the compensated frame, obtained by compensating camera motion in the previous frame, in \cref{fig:compensated};
\item the absolute difference between next and previous frame in \cref{fig:diff-curr-prev}, which gives an idea of where in the image the motion is and how strong it is;
\item the motion field generated by the camera, as returned by our estimation procedure, in \cref{fig:est-mf};
\item the absolute difference between the next frame and the compensated one in \cref{fig:diff-curr-comp}.
\end{itemize}
\begin{figure*}[!h]
\begin{subfigure}[b]{0.3\textwidth}
\centering
\includegraphics[width=1\textwidth]{../assets/images/pan240-prev-frame.png}
\caption{Previous frame.}
\label{fig:prev-frame}
\end{subfigure}
\hfill
\begin{subfigure}[b]{0.3\textwidth}
\includegraphics[width=1\textwidth]{../assets/images/pan240-curr-frame.png}
\caption{Next frame.}
\label{fig:curr-frame}
\end{subfigure}
\hfill
\begin{subfigure}[b]{0.3\textwidth}
\includegraphics[width=1\textwidth]{../assets/images/pan240-compensated.png}
\caption{Compensated previous frame.}
\label{fig:compensated}
\end{subfigure}
\vspace{0.5em}
\begin{subfigure}[b]{0.3\textwidth}
\centering
\includegraphics[width=1\textwidth]{../assets/images/pan240-diff-curr-prev-1.png}
\caption{Absolute difference between current and previous frame (white areas are where we detect motion).}
\label{fig:diff-curr-prev}
\end{subfigure}
\hfill
\begin{subfigure}[b]{0.3\textwidth}
\includegraphics[width=1\textwidth]{../assets//images/pan240-camera-motion-1.png}
\caption{Global motion field estimated by our procedure; it corresponds to the motion generated by the camera.}
\label{fig:est-mf}
\end{subfigure}
\hfill
\begin{subfigure}[b]{0.3\textwidth}
\includegraphics[width=1\textwidth]{../assets/images/pan240-diff-curr-comp.png}
\caption{Absolute difference between current and compensated frame; the object shown in white is the only one actually moving.}
\label{fig:diff-curr-comp}
\end{subfigure}
\caption{Qualitative results.}
\label{fig:qualitative-results}
\end{figure*}
\paragraph{How to interpret qualitative results}
We should notice in the results that the motion detected between the two frames (previous and next) is large and clearly noticeable. In fact, by using the absolute difference between frames, we can see in \cref{fig:diff-curr-prev} that a large part of the visual field seems to be moving. However, by computing the motion generated by the camera, we find out that most of the shift we register is apparent.
By estimating the motion generated by the camera, which is reported in \cref{fig:est-mf}, we can compensate the apparent motion in the previous frame. When we compensate the previous frame, we \textit{remove} all the apparent motion; therefore, if we compare the current frame and the compensated frame (see \cref{fig:diff-curr-comp}), we are able to isolate real motion. The results shown are consistent, since the video from which the frames are taken was recorded with the camera moving in the horizontal direction while the person in the video was walking.
\subsection{Quantitative results}
To perform a numerical quantitative analysis of the results produced by the algorithm, we used, once again, the compensation of the previous frame. Put simply, in a video sequence where there is no real motion, but only camera motion, compensating the apparent motion should be enough to make the current and the compensated frame identical. Therefore, in \cref{tab:psnr}, we present some PSNR values for different video sequences.
We annotated the table with some considerations about the videos, as the PSNR value is also influenced by properties of the video, such as the strength of the motion or the presence/absence of real motion. For instance, in the case where there are medium-sized or large moving objects, as in the last two entries of the table, the PSNR turns out to be low, since the model is not able to distinguish between object and camera motion. Better results can be observed in the first two entries of the table, where the moving objects are considerably smaller than the background, and the background presents a complex pattern, which enables the algorithm to better detect its motion.
\begin{table*}[!t]
\begin{center}
\begin{tabular}{ll|rrrr}
\toprule
Video & Properties & Average & Variance & Maximum & Minimum \\
\midrule
\texttt{pan240.mp4} & Fast motion & \(22.724\) & \(5.125\) & \(27.802\) & \(17.981\) \\
\texttt{coastguard\_qcif.mp4} & Two objects moving, background moving & \(22.733\) & \(2.194\) & \(26.875\) & \(15.158\) \\
\texttt{foreman.mp4} & Big object moving, still background & \(19.677\) & \(18.443\) & \(30.436\) & \(11.746\) \\
\texttt{numeri\_del\_piero.mp4} & Medium object moving, moving background & \(19.072\) & \(13.642\) & \(47.722\) & \(16.323\) \\
\bottomrule
\end{tabular}
\caption{PSNR results (in dB) on some meaningful video samples.}
\label{tab:psnr}
\end{center}
\end{table*}
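The PSNR values reported in \cref{tab:psnr} can be reproduced with a routine along the following lines (a minimal sketch of our own, not the actual evaluation script of this project), applied to the current frame and the previous frame warped with the estimated global motion.
\begin{verbatim}
import numpy as np

def psnr(reference, test, max_value=255.0):
    # Peak signal-to-noise ratio (in dB) between two greyscale frames.
    reference = reference.astype(np.float64)
    test = test.astype(np.float64)
    mse = np.mean((reference - test) ** 2)
    if mse == 0.0:
        return float("inf")
    return 10.0 * np.log10((max_value ** 2) / mse)

# Usage (frames as NumPy arrays): a perfect compensation of a scene with no
# real motion would give an infinite PSNR; in practice average values of
# roughly 19-23 dB are obtained, as reported in the table above.
# score = psnr(current_frame, compensated_previous_frame)
\end{verbatim}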
\filetitle{!input}{List of input parameters or variables}{sstatelang/input} \paragraph{Syntax} \begin{verbatim} !input parameter_or_variable_name, parameter_or_variable_name, parameter_or_variable_name, ... \end{verbatim} \paragraph{Description} \paragraph{Example}
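A hypothetical illustration (the names below are invented and stand for parameters or variables declared elsewhere in the model):
\begin{verbatim}
!input
    alpha, beta, delta
\end{verbatim}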
\section{Model Transformations for \SLCO} \label{sec:reusable-correct-transformations:model_transformations} DMSLs allow designers to reason at a high level of abstraction, and therefore, DSML models do not include many implementation details. The main goal of refining model transformations is to add more details to the model, thus bringing it closer to its implementation. To generate code (such as an \NQC executable) from an \SLCO model, a number of endogenous model transformations have been designed and implemented. By design, each model transformation transforms only a specific small part of the input model, because small transformations can be easily applied, composed, implemented, and analyzed. We have composed sequences of transformations for several target languages. Our correctness criterion guarantees that every intermediate model, including the last model in the sequence, has the same properties as the source model. Furthermore, the transformations are designed and composed such that the very last \SLCO model in the chain contains all implementation details. \subsection{Reusability of Model Transformations} \label{subsec:reusable-correct-transformations:reusability} Table~\ref{tab:endogenous_model_transformations} lists eleven of thirteen endogenous model transformations that are defined to refine \SLCO models. The other two transformations deal with time and are not discussed in this chapter. A detailed discussion of all these transformation is given in Section~\ref{sec:slco:endogenous}. There are two ways in which these transformations can be reused. First, a model transformation can be applied multiple times within the same sequence of transformations, as indicated in the second column of Table~\ref{tab:endogenous_model_transformations}. In practice, the most reused model transformations are the \emph{Clone Classes} transformation, which can be used to clone certain classes, and the \emph{Remove Classes} transformation, which can be used to remove all classes that have no instances. They ensure that models adhere to the constraints imposed by most of the other transformations and are therefore crucial for the successful composition of transformations. Second, a model transformation can be applied in multiple sequences leading to different target languages, as indicated in the third column. This type of reuse is less common, but supporting other target languages with similar semantic gaps would automatically lead to more reuse. The fourth column is discussed in Section~\ref{sec:reusable-correct-transformations:correctness_of_transformations}. It shows the number of proof obligations that must be handled to prove the correctness of each transformation. 
\begin{table*}[hbt] \centering \begin{tabular}{|l|c|c|c|} \hline \rowcolor[gray]{.9} & \textbf{Reused within} & \textbf{Reused for different} & \textbf{Number of proof} \\ \rowcolor[gray]{.9} \multirow{-2}{*}{\textbf{Transformation}} & \textbf{sequences} & \textbf{target languages} & \textbf{obligations} \\ \hline Bidirectional to & \mr{2}{no} & \mr{2}{yes} & \mr{2}{1} \\ Unidirectional & & & \\ \hline Clone Classes & yes & yes & 1 \\ \hline Exclusive Channels & yes & no & 1 \\ \hline Identify Channels & no & no & 1 \\ \hline Lossless to Lossy & no & no & 74 \\ \hline Merge Channels & no & no & 1 \\ \hline Merge Objects & yes & no & 13 \\ \hline Names to Arguments & no & no & 1 \\ \hline Remove Classes & yes & yes & 1 \\ \hline Strings to Integer & yes & no & 1 \\ \hline Synchronous to & \mr{2}{no} & \mr{2}{no} & \mr{2}{4 and 34} \\ Asynchronous & & & \\ \hline \end{tabular} \caption{Endogenous model transformations} \label{tab:endogenous_model_transformations} \end{table*} Because of their size and complexity, it is not possible to consider all transformations from Table~\ref{tab:endogenous_model_transformations}. Instead, we select the two variants of the \emph{Synchronous to Asynchronous} transformation to illustrate our approach. The difference in complexity of these two variants illustrates that more generic transformations employ more involved communication protocols for handling the introduced changes. One should search for the strongest possible constraints on the input models for such a transformation~\cite{SLCOexploring2011}. These constraints shall be realized in separate transformation steps that precede the more complex one in the transformation chain. This way, many unnecessary details are removed from the core part of the transformation. The simple variant of the \emph{Synchronous to Asynchronous} transformation also described below, although simple, is still complex enough to illustrate all the details of our approach. \subsection{Refining Synchronous Communication} \label{subsec:reusable-correct-transformations:sync_to_async} Synchronous communication is a typical example of a construct at a high level of abstraction that is often present in formal modeling languages. General-purpose programming languages, however, do not offer this concept. While \SLCO allows for syn\-chro\-nous communication, the communication between controllers on the Lego Mindstorms platform is asynchronous. Therefore, synchronization should be realized with asynchronous interaction, introduced by correctly defined model transformations built around a properly defined communication protocol. \clearpage We defined two different transformations, \TSim and \TGen, to replace one of the synchronous channels in a model by an asynchronous channel. To keep the observable behavior of the modeled system intact, this change requires and triggers further changes of the related classes, state machines, and transitions. Transformation~\TSim applies to a restricted subset of models, but is simple and does not greatly increase the complexity of the produced model. In contrast, transformation~\TGen can be applied to any \SLCO model, but as a more complex protocol is introduced by the transformation, it adds more complexity to the produced model. Both transformations require the following two constraints to hold for their input. First, the objects that communicate via the synchronous channel must be the only instances of their classes. 
Second, only a single pair of state machines from the two classes may communicate over the channel. We stress, however, that this does not limit their applicability. By means of the \emph{Exclusive Channels} and \emph{Clone Classes} transformations, any \SLCO model can be transformed into a model with equivalent behavior that meets these constraints. Thus, instead of having more complicated transformations that first change models to meet these constraints and then replace synchronous communication by asynchronous communication, we opt for sequences of simpler transformations that have the same effect. The fact that the constraints hold can be used in the correctness proof of \TSim and \TGen, which greatly simplifies these proofs. In the remainder of this section, we provide a short description of the aforementioned transformations. An informal description is given in Section~\ref{subsubsec:slco:sync2async}, and a more detailed description is given in Appendix~\ref{ap:transformations-slco}. For the rest of the section, we assume that in the model~$m$, the synchronous channel $\it{ch}_s = \it{chn}()~\textbf{sync from}~\it{on_1}.\it{pn_1}~\textbf{to}~\it{on_2}.\it{pn_2}$ is to be replaced with an asynchronous one. Let object~$o_i$ with name~$\it{on_i}$ be an instance of class~$\it{cl_i}$ with name~$\it{cn_i}$ in model~$m$, for $i=1,2$. We also assume, as explained above, that object~$o_i$ is the only instance of class~$\it{cl_i}$ and that state machine~$\it{sm_i}$ with name~$\it{smn_i}$ is the only state machine in~$\it{cl_i}$ that uses channel~$\it{ch_s}$, for $i=1,2$. Furthermore, we use $\it{tr_s} = \it{tn_s}~\textbf{from}~\it{ss_1}~\textbf{to}~\it{ss_2}~\textbf{send}~\it{sgn}()~\textbf{to}~\it{pn_1}$ to denote a transition of $sm_1$ of $cl_1$ that sends signals over~$\it{ch_s}$ and $\it{tr_r} = \it{tn_r}~\textbf{from}~\it{sr_1}~\textbf{to}~\it{sr_2}~\textbf{receive}~\it{sgn}()~\textbf{from}~\it{pn_2}$ to denote a transition of $sm_2$ of $cl_2$ that receives signals over~$\it{ch_s}$. Due to the uniqueness of the channel name and the aforementioned constraints, the transformation of the channel $\it{ch_s}$ induces a transformation of the classes $cl_1$ and $cl_2$ only. We show only the transformation of signals without arguments, but an extension to general signals is straightforward. In the remainder of this chapter, we deal with simplified \SLCO models, as described in Section~\ref{sec:SLCO:simplified_slco}. \subsubsection{Simple Transformation} Transformation \TSim modifies state machines by replacing some of their transitions. No essential changes are made to the other structures of a model. It is only applicable if, for every transition~$\it{tr_s}$, there is no other transition with the same source state. 
For every transition~$\it{tr_s}$ of $\it{sm_1}$ and for every transition $\it{tr_r}$ in $\it{sm_2}$, we define \[ \begin{array}{l} \TSim(\it{tr_s}, \it{pn_1}) = \langle \\ \quad \it{ss_{nw}}, \\ \quad \it{tn_s^1}~\textbf{from}~\it{ss_1}~\textbf{to}~\it{ss_{nw}}~\textbf{send}~\it{ssgn}()~\textbf{to}~\it{pn_1} \\ \quad \it{tn_s^2}~\textbf{from}~\it{ss_{nw}}~\textbf{to}~\it{ss_2}~\textbf{receive}~\it{asgn}()~\textbf{from}~\it{pn_1} \\ \rangle \end{array} \] \[ \begin{array}{l} \TSim(\it{tr_r}, \it{pn_2}) = \langle \\ \quad \it{sr_{nw}}, \\ \quad \it{tn_r^1}~\textbf{from}~\it{sr_1}~\textbf{to}~\it{sr_{nw}}~\textbf{receive}~\it{ssgn}()~\textbf{from}~\it{pn_2} \\ \quad \it{tn_r^2}~\textbf{from}~\it{sr_{nw}}~\textbf{to}~\it{sr_2}~\textbf{send}~\it{asgn}()~\textbf{to}~\it{pn_2} \\ \rangle, \end{array} \] \noindent where $\it{ss_{nw}}$ and $\it{sr_{nw}}$ are fresh state names, $\it{tn_s^1}$, $\it{tn_s^2}$, $\it{tn_r^1}$, and $\it{tn_r^2}$ are fresh transition names, $\it{ssgn} \equiv ``s\_"+sgn$, and $\it{asgn} \equiv ``a\_"+sgn$. In the transformed model, the new states are added to the appropriate state machines, and the transitions~$\it{tr_s}$ and~$\it{tr_r}$ are replaced by the newly generated transitions. For ease of reference, a graphical representation of the states and transitions in the definition above is given in Figure~\ref{fig:reusable-correct-transformations:trans_simple}. \begin{figure}[hbt] \centering \includegraphics[scale=0.45]{reusable-correct-transformations/figs/transformation_simple} \caption{Partial state machines that illustrate \TSim} \label{fig:reusable-correct-transformations:trans_simple} \end{figure} \subsubsection{General Transformation} Transformation \TGen is more general than \TSim, due to which it adds more complexity to the produced model. In this case, also classes are transformed, and new state machines are created. The restrictions we had on \TSim are removed, which means that \TGen can be applied to any $\it{tr_s}$ of $\it{sm_1}$ and any $\it{tr_r}$ of $\it{sm_2}$ as defined above. 
Transformation~\TGen on transitions is defined as \[ \begin{array}{l} \TGen(\it{tr_s}, \it{pn_1}, \it{vc_1}) = \langle \\ \quad \it{ss_3}~\it{ss_4}~\it{ss_5}~\it{ss_6}~\it{ss_7}, \\ \quad \it{ts_1}~\textbf{from}~\it{ss_1}~\textbf{to}~\it{ss_3}~\it{vc_1}==0 \\ \quad \it{ts_2}~\textbf{from}~\it{ss_3}~\textbf{to}~\it{ss_4}~\textbf{send}~\it{sgn}(1)~\textbf{to}~\it{pn_1} \\ \quad \it{ts_3}~\textbf{from}~\it{ss_4}~\textbf{to}~\it{ss_5}~\it{vc_1}==2 \\ \quad \it{ts_4}~\textbf{from}~\it{ss_5}~\textbf{to}~\it{ss_6}~\textbf{send}~\it{sgn}(3)~\textbf{to}~\it{pn_1} \\ \quad \it{ts_5}~\textbf{from}~\it{ss_6}~\textbf{to}~\it{ss_2}~\it{vc_1}==0 \\ \quad \it{ts_6}~\textbf{from}~\it{ss_7}~\textbf{to}~\it{ss_1}~\textbf{send}~\it{sgn}(4)~\textbf{to}~\it{pn_1} \\ \quad \it{ts_7}~\textbf{from}~\it{ss_4}~\textbf{to}~\it{ss_7}~\it{vc_1}:=2 \\ \rangle \end{array} \] \[ \begin{array}{l} \TGen(\it{tr_r}, \it{pn_2}, \it{vc_2}) = \langle \\ \quad \it{sr_3}~\it{sr_4}~\it{sr_5}~\it{sr_6}~\it{sr_7}, \\ \quad \it{tr_1}~\textbf{from}~\it{sr_1}~\textbf{to}~\it{sr_3}~\it{vc_2}==1 \\ \quad \it{tr_2}~\textbf{from}~\it{sr_3}~\textbf{to}~\it{sr_4}~\textbf{send}~\it{sgn}(2)~\textbf{to}~\it{pn_2} \\ \quad \it{tr_3}~\textbf{from}~\it{sr_4}~\textbf{to}~\it{sr_5}~\it{vc_2}==3 \\ \quad \it{tr_4}~\textbf{from}~\it{sr_5}~\textbf{to}~\it{sr_2}~\textbf{send}~\it{sgn}(0)~\textbf{to}~\it{pn_2} \\ \quad \it{tr_5}~\textbf{from}~\it{sr_4}~\textbf{to}~\it{sr_1}~\it{vc_2}==4 \\ \quad \it{tr_6}~\textbf{from}~\it{sr_7}~\textbf{to}~\it{sr_1}~\textbf{send}~\it{sgn}(0)~\textbf{to}~\it{pn_2} \\ \quad \it{tr_7}~\textbf{from}~\it{sr_6}~\textbf{to}~\it{sr_7}~\it{vc_2}:=3 \\ \quad \it{tr_8}~\textbf{from}~\it{sr_1}~\textbf{to}~\it{sr_6}~\it{vc_2}==4 \\ \rangle, \end{array} \] \noindent where $\it{ss_j}$ and $\it{sr_j}$, and $\it{ts_j}$ and $\it{tr_k}$ are fresh state and transition names, for~$j=3,\ldots,7$ and~$k=3,\ldots,8$. Variables~$\it{vc}_1$ and $\it{vc}_2$ are discussed below. A graphical representation of the states and transitions in the definition above is depicted by the two partial state machines on the left of Figure~\ref{fig:reusable-correct-transformations:trans_general}. \begin{figure}[hbt] \centering \includegraphics[scale=0.45]{reusable-correct-transformations/figs/transformation_general} \caption{Partial state machines that illustrate \TGen} \label{fig:reusable-correct-transformations:trans_general} \end{figure} In the transformed model, the new states are added to the appropriate state machines, and transitions~$\it{tr_s}$ and~$\it{tr_r}$ are replaced by the new transitions. Additionally, a fresh integer variable~$\it{vc_i}$ and a state machine~$\it{reader_i}$ are added to the classes~$\it{cl_i}$, for~$i=1,2$. Let $\it{Tr^G_i}$ be the sets of all $\it{tr_s}$ and $\it{tr_r}$-like transitions of $sm_i$, for~$i=1,2$, $\it{Sgn_1}$ the set of all signal names occurring in the sending statements of transitions in $\it{Tr^G_1}$, and $\it{Sgn_2}$ the set of all signal names occurring in the reception statements of transitions in $\it{Tr^G_2}$. State machine~$\it{reader_i}$ is a result of applying function~$\it{Rsm}$, defined as % \begin{align*} \it{Rsm} & (\it{pn_i}, \it{vc_i}, \it{Sgn_i}) = \it{reader_i}~\textbf{initial}~\it{init_i} \\ & [\it{tsgn_i}~\textbf{from}~\it{init_i}~\textbf{to}~\it{init_i}~\textbf{receive}~\it{sgn}(\it{vc_i})~\textbf{from}~\it{pn_i}~|~ sgn\in Sgn_i], \end{align*} % \noindent where $\it{reader_i}$ is a fresh state machine name, $\it{init_i}$ is a fresh state name, and $tsgn \equiv ``t\_"+sgn$. 
As defined, state machine~$\it{reader_i}$ has a transition for every signal name $\it{sgn}$ from $\it{Sgn_i}$. On the right of Figure~\ref{fig:reusable-correct-transformations:trans_general}, an example of such a state machine is shown.
\section{Introduction}
This document contains a methodology that can be used to design an axisymmetric rocket nozzle. The design method incorporates such variables as \emph{(a)} fuel/oxidizer combination used in the combustion chamber, \emph{(b)} desired Mach number at the nozzle exit plane, and \emph{(c)} viscous effects. The combustion process is modeled assuming equilibrium combustion at constant pressure using the Gibbs Minimization Technique. The post combustion mixture properties (species mass fractions, specific heat ratio, etc.) are then used as the input conditions for a converging/diverging supersonic nozzle, which assumes frozen flow throughout the expansion process. The converging nozzle section is based on an algebraic expression, while the diverging contour is designed such that a specific Mach number distribution along the nozzle centreline is obtained. This specific contour is found using the two-dimensional method of characteristics. The beginning of this document contains a brief description of the theory used in the design process while the remaining sections contain examples of the theory applied to several cases of interest. The entire set of input variables required for a complete nozzle design is listed in Table \ref{table:inputvariables}.
\begin{table}[!h]
\begin{center}\fontsizetable
\begin{threeparttable}
\tablecaption{Rocket Nozzle Design Parameters}
\begin{tabular}{cc}
\toprule
Design Variable & Typical Values\\
\midrule
Fuel & Kerosene ($C_{12} H_{24}$), Hydrogen ($H_2$)\\
Oxidizer & Air (79\% $N_2$, 21\% $O_2$), Oxygen ($O_2$)\\
Equivalence Ratio & 0.1 - 3.0\\
Fuel Input Temperature & 300 - 1,000 K\\
Total Pressure & 1 - 10 MPa\\
\\
Exit Mach Number & 1.5 - 6.0\\
Maximum Expansion Angle & 10 - 15 degrees\\
Throat Height & 5 - 50 mm\\
Viscosity & Yes/No\\
Nozzle Wall Temperature & 500 - 1000 K\\
Mach Number at Combustion Chamber Exit & 0.001 - 0.1\\
Subsonic Nozzle Section Length \\(\% of Supersonic Length) & 15\% - 45\%\\
\bottomrule
\end{tabular}
%\begin{tablenotes}
%\end{tablenotes}
\label{table:inputvariables}
\end{threeparttable}
\end{center}
\end{table}
%-------------------------------------------------------------------------------------------------
\section{Combustion Chamber Theory}
This section outlines a method that can be used to calculate the chemical composition of a post combustion mixture based on the assumption of achieving chemical equilibrium. This assumption goes one step further than the basic complete combustion assumption from which stoichiometric ratios are calculated, but still falls one step short of the full finite-rate (non-equilibrium) combustion scenario. Equilibrium combustion allows for the fact that species other than those found in the complete combustion case can be present in the post combustion mixture (i.e. dissociated products, incomplete combustion products, etc.) but does not take into account the time rate of change of these species. Thus it can be viewed as the non-equilibrium combustion process carried through sufficient time for chemical equilibrium to be achieved (or, thought of another way, as the non-equilibrium combustion process assuming infinite reaction rates). There are two major methods for calculating chemical equilibrium mixtures, the Equilibrium Constant Method and the Gibbs Minimization Method, the latter of which is described here.
\subsection{Gibbs Minimization Technique} The major attraction of the Gibbs Minimization Technique for solving chemical equilibrium over the more traditional Equilibrium Constant Method is that the system of equations obtained from the Gibbs method is usually easier to solve than that for the Equilibrium Constant method. In the Equilibrium Constant Method one is required to define $n$ distinct chemical reactions (where $n$ is equal to the total number of species to be considered, $n_s$, minus 2) which along with Dalton's Law of partial pressures and the fact that the ratio of fuel atoms to oxidizer atoms is constant (in the absence of nuclear reactions) yields a system of $n$ equations to be solved simultaneously. The drawback of this approach is that due to the nature of the equilibrium constant, the system of equations is normally non-linear and hence very difficult to solve. However the Gibbs Minimization Technique, although perhaps less intuitive and slightly more complex in its derivation, results in a system of only $n_a + 2$ equations (where $n_a$ is the total number of distinct atomic particles involved in the reaction, which for the case of most hydrocarbon fuels is 4 [C, H, N, O]). As well as significantly reducing the number of equations that need to be solved, this approach also results in a linear system of equations allowing for a much simpler solution process. Starting first by defining the Gibbs energy of a species $k$, \begin{equation} g_k = h_k - Ts_k = h^o_k - Ts^o_k + \overline{R}T \ln(\frac{\sigma_k}{\sigma_m}) + \overline{R}T \ln(\frac{p}{p^o}) \label{eqn:gibbsk} \end{equation} where $h_k$ and $s_k$ are the enthalpy and entropy of species $k$ respectively at the temperature $T$ (assuming that the species is being considered on its own). However, since in most cases these values are found via experimentally determined polynomials which are functions of $T$ taken at a particular reference pressure $p^o$ (usually 1 atm = 101325 Pa), the values $h^o_k$ and $s^o_k$ can be used if the term in Eq. \ref{eqn:gibbsk} involving the actual pressure, $p$ (which must be in units of atm), is added. Also, to consider a species $k$ which is part of a mixture of other species, the remaining term in Eq. \ref{eqn:gibbsk} involving the species kmols, $\sigma_k$, and the total kmols of the entire mixture, $\sigma_m$, is needed to account for the energy available through the expansion of species $k$ from its partial pressure in the mixture to the ambient pressure of the mixture as a whole. Note that Eq. \ref{eqn:gibbsk} yields the Gibbs energy in units of [J/kmol] when the units of the Universal Gas Constant, $\overline{R}$, are 8314.3 [J/kmol K]. Having established the Gibbs energy of an individual species within a mixture on a molar basis, the Gibbs energy of the entire mixture can be written as, \begin{equation} G = \sum_{k=0}^{k=\overline{n_s}} \sigma_k g_k \label{eqn:totgibbs} \end{equation} where $\overline{n_s}$ is the total number of species being considered, $n_s$, minus 1 (since the index starts at zero). As the name of the method implies, finding the equilibrium composition involves taking the derivative of the Gibbs energy (Eq. \ref{eqn:totgibbs}) with respect to each species under consideration and setting this equal to zero (hence minimizing the Gibbs function), \begin{displaymath} dG=\sum_{j=0}^{j=\overline{n_s}} \frac{\partial G}{\partial \sigma_j} d\sigma_j = 0 \end{displaymath} which is a condition for a mixture in chemical equilibrium. 
Substituting the definition of total Gibbs energy (Eqs. \ref{eqn:gibbsk} and \ref{eqn:totgibbs}) into the above while multiplying out the sum over $k$, defining a reference Gibbs energy as \begin{equation} g_k^o=h_k^o -Ts_k^o \label{eqn:gibbsk2} \end{equation} and realizing that the total kmols is simply the sum of the individual species kmols, \begin{equation} \sigma_m = \sum_{i=0}^{i=\overline{n_s}}\sigma_i \label{eqn:sumkmol} \end{equation} one can write, \begin{displaymath} dG=\sum_{j=0}^{j=\overline{n_s}} \frac{\partial}{\partial \sigma_j}\Bigg\{ \sum_{k=0}^{k=\overline{n_s}} \sigma_k g^o_k + \sum_{k=0}^{k=\overline{n_s}} \sigma_k \overline{R}T \ln(\sigma_k) - \sum_{k=0}^{k=\overline{n_s}} \sigma_k \overline{R}T \ln (\sum_{i=0}^{i=\overline{n_s}} \sigma_i) + \sum_{k=0}^{k=\overline{n_s}} \sigma_k \overline{R}T \ln(\frac{p}{p^o})\Bigg\}d\sigma_j=0 \end{displaymath} After evaluating the derivative above, the familiar form of the chemical equilibrium condition can be obtained, \begin{equation} \fbox{$ dG = \sum_{k=0}^{k=\overline{n_s}}g_k d\sigma_k = 0 $} \label{eqn:equibcond} \end{equation} As well as specifying the minimization of the Gibbs energy at equilibrium, one can also make use of the fact that although there are $n_s$ species, each of these is composed of ratios of elementary particles, of which there are far fewer. Thus no matter how many species are considered (representing numerous chemical reactions) the total number of elementary atoms must remain constant. Expressing this relation mathematically, \begin{equation} \fbox{$ \sum_{k=0}^{k=\overline{n_s}}\eta_{ik}\sigma_k = b_i^*; \hspace{15mm} i=0 \rightarrow \overline{n_a} $} \label{eqn:totatoms} \end{equation} where $\eta_{ik}$ is the number of kg-atoms of $i$ per kmol of species $k$ (e.g. for $O_2$ $\eta = 2$) and $b_i^*$ is the total number of kg-atoms of atom $i$. The index $i$ goes from 0 to $\overline{n_a}$ where $\overline{n_a}$ is the total number of distinct atoms, $n_a$, minus 1. Another constraint equation can be derived from the fact that total energy must also be conserved, which for the case of an adiabatic reaction yields, \begin{equation} \fbox{$ \sum_{k=0}^{k=\overline{n_s}}h_k\sigma_k = H^* $} \label{eqn:totenth} \end{equation} which is simply an equation expressing the conservation of total enthalpy during the combustion process. Note here that $h_k$ is the enthalpy of species $k$ per kmol of species $k$ while $H^*$ is the total enthalpy of the entire mixture (assuming negligible velocity within the combustion chamber). Going back to Eq. \ref{eqn:totatoms}, taking its derivative with respect to $\sigma_k$ and multiplying each $i$ equation by a constant $\lambda_i$ one gets, \begin{displaymath} \lambda_i\Bigg\{\sum_{k=0}^{k=\overline{n_s}}\eta_{ik}d\sigma_k\Bigg\} = 0 \hspace {15mm} i=0\rightarrow \overline{n_a} \end{displaymath} since the total number of atoms of each type is a constant throughout combustion. Adding the above result to Eq. 
\ref{eqn:equibcond} yields, \begin{displaymath} \lambda_0\sum_{k=0}^{k=\overline{n_s}}\eta_{0k}d\sigma_k + \lambda_1\sum_{k=0}^{k=\overline{n_s}}\eta_{1k} d\sigma_k + \ldots + \lambda_{\overline{n_a}}\sum_{k=0}^{k=\overline{n_s}}\eta_{\overline{n_a} k} d\sigma_k + \sum_{k=0}^{k=\overline{n_s}}g_k d\sigma_k = 0 \end{displaymath} which after some manipulation can be reduced to, \begin{equation} \fbox{$ g_k + \sum_{i=0}^{i=\overline{n_a}}\lambda_i\eta_{ik} = 0 \hspace{15mm}k=0\rightarrow \overline{n_s} $} \label{eqn:lambdaone} \end{equation} At this point one must decide on a solution method for the equations derived above. Choosing a Newton-Raphson system of correction equations for which there are $n$ number of correction variables $\Delta x_j$ one can write (for the general functional $f_s$), \begin{equation} \sum_{j=1}^{j=n}\frac{\partial f_s}{\partial x_j}\Delta x_j = -f_s \label{eqn:newtraph} \end{equation} Putting Eq. \ref{eqn:totatoms} into this form yields, \begin{equation} f_s = f_i = \sum_{k=0}^{k=\overline{n_s}}\eta_{ik}\sigma_k - b_i^* \hspace{15mm}i=0\rightarrow \overline{n_a} \label{eqn:fi} \end{equation} while the conservation of enthalpy equation (Eq. \ref{eqn:totenth}) can be expressed in this form as (dividing by $\overline{R}T$ as well), \begin{equation} f_s = f_T = \sum_{k=0}^{k=\overline{n_s}}\frac{h_k}{\overline{R}T}\sigma_k - \frac{H^*} {\overline{R}T} \label{eqn:ft} \end{equation} If one chooses to define a non-dimensional Lagrange multiplier, \begin{equation} \pi_i = -\frac{\lambda_i}{\overline{R}T} \label{eqn:lagrange} \end{equation} then one can also phrase each of the $\overline{n_s}$ equations in Eq. \ref{eqn:lambdaone} as \begin{equation} f_s = f_k = \frac{g_k}{\overline{R}T} - \sum_{i=0}^{i=\overline{n_a}}\pi_i\eta_{ik} \hspace{15mm}k=0\rightarrow \overline{n_s} \label{eqn:fk} \end{equation} As a final step, one can also define a constraint equation based on the fact that the individual kmols of each species $k$ must sum to the total kmols of the mixture, \begin{equation} f_s = f_m = \sum_{k=0}^{k=\overline{n_s}}\sigma_k - \sigma_m \label{eqn:fm} \end{equation} (Aside: Lagrange multipliers are used when one wishes to find the extreme values of a function [either the maximum or minimum values] subject to certain side constraints. Thus in this case, one is trying to obtain the minimum value of the Gibbs energy subject to the side constraints of (a) conservation of total enthalpy (b) conservation of the total number of atoms and (c) ensuring the sum of the kmols of each species equals the independently determined total kmols of mixture.) Having defined the desired functionals, the next step is to define the correction variables to be used. In this case, by judicious choice of these values one can simplify the resulting system of equations to be solved. If one chooses $\Delta \ln(\sigma_k)$ ($k = 0 \rightarrow \overline{n_s}$), $\Delta \ln(\sigma_m)$, and $\Delta \ln(T)$ then the Newton-Raphson scheme in Eq. \ref{eqn:newtraph} can be written for a general functional $f_s$ as, \begin{equation} \fbox{$ \sum_{k=0}^{k=ns}\frac{\partial f_s}{\partial \ln(\sigma_k)}\Delta \ln(\sigma_k) + \frac{\partial f_s}{\partial \ln(\sigma_m)}\Delta \ln(\sigma_m) + \frac{\partial f_s} {\partial \ln(T)}\Delta \ln(T) = - f_s $} \label{eqn:newtfull} \end{equation} Starting first with Eq. \ref{eqn:fk} (which itself is actually $n_s$ equations) and taking the required derivatives will allow its substitution into Eq. 
\ref{eqn:newtfull} (this process will be shown for this functional, while only the final results will be shown for the remaining variables). Substituting the index $j$ for $k$ to avoid confusion one can write, \begin{displaymath} f_j = \frac{g_j}{\overline{R}T} - \sum_{i=0}^{i=\overline{n_a}}\pi_i\eta_{ij} \hspace{15mm}j=0\rightarrow \overline{n_s} \end{displaymath} which when replacing $g_j$ with Eqs. \ref{eqn:gibbsk} and \ref{eqn:gibbsk2} yields, \begin{displaymath} f_j = \frac{g_j^o}{\overline{R}T} + \ln(\sigma_j) - \ln(\sigma_m) + \ln(\frac{p}{p^o}) - \sum_{i=0}^{i=\overline{n_a}}\pi_i\eta_{ij} \hspace{15mm}j=0\rightarrow \overline{n_s} \end{displaymath} Now when taking the derivative of the above with respect to $\ln(\sigma_k)$, one notices that all the terms are constant except for the $\ln(\sigma_j)$ terms, and this too is only a variable if $k = j$. Therefore the first derivative in Eq. \ref{eqn:newtfull} can be evaluated as, \begin{equation} \sum_{k=0}^{k=\overline{n_s}}\frac{\partial f_j}{\partial \ln(\sigma_k)} = \delta_{jk} \hspace{15mm}j=0\rightarrow \overline{n_s} \label{eqn:fkderiv1} \end{equation} Again, for the second derivative term in Eq. \ref{eqn:newtfull} all the terms in $f_j$ are constant but for $\ln(\sigma_m)$ thus, \begin{equation} \frac{\partial f_j}{\partial \ln(\sigma_m)} = -1 \hspace{15mm}j=0\rightarrow \overline{n_s} \label{eqn:fkderiv2} \end{equation} In order to evaluate the last derivative term from Eq. \ref{eqn:newtfull}, one can make use of the following relation from the Calculus, \begin{equation} \frac{\partial y}{\partial z} = \frac{\partial y}{\partial x} \frac{\partial x}{\partial z} \label{eqn:calculus} \end{equation} which when applied to the particular case here yields, \begin{equation} \frac{\partial f_s}{\partial \ln(T)} = T\frac{\partial f_s}{\partial T} \label{eqn:dlnt} \end{equation} Now since both $\pi_i$ and $\eta_{ij}$ are constants, the final derivative of $f_j$ with respect to temperature (or more exactly $\ln(T)$) can be reduced to (applying the result of Eq. \ref{eqn:dlnt}), \begin{equation} \frac{\partial f_j}{\partial \ln(T)} = - \frac{h_j}{\overline{R}T} \hspace{15mm}j=0\rightarrow \overline{n_s} \label{eqn:fkderiv3} \end{equation} Therefore, combining the results of Eqs. \ref{eqn:fkderiv1}, \ref{eqn:fkderiv2}, and \ref{eqn:fkderiv3} with Eq. \ref{eqn:newtfull} and remembering that these results apply over all the species one obtains (rearranging and re-substituting $k$ for $j$ while removing the Kronecker delta), \begin{equation} \fbox{$ \Delta \ln(\sigma_k) = \Delta \ln(\sigma_m) + \frac{h_k}{\overline{R}T}\Delta \ln(T) - \frac{g_k}{\overline{R}T} + \sum_{i=0}^{i=\overline{n_a}}\pi_i\eta_{ik} \hspace{15mm}k=0\rightarrow \overline{n_s} $} \label{eqn:dsigmak} \end{equation} With this equation, if one can solve for $\Delta \ln(\sigma_m)$, $\Delta \ln(T)$, and $\pi_i$ (of which there are $n_a$ of the latter variable) then one can use this result to obtain the equilibrium molar amounts of each species being considered in the combustion process. To solve for these variables, the remaining functional equations derived earlier can be used. Using Eq. \ref{eqn:fi}, taking its derivatives and substituting the results into Eq. \ref{eqn:newtfull} yields, \begin{displaymath} \sum_{k=0}^{k=\overline{n_s}}\sigma_k\eta_{ik}\Delta \ln(\sigma_k) = - \sum_{k=0}^{k=\overline{n_s}}\eta_{ik}\sigma_k + b_i^* \hspace{15mm}i=0\rightarrow \overline{n_a} \label{eqn:one} \end{displaymath} Replacing $\Delta \ln(\sigma_k)$ by using Eq. 
\ref{eqn:dsigmak} in the above and rearranging yields, \begin{equation} \fbox{$ \begin{array}{c} \sum_{j=0}^{j=\overline{n_a}}\Big\{\sum_{k=0}^{k=\overline{n_s}}\sigma_k\eta_{ik}\eta_{jk}\Big\}\pi_j + \Big\{\sum_{k=0}^{k=\overline{n_s}}\sigma_k\eta_{ik}\Big\}\Delta \ln(\sigma_m) \\+ \Big\{\sum_{k=0}^{k=\overline{n_s}}\sigma_k\eta_{ik}\frac{h_k}{\overline{R}T}\Big\}\Delta \ln(T) \end{array} = b_i^* - \sum_{k=0}^{k=\overline{n_s}}\sigma_k\eta_{ik} + \sum_{k=0}^{k=\overline{n_s}}\sigma_k \eta_{ik}\frac{g_k}{\overline{R}T} $} \label{eqn:final1} \end{equation} where $i=0\rightarrow \overline{n_a}$. Considering next the functional in Eq. \ref{eqn:fm} and repeating the derivative process yields after substitution into Eq. \ref{eqn:newtfull}, \begin{displaymath} \sum_{k=0}^{k=\overline{n_s}}\sigma_k \Delta \ln(\sigma_k) - \sigma_m \Delta \ln(\sigma_m) = - \sum_{k=0}^{k=\overline{n_s}}\sigma_k - \sigma_m \label{eqn:two} \end{displaymath} Again replacing $\Delta \ln(\sigma_k)$ by using Eq. \ref{eqn:dsigmak} in the above yields (after some rearranging), \begin{equation} \fbox{$ \begin{array}{c} \sum_{i=0}^{i=\overline{n_a}}\Big\{\sum_{k=0}^{k=\overline{n_s}}\eta_{ik}\sigma_k\Big\}\pi_i + \Big\{\sum_{k=0}^{k=\overline{n_s}}\sigma_k - \sigma_m\Big\} \Delta \ln(\sigma_m) \\+ \Big\{\sum_{k=0}^{k=\overline{n_s}}\sigma_k\frac{h_k}{\overline{R}T}\Big\}\Delta \ln(T) \end{array} =\sigma_m + \sum_{k=0}^{k=\overline{n_s}}\sigma_k(\frac{g_k}{\overline{R}T} - 1) $} \label{eqn:final2} \end{equation} The results from the last functional (Eq. \ref{eqn:ft}) can be expressed as, \begin{displaymath} \sum_{k=0}^{k=\overline{n_s}}\sigma_k\frac{h_k}{\overline{R}T}\Delta \ln(\sigma_k) + \sum_{k=0}^{k=\overline{n_s}}\sigma_k\frac{c_{p_k}}{\overline{R}}\Delta \ln(T) = -\sum_{k=0}^{k=\overline{n_s}}\sigma_k\frac{h_k}{\overline{R}T} + \frac{H^*}{\overline{R}T} \end{displaymath} which after substituting Eq. \ref{eqn:dsigmak} and rearranging becomes, \begin{equation} \fbox{$ \begin{array}{c} \sum_{i=0}^{i=\overline{n_a}}\Big\{\sum_{k=0}^{k=\overline{n_s}}\sigma_k\eta_{ik}\frac{h_k} {\overline{R}T}\Big\}\pi_i + \Big\{\sum_{k=0}^{k=\overline{n_s}}\sigma_k\frac{h_k} {\overline{R}T}\Big\}\Delta \ln(\sigma_m) \\+ \Big\{\sum_{k=0}^{k=\overline{n_s}}\sigma_k [\frac{c_{p_k}}{\overline{R}} + (\frac{h_k}{\overline{R}T})^2]\Big\}\Delta \ln(T) \end{array} = \frac{H^*}{\overline{R}T} + \sum_{k=0}^{k=\overline{n_s}}\sigma_k\frac{h_k}{\overline{R}T} (\frac{g_k}{\overline{R}T} - 1) $} \label{eqn:final3} \end{equation} Using Eqs. \ref{eqn:final1}, \ref{eqn:final2}, and \ref{eqn:final3} one now has $n_a + 2$ number of equations using only $n_a + 2$ number of distinct variables ($\pi_1 \ldots \pi_{n_a}, \Delta \ln(\sigma_m),$ and $\Delta \ln(T)$). Furthermore, these equations form a system of linear equations which can be readily solved using Gaussian elimination. 
If written in the form $Ax = R$ where $A$ is the coefficient matrix, $x$ is the solution vector, and $R$ is a column vector of known quantities then the system looks like, \begin{equation} A = \left[ \begin{array}{ccccc} \sum_{k=0}^{k=\overline{n_s}}\eta_{0k}\eta_{0k}\sigma_k & \ldots & \sum_{k=0}^{k=\overline{n_s}}\eta_{0k}\eta_{\overline{n_a}k}\sigma_k & \sum_{k=0}^{k=\overline{n_s}}\eta_{0k}\sigma_k & \sum_{k=0}^{k=\overline{n_s}}\eta_{0k} \sigma_k\frac{h_k}{\overline{R}T} \\ \\ \vdots & \ddots & \vdots & \vdots & \vdots \\ \\ \sum_{k=0}^{k=\overline{n_s}}\eta_{\overline{n_a}k}\eta_{0k}\sigma_k & \ldots & \sum_{k=0}^{k=\overline{n_s}}\eta_{\overline{n_a}k}\eta_{\overline{n_a}k}\sigma_k & \sum_{k=0}^{k=\overline{n_s}}\eta_{\overline{n_a}k}\sigma_k & \sum_{k=0}^{k=\overline{n_s}}\eta_{\overline{n_a}k}\sigma_k\frac{h_k}{\overline{R}T} \\ \\ \sum_{k=0}^{k=\overline{n_s}}\eta_{0k}\sigma_k & \ldots & \sum_{k=0}^{k=\overline{n_s}}\eta_{\overline{n_a}k}\sigma_k & \sum_{k=0}^{k=\overline{n_s}}\sigma_k - \sigma_m & \sum_{k=0}^{k=\overline{n_s}}\sigma_k\frac{h_k}{\overline{R}T} \\ \\ \sum_{k=0}^{k=\overline{n_s}}\eta_{0k}\sigma_k\frac{h_k}{\overline{R}T} & \ldots & \sum_{k=0}^{k=\overline{n_s}}\eta_{\overline{n_a}k}\sigma_k\frac{h_k}{\overline{R}T} & \sum_{k=0}^{k=\overline{n_s}}\sigma_k\frac{h_k}{\overline{R}T} & \sum_{k=0}^{k=\overline{n_s}}\sigma_k[\frac{c_{p_k}}{\overline{R}} + (\frac{h_k}{\overline{R}T})^2] \end{array} \right] \label{eqn:matrix} \end{equation} with \begin{equation} x = \left[ \begin{array}{c} \pi_1 \\ \\ \vdots \\ \\ \pi_{n_a} \\ \\ \Delta \ln(\sigma_m) \\ \\ \Delta \ln(T) \end{array} \right] \hspace{2cm} R = \left[ \begin{array}{c} b_0^* + \sum_{k=0}^{k=\overline{n_s}}\eta_{0k}\sigma_k(\frac{g_k}{\overline{R}T} - 1) \\ \\ \vdots \\ \\ b_{\overline{n_a}}^* + \sum_{k=0}^{k=\overline{n_s}}\eta_{\overline{n_a}k}\sigma_k (\frac{g_k}{\overline{R}T} - 1) \\ \\ \sigma_m + \sum_{k=0}^{k=\overline{n_s}}\sigma_k(\frac{g_k}{\overline{R}T} - 1) \\ \\ \frac{H^*}{\overline{R}T} + \sum_{k=0}^{k=\overline{n_s}}\sigma_k\frac{h_k}{\overline{R}T} (\frac{g_k}{\overline{R}T} - 1) \end{array} \right] \label{eqn:r} \end{equation} The actual change in kmols of each species can be found using the solution vector $x$ and Eq. \ref{eqn:dsigmak}. %---------------------------------------------------------------------------------------------------- \subsection{Numerical Solution} The numerical solution of the Gibbs Minimization Technique involves the solution of the system of equations described by Eqs. \ref{eqn:matrix} and \ref{eqn:r}. However, depending on the initial values of the amounts of kmols of each species and the initial equilibrium temperature (which are in essence simply a guess at the equilibrium solution), the final answer is obtained only after several inversions of the matrix in Eq. \ref{eqn:matrix} (one must be careful to distinguish between the initial conditions for combustion [i.e. total temperature and fuel to oxidizer ratio, which do not change and are used for calculating properties such as $H^*$ and $b_i^*$] and the initial guess at the equilibrium solution [i.e. the initial values of $\sigma_k$ and $T$]). In order to ensure that the numerical process remains stable, a relaxation parameter is used which ensures that the rates of change do not rise so fast as to lead to numerical instability of the iterative process. As the process approaches the equilibrium solution, the relaxation parameter goes to 1 in essence removing any damping near the final answer. 
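To make this procedure concrete, a minimal Python sketch of a single iteration is given below: it assembles the coefficient matrix of Eq. \ref{eqn:matrix} and the right hand side of Eq. \ref{eqn:r}, solves for the correction vector, and recovers the species corrections from Eq. \ref{eqn:dsigmak}. The array names (\texttt{h\_RT}, \texttt{g\_RT}, \texttt{cp\_R}, \texttt{eta}, \texttt{b\_star}, \texttt{H\_RT}) are illustrative only and are assumed to be supplied from thermodynamic curve fits and the combustion chamber inputs; how the resulting corrections are relaxed and applied is described next.
\begin{lstlisting}
import numpy as np

def gibbs_newton_step(sigma, sigma_m, h_RT, g_RT, cp_R, eta, b_star, H_RT):
    """One Newton-Raphson step of the Gibbs minimization scheme (a sketch).

    sigma   : kmols of each species (length n_s)
    sigma_m : current total kmols of the mixture
    h_RT    : h_k/(R*T) for each species
    g_RT    : g_k/(R*T) for each species (Gibbs energy of Eq. gibbsk)
    cp_R    : cp_k/R for each species
    eta     : eta[i, k] = kg-atoms of atom i per kmol of species k
    b_star  : total kg-atoms of each atom type (length n_a)
    H_RT    : H*/(R*T), the conserved total enthalpy
    Returns the corrections (d_ln_sigma_k, d_ln_sigma_m, d_ln_T).
    """
    n_a = eta.shape[0]
    n = n_a + 2                      # unknowns: pi_i, d_ln_sigma_m, d_ln_T
    A = np.zeros((n, n))
    R = np.zeros(n)

    # Atom-conservation rows (Eq. final1)
    for i in range(n_a):
        for j in range(n_a):
            A[i, j] = np.sum(sigma * eta[i] * eta[j])
        A[i, n_a] = np.sum(sigma * eta[i])
        A[i, n_a + 1] = np.sum(sigma * eta[i] * h_RT)
        R[i] = b_star[i] + np.sum(eta[i] * sigma * (g_RT - 1.0))

    # Total-kmol row (Eq. final2)
    A[n_a, :n_a] = eta @ sigma
    A[n_a, n_a] = np.sum(sigma) - sigma_m
    A[n_a, n_a + 1] = np.sum(sigma * h_RT)
    R[n_a] = sigma_m + np.sum(sigma * (g_RT - 1.0))

    # Enthalpy-conservation row (Eq. final3)
    A[n_a + 1, :n_a] = eta @ (sigma * h_RT)
    A[n_a + 1, n_a] = np.sum(sigma * h_RT)
    A[n_a + 1, n_a + 1] = np.sum(sigma * (cp_R + h_RT**2))
    R[n_a + 1] = H_RT + np.sum(sigma * h_RT * (g_RT - 1.0))

    x = np.linalg.solve(A, R)        # Gaussian elimination on A x = R
    pi, d_ln_sigma_m, d_ln_T = x[:n_a], x[n_a], x[n_a + 1]

    # Species corrections from Eq. dsigmak
    d_ln_sigma_k = d_ln_sigma_m + h_RT * d_ln_T - g_RT + eta.T @ pi
    return d_ln_sigma_k, d_ln_sigma_m, d_ln_T
\end{lstlisting}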
The relaxation parameter is calculated as follows. For species whose kmol amounts are greater than some threshold value (the limit below which a species is considered minor, e.g. $1 \times 10^{-8}$ kmols) and whose $\Delta \ln(\sigma_k)$ is positive, one calculates a variable $\beta_1$,
\begin{equation}
\beta_1=\frac{2}{\max\Big(\vert\Delta \ln(T)\vert, \vert\Delta \ln(\sigma_m)\vert, \vert\Delta \ln(\sigma_k)\vert\Big)}
\label{eqn:beta1}
\end{equation}
while for the remaining species (those below the threshold value, the minor species) with positive $\Delta \ln(\sigma_k)$ values one calculates $\beta_2$ as
\begin{equation}
\beta_2=\frac{\ln(1 \times 10^{-4}) - \ln(\sigma_k) + \ln(\sigma_m)}{\Delta \ln(\sigma_k) - \Delta \ln(\sigma_m)}
\label{eqn:beta2}
\end{equation}
With these two values found, one then calculates the final relaxation parameter, $\beta$, as the minimum of these two values and 1,
\begin{equation}
\beta= \min(1,\beta_1, \beta_2)
\label{eqn:beta}
\end{equation}
With the relaxation parameter calculated one can find new values of the desired variables using the relations,
\begin{displaymath}
\ln(T_{new}) = \ln(T_{old}) + \beta \Delta \ln(T)
\end{displaymath}
\begin{displaymath}
\ln(\sigma_{m_{new}}) = \ln(\sigma_{m_{old}}) + \beta \Delta \ln(\sigma_m)
\end{displaymath}
\begin{displaymath}
\ln(\sigma_{k_{new}}) = \ln(\sigma_{k_{old}}) + \beta \Delta \ln(\sigma_k) \hspace{15mm}k=0 \rightarrow \overline{n_s}
\end{displaymath}
The equilibrium solution is obtained when some suitably defined convergence tolerance is met, such as
\begin{equation}
\frac{\vert\sum_{k=0}^{k=\overline{n_s}}\sigma_k - \sigma_m\vert}{\sigma_m} < 1 \times 10^{-7}
\label{eqn:converge}
\end{equation}
The last point to note in solving this system is the selection of initial conditions. It has been found that the following initial conditions work very well, leading to a converged solution for most cases in under approximately 30 iterations,
\begin{table}[h]
\begin{center}
\fontsizetable
\begin{threeparttable}
\tablecaption{Suggested Initial Conditions}
\begin{tabular}{ccc}
\toprule
$\sigma_m$ [kmol] & $\sigma_k$ [kmol] & $T_{eq}$ [K] \\
\midrule
$0.1$ & $0.1/n_s ^*$ & $3800$ \\
\bottomrule
\end{tabular}
\begin{tablenotes}
\item[*] where $n_s$ is the total number of species under consideration
\end{tablenotes}
\label{table:initial}
\end{threeparttable}
\end{center}
\end{table}
%--------------------------------------------------------------------------------------------
\section{Axisymmetric Nozzle Design Theory}
For the nozzle design methodology, the inviscid flow of a perfect gas ($\gamma$ = constant) will be assumed throughout the derivation (a viscous correction being applied after an inviscid contour is determined). It will also be assumed that the final nozzle design will produce radial flow over a given region downstream of the sonic point, allowing certain pertinent radial flow equations to be used when determining nozzle properties. The remaining portions of the nozzle will then be determined based on the existing radial flow region. In this fashion, the desired Mach number distribution along the nozzle centreline will be determined, and a nozzle shape will be found which produces this set distribution. Also, the expansion process through the nozzle will be assumed to occur without chemical reactions (i.e. frozen flow), thereby keeping the mixture composition constant from the combustion chamber to the nozzle exit. The various flow zones within the diverging portion of the nozzle are shown in Fig. \ref{fig:nozzle}.
The radial flow portion of the nozzle is bounded by the points B and D, and thus all radial flow equations apply only within this region. The distance $\epsilon$ is the distance downstream of the geometric throat at which the flow along the axisymmetric axis is sonic (note that for real nozzles the sonic line is not straight but slightly curved, shown in the figure as the line extending from the throat roof to point A). Point O is the origin of the radial flow region, which does not necessarily co-incide with the geometric throat, and point F is the location at which the flow has reached the design exit Mach number (note that here, the angle of the characteristic originating from point F is simply $\sin^{-1}(\frac{1}{M_F})$). Point C is the location of the maximum expansion angle, which is also an inflection point since the nozzle is restricted from increasing in curvature past this point.
\begin{figure}[hb]
\begin{center}
\psfrag{a}[c][c][0.6][0]{$\epsilon$}
\psfrag{s}[c][c][0.6][0]{A}
\psfrag{b}[c][c][0.6][0]{B}
\psfrag{c}[c][c][0.6][0]{C}
\psfrag{d}[c][c][0.6][0]{D}
\psfrag{h}[c][c][0.6][0]{F}
\psfrag{z}[c][c][0.6][0]{$X_{AB}$}
\psfrag{v}[c][c][0.6][0]{$X_{DF}$}
\psfrag{r}[c][c][0.5][0]{$\rho_t = Rh$}
\psfrag{x}[c][c][0.5][0]{$\omega$}
\psfrag{y}[c][c][0.5][0]{$h$}
\psfrag{p}[c][c][0.5][0]{$h_{exit}$}
\psfrag{o}[c][c][0.6][0]{O}
\psfrag{w}[c][c][0.5][0]{$r$}
\includegraphics[width=13cm,height=7cm]{nozzle.eps}
\caption{Nomenclature for Nozzle Design}
\label{fig:nozzle}
\end{center}
\end{figure}
%----------------------------------------------------------------------------------------------------
\subsection{Inviscid Nozzle Design}
\begin{equation}
\vec{q}\cdot\nabla(\frac{q^2}{2}) - a^2(\nabla \cdot \vec{q}) = 0
\label{eqn:gasdynq}
\end{equation}
The \emph{gas dynamic equation} as shown above can be used as the starting point for developing the properties of a radial flow region (noting that $\vec{q}$ is the velocity vector, $q$ is the magnitude of the velocity vector, and $a$ is the speed of sound). In spherical co-ordinates (which are the most convenient to use when considering a radial flow, since the flow travels outwards in all directions from a single source) the divergence of a vector can be expressed as,
\begin{equation}
\nabla\cdot\vec{q} = \frac{1}{r^2}\frac{\partial}{\partial r}(r^2q_r) + \frac{1}{r \sin\theta}\frac{\partial}{\partial \theta}(q_{\theta}\sin\theta ) + \frac{1}{r \sin\theta}\frac{\partial q_{\phi}}{\partial \phi}
\label{eqn:delq}
\end{equation}
Applying the radial flow condition requires that the velocity in both angular directions be zero (i.e. $q_{\theta} = q_{\phi} = 0$) thus reducing the above operator to,
\begin{equation}
\nabla\cdot\vec{q} = \frac{1}{r^2}\frac{d}{dr}(r^2q_r) = \frac{dq}{dr} + 2\frac{q}{r}
\label{eqn:raddelq}
\end{equation}
where $q_r = q$ since there is only velocity in one co-ordinate direction. The $\nabla$ operator on a scalar quantity in spherical co-ordinates can be written as,
\begin{equation}
\nabla q = \frac{\partial q}{\partial r} + \frac{1}{r}\frac{\partial q}{\partial \theta} + \frac{1}{r \sin \theta}\frac{\partial q}{\partial \phi}
\label{eqn:delscalq}
\end{equation}
which after applying the radial flow constraints reduces to,
\begin{equation}
\nabla q = \frac{dq}{dr}
\label{eqn:raddelscalq}
\end{equation}
Applying the results of Eqs. \ref{eqn:raddelq} and \ref{eqn:raddelscalq} to Eq.
\ref{eqn:gasdynq} (and noting that $\vec{q} = q_r = q$ for radial flow) yields,
\begin{equation}
\fbox{$ (M^2 - 1)\frac{dq}{dr} - 2\frac{q}{r} = 0 $}
\label{eqn:radial}
\end{equation}
This equation is the governing equation for inviscid radial flow.
%---------------------------------------------------------------------------------------------
\subsubsection{Radial Flow Region}
Starting with the fundamental radial flow equation derived in Eq. \ref{eqn:radial}, one can determine the relation between the Mach number at a given location and the distance of this point from the radial flow source. This is a key relationship given that it is the Mach number distribution along the nozzle centreline (which is simply the radius along a particular ray from the radial flow source) that will be used to dictate the final nozzle contour. The equations developed in this section apply between points B and D in Fig. \ref{fig:nozzle}. Starting by defining a non-dimensional velocity as,
\begin{equation}
\fbox{$ W = \frac{q}{a^*} $}
\label{eqn:w}
\end{equation}
where $a^*$ is the critical speed of sound (at M = 1), this non-dimensional velocity can be related to the Mach number as follows. Since $Ma = q$ and $Wa^*=q$ one can write,
\begin{displaymath}
(\frac{M}{W})^2 = (\frac{a^*}{a})^2
\end{displaymath}
while from isentropic relations one can express the ratio of sound speeds as,
\begin{equation}
(\frac{a^*}{a})^2 = \frac{2}{(\gamma + 1)}\Big\{1 + \frac{\gamma-1}{2}M^2\Big\}
\label{eqn:isenaratio}
\end{equation}
thus one can write,
\begin{displaymath}
(\gamma+1)M^2 = W^2\Big\{2+(\gamma-1)M^2\Big\}
\end{displaymath}
After isolating for $W^2$ and dividing through by $(\gamma -1)$, while defining $k$ as,
\begin{equation}
\fbox{$ k = \frac{\gamma+1}{\gamma-1} $}
\label{eqn:k}
\end{equation}
one can rewrite the above expression as,
\begin{equation}
\fbox{$ W^2 = \frac{kM^2}{k-1 + M^2} $}
\label{eqn:wfromm}
\end{equation}
Isolating $M^2$ in Eq. \ref{eqn:wfromm} yields an expression for the Mach number from the non-dimensional velocity,
\begin{equation}
\fbox{$ M^2 = \frac{W^2(k-1)}{k-W^2} $}
\label{eqn:mfromw}
\end{equation}
Going back to the governing equation for radial flow (Eq. \ref{eqn:radial}) and rearranging slightly,
\begin{displaymath}
\frac{M^2-1}{2q} dq = \frac{1}{r} dr
\end{displaymath}
one can use the definition of the non-dimensional velocity (Eq. \ref{eqn:w}) from which one can write $dq = a^* dW$ and when combined with Eq. \ref{eqn:mfromw} allows the above relation to be expressed completely in terms of $W$.
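Since Eqs. \ref{eqn:wfromm} and \ref{eqn:mfromw} are used repeatedly in the remainder of the design procedure, a short Python sketch of their evaluation for a perfect gas with constant $\gamma$ is given here; the function names are illustrative only.
\begin{lstlisting}
import math

def k_const(gamma):
    """k = (gamma + 1)/(gamma - 1), Eq. k."""
    return (gamma + 1.0) / (gamma - 1.0)

def w_from_m(M, gamma):
    """Non-dimensional velocity W = q/a* from the Mach number, Eq. wfromm."""
    k = k_const(gamma)
    return math.sqrt(k * M**2 / (k - 1.0 + M**2))

def m_from_w(W, gamma):
    """Mach number from the non-dimensional velocity, Eq. mfromw."""
    k = k_const(gamma)
    return math.sqrt(W**2 * (k - 1.0) / (k - W**2))
\end{lstlisting}
For example, the non-dimensional exit velocity $W_F$ follows directly from the design exit Mach number as \lstinline!w_from_m(M_exit, gamma)!.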
Integrating the resulting expression (now written entirely in terms of $W$) from the sonic point (at which point $r = r_{cr}$ and $W = M = 1$) to some general point $r$ one obtains,
\begin{equation}
\int_{r_{cr}}^{r}\frac{1}{r} dr = \frac{1}{2}\Big\{\int_{W=1}^{W}\frac{M^2}{W}dW - \int_{W=1}^{W}\frac{1}{W}dW\Big\}
\label{eqn:rintraw}
\end{equation}
Equation \ref{eqn:rintraw} can be integrated analytically if one assumes a perfect gas (hence $k$ is a constant with respect to the variable of integration $W$) by letting $x = W^2$ (and thus $\frac{1}{2}dx = W dW$),
\begin{displaymath}
\ln(\frac{r}{r_{cr}}) = \frac{1}{2}\Big\{(k-1)\frac{1}{2}\int_{W=1}^{W}\frac{1}{k-x}dx - \ln(W)\vert_{W=1}^{W}\Big\}
\end{displaymath}
Re-substituting back for $W$ and applying the limits yields,
\begin{displaymath}
\ln(\frac{r}{r_{cr}}) = \ln\Big\{\frac{(k-W^2)^{\frac{1}{4}(1-k)}}{W^{\frac{1}{2}} (k-1)^{\frac{1}{4}(1-k)}}\Big\}
\end{displaymath}
Taking the exponential of both sides and defining a non-dimensional radius, $\overline{r}$, as,
\begin{equation}
\fbox{$ \overline{r} = \frac{r}{r_{cr}} $}
\label{eqn:rbar}
\end{equation}
yields,
\begin{displaymath}
\overline{r} = \frac{(k-1)^{\frac{1}{4}(k-1)}}{W^{\frac{1}{2}}(k-W^2)^{\frac{1}{4}(k-1)}}
\end{displaymath}
To phrase this result in a more usable form, one can square both sides while multiplying the right side by $W^k/W^k$, substituting for $M$ using Eq. \ref{eqn:mfromw}, and rearranging to get,
\begin{displaymath}
\overline{r}^2 = \frac{M^k}{W^k}\frac{1}{M} = (\frac{M^2}{W^2})^{\frac{k}{2}}\frac{1}{M}
\end{displaymath}
But from Eq. \ref{eqn:wfromm} one can express $W$ in terms of $M$ to yield,
\begin{equation}
\fbox{$ \overline{r}^2 = \frac{1}{M}\Big\{\frac{k-1+M^2}{k}\Big\}^{\frac{k}{2}} $}
\label{eqn:rbarfromm}
\end{equation}
Equation \ref{eqn:rbarfromm} now gives us a means of determining the Mach number vs. non-dimensional radius within the radial flow region of the nozzle. Another key quantity in the radial flow region is the integrated angle, $\theta$, which for radial flows can be shown to be equal to half the Prandtl-Meyer expansion angle for 2D flows. This angle represents the angle through which a supersonic flow must be turned in order to be accelerated from a Mach number of 1 to the given Mach number M, and can be expressed for a perfect gas as,
\begin{equation}
\fbox{$ \theta = \frac{1}{2}\Big\{k^{\frac{1}{2}}\tan^{-1}\Big[\frac{M^2-1}{k}\Big]^{\frac{1}{2}} - \tan^{-1}(M^2-1)^{\frac{1}{2}}\Big\} $}
\label{eqn:intang}
\end{equation}
This angle can be used to help determine the boundaries of the radial flow region, as although Eq. \ref{eqn:rbarfromm} will yield a radial position for a given Mach number, the Mach numbers at points D, C, and B are all unknown. However, from the geometry of the nozzle itself, it can be seen that the difference between the integrated angles at points D and C must be equal to the physical angle COD in Fig. \ref{fig:nozzle}, which is the set maximum expansion angle $\omega$. Therefore, if the Mach number at point D is known, then so is the integrated angle at this point. With this one can then fully determine point C using the maximum expansion angle and Eq. \ref{eqn:intang} to get the Mach number at C, then Eq. \ref{eqn:rbarfromm} to get the radial position of this point from the previously determined $M_C$. The same procedure can be used to find point B, only in this case the difference between the integrated angles at points D and B is equal to twice the maximum expansion angle. Thus, using Eqs.
\ref{eqn:rbarfromm} and \ref{eqn:intang} one can fully determine the Mach number distribution within the radial flow region provided the Mach number at point D is known. %--------------------------------------------------------------------------------------------- \subsubsection{Final Expansion Region} From the previous section, all that is required to fully describe the radial flow region is the Mach number at the end of this region, $M_D$. However, from the set nozzle parameters only the Mach number at point F is known since at this point the flow has been accelerated to the exit Mach number. Let the Mach number distribution between points D and F be described by a fifth order polynomial of the form, \begin{equation} W = D_0 + D_1\xi + D_2\xi^2 + D_3\xi^3 + D_4\xi^4 + D_5\xi^5 \label{eqn:dpoly} \end{equation} where along the axisymmetric axis, \begin{displaymath} \xi = \frac{\overline{r}-\overline{r}_D}{\overline{X}_{DF}} \end{displaymath} It should be noted that all the non-dimensional quantities above are non-dimensionalized by the critical radius, as was done in Eq. \ref{eqn:rbar}. In order to determine the co-efficients $D_0$ through $D_5$ one must make use of six boundary conditions. Therefore, specifying the velocity and its derivatives up to second order at each of the boundaries gives, \begin{table}[!h] \fontsizetable \begin{center} \begin{threeparttable} \tablecaption{Non-Dimensional Boundary Conditions} \begin{tabular}{ccccc} \toprule Point & $\xi$ & Velocity ($W$) & Acceleration ($W'$) & Impulse ($W''$)\\ \midrule D & 0 &$W_D$ & $(\frac{dW}{d\overline{r}})_D = W'_D$ & $(\frac{d^2W}{d\overline{r}^2})_D = W''_D$\\ F & 1 &$W_F$ & 0 & 0 \\ \bottomrule \end{tabular} %\begin{tablenotes} %\end{tablenotes} \label{table:dpoly} \end{threeparttable} \end{center} \end{table} where it is noted that, \begin{equation} \frac{dW}{d\xi} = D_1 + 2D_2\xi + 3D_3\xi^2 + 4D_4\xi^3 + 5D_5\xi^4 \label{eqn:dpolyfirst} \end{equation} \begin{equation} \frac{d^2W}{d\xi ^2} = 2D_2 + 6D_3\xi + 12D_4\xi^2 + 20D_5\xi^3 \label{eqn:dpolysecond} \end{equation} At point D, since $\xi = 0$, applying the first boundary condition yields a direct solution for $D_0$, \begin{equation} \fbox{$ D_0 = W(0) = W_D $} \label{eqn:d0} \end{equation} and similarly when applying the acceleration condition at point D one obtains $D_1$ (while noting that $d\xi X_{DF} = d\overline{r}$), \begin{equation} \fbox{$ D_1 = \frac{dW(0)}{d\xi} = \overline{X}_{DF}\frac{dW(0)}{d\overline{r}} = \overline{X}_{DF}W'_D $} \label{eqn:d1} \end{equation} Applying the last boundary condition at point D yields, \begin{equation} \fbox{$ D_2 = \frac{1}{2} \overline{X}_{DF}^2 W''_D $} \label{eqn:d2} \end{equation} Moving on to point F, skipping the velocity boundary condition for the moment and applying the acceleration condition yields, \begin{displaymath} D_4 = -\frac{1}{4}(D_1 + 2D_2 + 3D_3 + 5D_5) \end{displaymath} Using the impulse condition at point F yields for $D_5$, \begin{displaymath} D_5 = -\frac{1}{20}(2D_2 + 6D_3 + 12D_4) \end{displaymath} which after using the previous result for $D_4$ gives, \begin{displaymath} D_5 = \frac{1}{5}(3D_1 + 4D_2 + 3D_3) \end{displaymath} Now using the velocity condition at F and isolating $D_3$ yields, \begin{displaymath} D_3 = W_F - D_0 - D_1 - D_2 - D_4 - D_5 \end{displaymath} which when the results for $D_0, D_4,$ and $D_5$ are substituted results in, \begin{equation} \fbox{$ D_3 = 10(W_F - W_D) -6D_1 -3D_2 $} \label{eqn:d3} \end{equation} Going back and replacing $D_3$ in the expressions for $D_4$ and 
$D_5$ yields,
\begin{equation}
\fbox{$ D_4 = -15(W_F - W_D) + 8D_1 + 3D_2 $}
\label{eqn:d4}
\end{equation}
\begin{equation}
\fbox{$ D_5 = 6(W_F - W_D) - 3D_1 - D_2 $}
\label{eqn:d5}
\end{equation}
At this point, it is noted that there are four apparent unknowns in the various co-efficients, $W_D, W_D', W_D''$, and $\overline{X}_{DF}$. In order to ensure a smooth increase in velocity along the axisymmetric axis between points D and F, it can be assumed that $D_5 = 0$, which then allows for the solution of $\overline{X}_{DF}$ from Eq. \ref{eqn:d5} as,
\begin{equation}
\overline{X}_{DF} = -\frac{3W_D'}{W_D''}\Big\{1 - \sqrt{1 + \frac{4W_D''(W_F-W_D)} {3W_D'^2}}\Big\}
\label{eqn:xdf}
\end{equation}
At this point we have reduced the number of unknowns to three, since the non-dimensional length $\overline{X}_{DF}$ is now a function of $W_D, W_D'$, and $W_D''$. Since point D is part of the radial flow region, the analytical expressions developed for this type of flow can be used here as well. Going back to Eq. \ref{eqn:rbarfromm} and using the relation expressed in Eq. \ref{eqn:mfromw} one can obtain an equation for $\overline{r}^2$ in terms of $W$,
\begin{equation}
\fbox{$ \overline{r}^2 = \frac{1}{W}\Big\{\frac{(k-1)}{(k-W^2)}\Big\}^{\frac{(k-1)}{2}} $}
\label{eqn:rbarfromw}
\end{equation}
Taking the natural logarithm of both sides and differentiating yields an expression for the first derivative of non-dimensional velocity with respect to the non-dimensional radius,
\begin{equation}
\frac{dW}{d\overline{r}}=\frac{2W(k-W^2)}{k(W^2-1)\overline{r}}=W'
\label{eqn:drdw}
\end{equation}
The second derivative can also be found from Eq. \ref{eqn:drdw} and be shown to be equal to,
\begin{equation}
\frac{d^2W}{d\overline{r}^2}=\frac{-W'^2[3k-6W^2+(k+2)W^4]}{2W(k-W^2)(W^2-1)} = W''
\label{eqn:d2rdw2}
\end{equation}
With Eqs. \ref{eqn:drdw} and \ref{eqn:d2rdw2} one can find the values of $W_D'$ and $W_D''$ from the value of $W_D$ alone, thus reducing the number of unknowns involved in determining the desired polynomial co-efficients to one, namely $W_D$. This was also the sole unknown required for the complete determination of the radial flow region. To determine this value, one can again make use of Eq. \ref{eqn:xdf} by noting that the value under the root must be positive to ensure a real solution. Therefore,
\begin{equation}
(W_F - W_D)\geq-\frac{3}{4}\frac{W_D'^2}{W_D''}
\label{eqn:wdiff}
\end{equation}
With this, one has a guide to selecting $W_D$, but the actual value must be chosen by the designer. As a good first guess, $W_F - W_D$ can be taken as $1.5\%$ of $W_F$ and increased until the criterion in Eq. \ref{eqn:wdiff} is satisfied. Once $W_F - W_D$ is determined, $W_D$ can be used to completely determine the centreline Mach number distribution from the beginning of the radial flow region through to the nozzle exit inclusive.
%---------------------------------------------------------------------------------------------
\subsubsection{Initial Expansion Region}
Having already established the Mach number distribution from the beginning of the radial flow region (point B, Fig. \ref{fig:nozzle}) to the nozzle exit, the only region remaining to be defined is the initial expansion region near the nozzle throat (from point A to B, Fig. \ref{fig:nozzle}). Since the final expansion region was modeled using a fourth order polynomial (recalling that the fifth co-efficient, $D_5$, was assumed zero), for consistency, the initial expansion region will be treated similarly.
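Before developing the initial expansion region in detail, the final expansion region calculation described above can be summarized in a short Python sketch. It evaluates the radial flow relations for $\overline{r}$, $W'$, and $W''$ (the expression for $\overline{r}^2$ in terms of $W$ together with Eqs. \ref{eqn:drdw} and \ref{eqn:d2rdw2}), obtains $\overline{X}_{DF}$ from Eq. \ref{eqn:xdf}, and then forms the polynomial co-efficients with $D_5 = 0$; the choice of $W_D$ is treated as a designer-supplied input, as discussed in connection with Eq. \ref{eqn:wdiff}.
\begin{lstlisting}
import math

def radial_flow_state(W, gamma):
    """rbar, W', and W'' at a point in the radial flow region
    (non-dimensional radius and Eqs. drdw and d2rdw2)."""
    k = (gamma + 1.0) / (gamma - 1.0)
    rbar = math.sqrt((1.0 / W) * ((k - 1.0) / (k - W**2))**((k - 1.0) / 2.0))
    Wp = 2.0 * W * (k - W**2) / (k * (W**2 - 1.0) * rbar)
    Wpp = (-Wp**2 * (3.0 * k - 6.0 * W**2 + (k + 2.0) * W**4)
           / (2.0 * W * (k - W**2) * (W**2 - 1.0)))
    return rbar, Wp, Wpp

def final_expansion_coefficients(W_D, W_F, gamma):
    """X_DF (Eq. xdf) and D0..D4 (with D5 assumed zero) for a trial W_D."""
    rbar_D, Wp_D, Wpp_D = radial_flow_state(W_D, gamma)
    radicand = 1.0 + 4.0 * Wpp_D * (W_F - W_D) / (3.0 * Wp_D**2)
    if radicand < 0.0:
        raise ValueError("no real root in Eq. xdf; adjust W_D (see Eq. wdiff)")
    X_DF = -(3.0 * Wp_D / Wpp_D) * (1.0 - math.sqrt(radicand))
    D0 = W_D
    D1 = X_DF * Wp_D
    D2 = 0.5 * X_DF**2 * Wpp_D
    D3 = 10.0 * (W_F - W_D) - 6.0 * D1 - 3.0 * D2
    D4 = -15.0 * (W_F - W_D) + 8.0 * D1 + 3.0 * D2
    return (D0, D1, D2, D3, D4), X_DF, rbar_D
\end{lstlisting}
In practice $W_F$ is obtained from the design exit Mach number via Eq. \ref{eqn:wfromm}, and the gap $W_F - W_D$ is adjusted until a real value of $\overline{X}_{DF}$ is obtained, in line with the discussion of Eq. \ref{eqn:wdiff}.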
Returning to the initial expansion region, one can thus write for the segment along the axisymmetric axis between points A and B,
\begin{equation}
W = C_0 + C_1\eta + C_2\eta^2 + C_3\eta^3 + C_4\eta^4
\label{eqn:cpoly}
\end{equation}
where along the axis,
\begin{displaymath}
\eta = \frac{\overline{r}-\overline{r}_A}{\overline{X}_{AB}}
\end{displaymath}
As for the final expansion region, all the non-dimensional lengths are divided by the critical radius (see Eq. \ref{eqn:rbar}). In order to determine the co-efficients $C_0$ through $C_4$ one again has the option of applying six boundary conditions. Specifying the velocity and its derivatives up to second order at each of the boundaries requires the derivatives of Eq. \ref{eqn:cpoly} up to second order while the boundary conditions themselves are,
\begin{table}[!h]
\fontsizetable
\begin{center}
\begin{threeparttable}
\tablecaption{Non-Dimensional Boundary Conditions}
\begin{tabular}{ccccc}
\toprule
Point & $\eta$& Velocity ($W$) & Acceleration ($W'$) & Impulse ($W''$)\\
\midrule
A & 0& 1 & $(\frac{dW}{d\overline{r}})_A = W'_A$ & $(\frac{d^2W}{d\overline{r}^2})_A = W''_A$\\
B & 1& $W_B$ & $(\frac{dW}{d\overline{r}})_B = W'_B$ & $(\frac{d^2W}{d\overline{r}^2})_B = W''_B$\\
\bottomrule
\end{tabular}
%\begin{tablenotes}
%\end{tablenotes}
\label{table:cpoly}
\end{threeparttable}
\end{center}
\end{table}
Since the radial flow region includes point B, the non-dimensional velocity and position at point B ($W_B$ and $\overline{r}_B$ respectively) are known quantities. As well, Eqs. \ref{eqn:drdw} and \ref{eqn:d2rdw2} can both be used to determine $W'_B$ and $W''_B$, making the boundary conditions at the end of the initial expansion region completely known. However, at the beginning of this region the flow has just reached sonic speed, thus the nozzle throat region near point A is an area of transonic flow. This requires a specialized technique to solve, since one dimensional theory predicts a straight sonic line at the exact geometric throat of the nozzle, whereas in real applications this line is curved with the sonic point lying a finite distance downstream of the geometric throat (labeled $\epsilon$ in Fig. \ref{fig:nozzle}) along the axisymmetric axis (Note: this line also \emph{starts} a finite distance upstream of the throat, but this region need not be considered in the present analysis). There are several methods available to describe this transonic region (Sauer's Method, Hall's Method, Kliegel's Method); however, Kliegel's Method (which is a modified version of Hall's Method) will be used here. In Kliegel's analysis the non-dimensional velocity distribution along the axisymmetric axis in the throat region is given in the form of an equation in terms of $S$ (where $S = R + 1$). In obtaining this solution, the throat region is assumed to be a circular arc of radius ($\rho_t$) while the parameter $R$ is the ratio of this radius to the half height, or radius, of the nozzle throat ($h$), thereby creating the relation $Rh = \rho_t$.
With the velocity distribution in this form, one can obtain its derivatives for use in application to the boundary conditions, where at point A,
\begin{equation}
(\frac{dW}{d\overline{r}})_A = \lambda R_1\Big\{1 - \frac{4\gamma-3}{24S} + \frac{652\gamma^2 + 15\gamma + 333}{6912S^2}\Big\}
\label{eqn:waprime}
\end{equation}
\begin{equation}
(\frac{d^2W}{d\overline{r}^2})_A = (\lambda R_1)^2\Big\{1 - \frac{2\gamma}{3} + \frac{4\gamma^2 + 69\gamma + 15}{96S}\Big\}
\label{eqn:wadprime}
\end{equation}
\begin{equation}
(\frac{d^3W}{d\overline{r}^3})_A = (\lambda R_1)^3\Big\{\frac{4\gamma^2 - 57\gamma + 27}{24}\Big\}
\label{eqn:watprime}
\end{equation}
where $\lambda = \sqrt{\frac{2}{(\gamma+1)S}}$ and $R_1 = \frac{r_{cr}}{h}$. As well as yielding these derivatives, Kliegel's analysis also yields the sonic point's distance downstream of the geometric nozzle throat ($\epsilon$) as,
\begin{equation}
\overline{\epsilon}=\frac{\epsilon}{r_{cr}}=\frac{1}{4\lambda S}\Big\{1 - \frac{4\gamma -15} {72S} + \frac{412\gamma^2 + 270\gamma + 909}{10368S^2}\Big\}
\label{eqn:epsbar}
\end{equation}
Therefore, if $S$ is known (which requires $R$ to be known) then the location of point A can be determined from Eq. \ref{eqn:epsbar}. However, the nozzle upper wall arc radius in the throat region, $\rho_t$, cannot simply be assigned a value like a design variable, but must be calculated based on further restrictions to be derived. In a manner similar to that in the final expansion region, the boundary conditions can be applied to Eq. \ref{eqn:cpoly} to obtain the various co-efficients $C_0$ through $C_4$. Starting with the boundary conditions at point A, through direct substitution (and noting that $d\eta X_{AB} = d\overline{r}$),
\begin{equation}
\fbox{$ C_0 = 1 $}
\label{eqn:c0}
\end{equation}
\begin{equation}
\fbox{$ C_1 = \overline{X}_{AB}W'_A $}
\label{eqn:c1}
\end{equation}
\begin{equation}
\fbox{$ C_2 = \frac{1}{2}\overline{X}_{AB}^2W''_A $}
\label{eqn:c2}
\end{equation}
Considering next the boundary conditions at point B, the velocity condition yields (while substituting for $C_0$),
\begin{displaymath}
C_3 = (W_B - 1) - C_1 - C_2 - C_4
\end{displaymath}
while the acceleration condition gives,
\begin{displaymath}
C_3 = \frac{1}{3}(\overline{X}_{AB}W'_B - C_1 - 2C_2 - 4C_4)
\end{displaymath}
Combining these two results yields for $C_4$,
\begin{equation}
\fbox{$ C_4 = -3(W_B -1) + W'_B\overline{X}_{AB} + 2C_1 + C_2 $}
\label{eqn:c4}
\end{equation}
and from back substitution one can get $C_3$ as,
\begin{equation}
\fbox{$ C_3 = 4(W_B -1) - W'_B\overline{X}_{AB} - 3C_1 - 2C_2 $}
\label{eqn:c3}
\end{equation}
At this point, one still has three unknowns, $W'_A$, $W''_A$ and $\overline{X}_{AB}$, in that although Eqs. \ref{eqn:waprime} and \ref{eqn:wadprime} can be used to find $W'_A$ and $W''_A$, the values of both $S$ and $R_1$ are needed to use these equations. Therefore, applying the remaining boundary condition at point B yields,
\begin{displaymath}
\frac{d^2W(1)}{d\eta ^2} = \overline{X}_{AB}^2\frac{d^2W(1)}{d\overline{r}^2} = \overline{X}_{AB}^2 W''_B = 2C_2 + 6C_3 + 12C_4
\end{displaymath}
Using Eqs. \ref{eqn:c2}, \ref{eqn:c3} and \ref{eqn:c4} to replace $C_2$, $C_3$, and $C_4$ respectively yields a quadratic expression for $\overline{X}_{AB}$,
\begin{equation}
(W''_A - W''_B)\overline{X}_{AB}^2 + 6(W'_A + W'_B)\overline{X}_{AB} - 12(W_B - 1) = 0
\label{eqn:xabone}
\end{equation}
This reduces $\overline{X}_{AB}$ to a function of both remaining unknowns, but one is still left with finding values for $S$ and $R_1$.
However, in applying Kliegel's Method, it is suggested that the second derivative of the nozzle contour in the region approximately one diameter downstream of the geometric throat approximate the shape of a circle or hyperbola. This criterion is best satisfied if the co-efficient $C_3$ is held equal to the value $\frac{1}{6}W'''_A \overline{X}_{AB}^3$,
\begin{displaymath}
C_3 = \frac{1}{6}W'''_A \overline{X}_{AB}^3 = 4(W_B -1) - W'_B\overline{X}_{AB} - 3C_1 - 2C_2
\end{displaymath}
which after using Eqs. \ref{eqn:c1} and \ref{eqn:c2} in the above yields,
\begin{equation}
W'''_A \overline{X}_{AB}^3 + 6W''_A\overline{X}_{AB}^2 + 6(W'_B + 3W'_A)\overline{X}_{AB} -24(W_B -1) = 0
\label{eqn:xabtwo}
\end{equation}
Unfortunately this does not completely define the problem, as the determination of the derivatives at point A is still problematic in that the variables $S$ and $R_1$ are not completely independent (as will be shown, $R_1$ depends on $S$). However, Eqs. \ref{eqn:xabone} and \ref{eqn:xabtwo} can be used as a means of checking the accuracy of the values for $S$ and $R_1$ given that once $\overline{X}_{AB}$ is found using a particular set of $S$ and $R_1$ values, one can then substitute this value and the corresponding derivative values into Eq. \ref{eqn:xabtwo} to check the result. If Eq. \ref{eqn:xabtwo} is satisfied, a solution has been obtained. Therefore, in order to determine the variables $S$ and $R_1$ (for use in finding the derivative values at point A) one can make use of the parameter $K^2$, which is the ratio of the actual mass flow passing through the nozzle throat (with a curved sonic line) to the mass flow passing through the nozzle assuming a straight sonic line (one dimensional flow). From Kliegel's Method this value can be expressed as,
\begin{equation}
K^2 = 1 - \frac{\gamma-1}{96S^2}\Big\{1 - \frac{8\gamma -27}{24S} + \frac{754\gamma^2 -757\gamma+3615}{2880S^2}\Big\}
\label{eqn:massflow}
\end{equation}
while the ratio of critical radius to throat radius, $R_1$, can be expressed as
\begin{equation}
R_1 = \frac{r_{cr}}{h} = \frac{1}{2}K \csc(\frac{\omega}{2})
\label{eqn:r1}
\end{equation}
Equations \ref{eqn:massflow} and \ref{eqn:r1} provide the link between $S$ and $R_1$. Similar to the final expansion region, however, one is now forced to guess at a value for one of the required variables and then iterate slowly until all the criteria are met. In this case, one must first guess at a value of $S$ (or $R$ since $S = R + 1$). With this, one can get a value for $R_1$ from Eqs. \ref{eqn:massflow} and \ref{eqn:r1} and with these values one can then use Eqs. \ref{eqn:waprime}, \ref{eqn:wadprime}, and \ref{eqn:watprime} to get $W'_A$, $W''_A$, and $W'''_A$ which in turn can be used in Eq. \ref{eqn:xabone} to find a value for $\overline{X}_{AB}$. This value is then checked using Eq. \ref{eqn:xabtwo} and if this equation is not satisfied then the value of $S$ can be changed until the convergence criterion is met. Once $\overline{X}_{AB}$, $W'_A$, and $W''_A$ are determined, the co-efficients $C_0$ through $C_4$ can be found, thereby completely determining the Mach number distribution along the axisymmetric axis in the initial expansion region.
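The iterative determination of $S$ just described lends itself to a compact Python sketch, given below. The quantities at point B ($W_B$, $W'_B$, $W''_B$) are assumed to have been obtained beforehand from the radial flow relations, the maximum expansion angle $\omega$ is taken in radians, and the bracketing interval and tolerance used for the bisection on $S$ are arbitrary illustrative choices rather than part of the method itself.
\begin{lstlisting}
import math

def throat_derivatives(S, gamma, omega):
    """W'_A, W''_A, W'''_A for a given S = R + 1
    (Eqs. massflow, r1, waprime, wadprime, watprime)."""
    K2 = 1.0 - (gamma - 1.0) / (96.0 * S**2) * (
        1.0 - (8.0 * gamma - 27.0) / (24.0 * S)
        + (754.0 * gamma**2 - 757.0 * gamma + 3615.0) / (2880.0 * S**2))
    R1 = 0.5 * math.sqrt(K2) / math.sin(0.5 * omega)
    lam = math.sqrt(2.0 / ((gamma + 1.0) * S))
    WpA = lam * R1 * (1.0 - (4.0 * gamma - 3.0) / (24.0 * S)
                      + (652.0 * gamma**2 + 15.0 * gamma + 333.0) / (6912.0 * S**2))
    WppA = (lam * R1)**2 * (1.0 - 2.0 * gamma / 3.0
                            + (4.0 * gamma**2 + 69.0 * gamma + 15.0) / (96.0 * S))
    WpppA = (lam * R1)**3 * (4.0 * gamma**2 - 57.0 * gamma + 27.0) / 24.0
    return WpA, WppA, WpppA

def xab_and_residual(S, gamma, omega, W_B, Wp_B, Wpp_B):
    """X_AB from the quadratic Eq. xabone and the residual of Eq. xabtwo."""
    WpA, WppA, WpppA = throat_derivatives(S, gamma, omega)
    a, b, c = WppA - Wpp_B, 6.0 * (WpA + Wp_B), -12.0 * (W_B - 1.0)
    roots = [(-b + math.sqrt(b**2 - 4.0 * a * c)) / (2.0 * a),
             (-b - math.sqrt(b**2 - 4.0 * a * c)) / (2.0 * a)]
    X_AB = max(roots)              # the positive root is the physical one
    res = (WpppA * X_AB**3 + 6.0 * WppA * X_AB**2
           + 6.0 * (Wp_B + 3.0 * WpA) * X_AB - 24.0 * (W_B - 1.0))
    return X_AB, res

def solve_for_S(gamma, omega, W_B, Wp_B, Wpp_B, S_lo=1.5, S_hi=20.0, tol=1e-10):
    """Bisect on S (assuming the bracket contains a sign change of Eq. xabtwo)."""
    f_lo = xab_and_residual(S_lo, gamma, omega, W_B, Wp_B, Wpp_B)[1]
    for _ in range(200):
        S_mid = 0.5 * (S_lo + S_hi)
        X_AB, f_mid = xab_and_residual(S_mid, gamma, omega, W_B, Wp_B, Wpp_B)
        if abs(f_mid) < tol:
            break
        if f_lo * f_mid < 0.0:
            S_hi = S_mid
        else:
            S_lo, f_lo = S_mid, f_mid
    return S_mid, X_AB
\end{lstlisting}
Once $S$ and $\overline{X}_{AB}$ have been found in this manner, the co-efficients $C_0$ through $C_4$ follow directly from Eqs. \ref{eqn:c0} through \ref{eqn:c4}.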
As a final note, since the nozzle contour is determined only at points along the characteristic mesh, the final velocity along the wall at the precise throat location cannot be determined using the method of characteristics, because the point closest to the nozzle throat along the prescribed Mach number distribution is located a finite distance \emph{downstream} of the geometric throat. Therefore, making use of Kliegel's analysis once again, the throat wall velocity can be expressed as,
\begin{equation}
W_{throat} = 1 + \frac{1}{4S} - \frac{14\gamma - 57}{288 S^2} + \frac{2364\gamma^2 - 3915\gamma + 14337} {82944S^3}
\label{eqn:uwall}
\end{equation}
%---------------------------------------------------------------------------------------------
\subsubsection{Nozzle Wall Determination}
With the desired Mach number distribution established and the final Mach line originating from point F, one has everything required to use the two-dimensional method of characteristics to determine the flowfield within the diverging section of the nozzle. However, what is still lacking is a means of knowing when to stop marching the characteristics in the direction perpendicular to the nozzle axis. This is of critical importance as this stopping point determines the upper wall of the nozzle, which is the desired quantity. Therefore, going back to the radial flow region, one can make use of the stream function, $\psi$, and determine its value at the nozzle boundary. Since the difference between stream functions at any two points within a flow is equal to the mass flow between the streamlines on which the stream functions are taken, if one knows the stream function value on the nozzle axis, plus the value at \emph{ANY} point on the nozzle wall, then this difference must be constant given that the mass flow through the nozzle is constant (or from another viewpoint, since the flow must follow the contour of the nozzle wall, the wall itself is a streamline. Knowing the value of the stream function at a single point along a streamline yields the value along all points of the streamline, thus finding the nozzle contour is equivalent to finding a streamline with a specific stream function value). Using the properties of radial flow, considering an incremental area on a sphere of radius $r$ bounded by the angles $\alpha$ and $\alpha + d\alpha$, one can write the differential stream function as
\begin{equation}
d\psi = \rho q (\pi r^2 \sin \alpha d\alpha)
\label{eqn:diffstream}
\end{equation}
Integrating Eq. \ref{eqn:diffstream} from the nozzle axis to some angle $\alpha$,
\begin{displaymath}
\int_{\alpha=0}^{\alpha}d\psi = \rho q \pi r^2\int_{\alpha=0}^{\alpha}\sin \alpha d\alpha = \psi = -\rho q \pi r^2 \cos \alpha \vert_0^{\alpha}
\end{displaymath}
Therefore one can write,
\begin{equation}
\psi = \rho q \pi r^2 (1-\cos\alpha)
\label{eqn:psi}
\end{equation}
where $\alpha$ is the physical angle, measured at the origin of the radial flow zone, between the axisymmetric axis and some point above the axis (which is not to be confused with the integrated angle $\theta$ in Eq. \ref{eqn:intang}). Non-dimensionalizing this value by the critical radius $r_{cr}$, the sonic sound speed $a^*$, the total density (found from the nozzle chamber conditions) $\rho_o$, and $\pi$ yields,
\begin{equation}
\fbox{$ \overline{\psi} = \overline{\rho} W \overline{r}^2 (1-\cos\alpha) $}
\label{eqn:psibar}
\end{equation}
where $\overline{\rho} = \frac{\rho}{\rho_o}$. Since point C lies within the radial flow region, Eq. \ref{eqn:psibar} applies here.
Further, the physical angle $\alpha$ at point C is known since this is also the location of the maximum expansion angle, $\omega$. For isentropic flow the density ratio is a function of Mach number alone (and $\gamma$, but this is a property of the fluid, not the flow), which in turn is dependent only on $W$ within the radial flow region (see Eq. \ref{eqn:mfromw}). Therefore, with the centreline Mach number distribution (which yields $W_C$ and $\overline{r}_C$) one has all the information required to determine the value of the non-dimensional stream function at point C, which is a constant along the nozzle wall. With the value of the stream function at the nozzle wall fixed, all that remains is to calculate the incremental stream function at each intersection point when applying the method of characteristics. To do this, one requires the differential of the non-dimensional stream function with respect to a change in height above the axisymmetric axis (i.e. $\frac{d\overline{\psi}} {d\overline{y}}$). When the incremental change in $\overline{y}$ is small, a good approximation for $d\overline{\psi}$ (given by Evvard) is,
\begin{equation}
\int_{A}^{B}d\overline{\psi} = \overline{\psi}_B - \overline{\psi}_A = \Big\{(\frac{\overline{\rho}W}{4M\sin(\mu + \theta_f)})_A + (\frac{\overline{\rho}W} {4M\sin(\mu + \theta_f)})_B \Big\}(\overline{y}^2_B - \overline{y}^2_A)
\label{eqn:dpsibar}
\end{equation}
where $\mu$ is the Mach angle ($\sin^{-1}[1/M]$), $\theta_f$ here is the flow angle ($\tan^{-1}[v/u]$), and $\overline{y}$ is the perpendicular distance from the axisymmetric axis divided by the critical radius ($y/r_{cr}$). In most cases, the intersection of two characteristics will not occur at the exact location of the nozzle wall (i.e. the value of the stream function will be slightly higher than the wall value at the final intersection) and hence an interpolation procedure must be used to determine the distance from the axisymmetric axis at which the stream function reaches the correct value ($\overline{\psi}_C$). The percentage of the last increment (in the direction normal to the axis) to be applied is calculated as,
\begin{equation}
\beta=\frac{\overline{\psi}_C - \overline{\psi}_{j-1}}{\overline{\psi}_j - \overline{\psi}_{j-1}}
\label{eqn:percent}
\end{equation}
where $j$ increases as one travels away from the axis. Since $\overline{y}^2$ is proportional to $d\overline{\psi}$ (as can be seen in Eq. \ref{eqn:dpsibar}) the nozzle wall height is determined using,
\begin{equation}
\overline{y}_{wall}^2 = \overline{y}_{j-1}^2 + \beta(\overline{y}_j^2 - \overline{y}_{j-1}^2)
\label{eqn:ywall}
\end{equation}
while the remaining quantities of interest at the nozzle wall are found using,
\begin{displaymath}
\overline{x}_{wall}=\overline{x}_{j-1} + \beta(\overline{x}_j-\overline{x}_{j-1})
\end{displaymath}
\begin{displaymath}
q_{wall}=q_{j-1} + \beta(q_j-q_{j-1})
\end{displaymath}
\begin{displaymath}
\theta_{wall}=\theta_{j-1} + \beta(\theta_j-\theta_{j-1})
\end{displaymath}
where again, $\theta_j$ is the flow angle, \emph{NOT} the integrated angle from Eq. \ref{eqn:intang}.
%---------------------------------------------------------------------------------------------
\subsubsection{Dimensionalizing Nozzle Wall Co-Ordinates}
Having applied the two-dimensional method of characteristics and determined the nozzle shape in terms of non-dimensional co-ordinates, all that remains is to calculate the physical co-ordinates.
To do this, one requires the value of $r_{cr}$ which was used to non-dimensionalize all the length variables. Considering the mass flow through the nozzle ($\dot{m}$), this quantity can be expressed as twice the value of the non-dimensional stream function at point C (i.e. $2\overline{\psi}_C$) since $\overline{\psi}_C$ is calculated for the upper portion of the axisymmetric nozzle only (from $\alpha = 0$ to $\omega$), thus using Eq. \ref{eqn:psibar}
\begin{displaymath}
\dot{m}=2\overline{\psi}_C=2\overline{\rho}_C W_C \overline{r}^2_C (1-\cos\omega)
\end{displaymath}
However, at the nozzle exit the mass flow can be expressed as,
\begin{equation}
\dot{m}=\rho_Fu_F(\pi h_{exit}^2)
\label{eqn:massflowexit}
\end{equation}
since the flow is parallel to the axis at the exit (therefore $v = 0$) and the exit area is circular. Non-dimensionalizing the above expression in the same fashion as was done for $\overline{\psi}$ (thus dividing by $\rho_o$, $a^*$, $\pi$, and $r_{cr}$) and equating the two expressions for $\dot{m}$ yields,
\begin{equation}
\fbox{$ \overline{h}_{exit}^2=\frac{2\overline{\psi}_C}{\overline{\rho}_F W_F} $}
\label{eqn:hexitbar}
\end{equation}
Assuming a one-dimensional nozzle with a straight sonic line located at the geometric throat, one can express the mass flow rate through this idealized nozzle as,
\begin{equation}
\dot{m}_{1D} = \rho^* a^* (\pi h^2)
\label{eqn:massflow1D}
\end{equation}
Recalling the analysis used in the throat region of the nozzle, Eq. \ref{eqn:massflow} yields the ratio of the actual mass flow through the nozzle to the mass flow of a one-dimensional nozzle. Rewriting Eq. \ref{eqn:massflow} here for convenience,
\begin{displaymath}
K^2 = 1 - \frac{\gamma-1}{96S^2}\Big\{1 - \frac{8\gamma -27}{24S} + \frac{754\gamma^2 -757\gamma+3615}{2880S^2}\Big\}
\end{displaymath}
one can also calculate this parameter using the ratio of Eq. \ref{eqn:massflowexit} to Eq. \ref{eqn:massflow1D},
\begin{displaymath}
\frac{\dot{m}}{\dot{m}_{1D}}=\frac{\rho_F u_F \pi h_{exit}^2}{\rho^* a^* \pi h^2}= \frac{\overline{\rho}_F W_F \overline{h}_{exit}^2}{\overline{\rho}^*\overline{h}^2}=K^2
\end{displaymath}
which allows an expression for $\overline{h}^2$ to be written as,
\begin{equation}
\fbox{$ \overline{h}^2 = \frac{\overline{\rho}_F W_F \overline{h}_{exit}^2}{K^2 \overline{\rho}^*} $}
\label{eqn:hbar}
\end{equation}
Using Eq. \ref{eqn:hexitbar} to find $\overline{h}_{exit}$ and then Eq. \ref{eqn:hbar} to find $\overline{h}$, one can then find $r_{cr}$ from the definition of $\overline{h}$,
\begin{equation}
\overline{h}=\frac{h}{r_{cr}}
\label{eqn:rcr}
\end{equation}
%----------------------------------------------------------------------------------------------
\subsection{Viscous Nozzle Design}
The previous section details the procedure for designing an axisymmetric nozzle in the absence of a boundary layer. However, in practice a nozzle created using this design procedure will in fact have a turbulent boundary layer over the majority of its length. Therefore, a method is required to adjust the inviscid nozzle contour to account for the presence of the boundary layer. One such method is that of Edenfield, where experimental correlations are used to determine the mass flow deficit, or displacement thickness ($\delta^*$), of the boundary layer, which is then related to a shift ($\Delta y$) of the nozzle contour in a direction normal to the axis.
The displacement thickness is simply the height of a region of flow (at the freestream density and velocity) whose mass flow, when added to the actual mass flow (which includes boundary layer effects, i.e., slower moving flow near the surface), would equal the theoretical mass flow through the inviscid contour in the absence of a boundary layer; one can therefore use this value to adjust the nozzle shape to maintain the inviscid mass flow value. However, since $\delta^*$ is calculated normal to the nozzle surface (which has a slope $\theta_{wall}$) while $\Delta y$ is normal to the axisymmetric axis, a small correction needs to be applied. Considering the mass flow deficit (per unit depth) calculated from the displacement thickness,
\begin{displaymath}
\Delta \dot{m} = \rho q \delta^*
\end{displaymath}
while if the nozzle correction height is used,
\begin{displaymath}
\Delta \dot{m} = \rho u \Delta y
\end{displaymath}
where here $u$ is used instead of $q$ as $\Delta y$ is parallel to the velocity component $v$. Noting that $u = q\cos\theta_{wall}$ and equating the mass flow deficit as calculated from either height yields,
\begin{equation}
\fbox{$ \Delta y = \frac{\delta^*}{\cos\theta_{wall}} $}
\label{eqn:deltay}
\end{equation}
If the upper wall is increased in height by an amount equal to $\Delta y$ then the nozzle contour accounting for viscosity can be written as,
\begin{equation}
y_{viscous} = y_{inviscid} + \Delta y = y_{inviscid} + \frac{\delta^*}{\cos\theta_{wall}}
\label{eqn:yvis}
\end{equation}
In order to calculate the displacement thickness, Edenfield suggests the relation,
\begin{equation}
\delta^* = \frac{21}{50}\frac{x}{Re_{ref}^{0.2775}}
\label{eqn:dispthick}
\end{equation}
where $x$ is the physical location downstream as measured from the nozzle throat, while the reference Reynolds number is defined as,
\begin{equation}
Re_{ref}=\frac{\rho_{ref}q_{inv}x}{\mu_{ref}}
\label{eqn:reyref}
\end{equation}
where $q_{inv}$ is the velocity calculated from the inviscid profile at the given $x$ location, while $\rho_{ref}$ and $\mu_{ref}$ are calculated at the reference temperature and the freestream pressure (which is the pressure at the $x$ location assuming no viscosity). The reference temperature is found from the reference enthalpy, $h_{ref}$, defined as,
\begin{equation}
h_{ref}=\frac{1}{2}(h_{wall} + h_{inv}) + \frac{11}{50}(h_{aw} - h_{inv})
\label{eqn:href}
\end{equation}
where $h_{wall}$ is the specific enthalpy calculated at the specified wall temperature, $h_{aw}$ is the specific adiabatic wall enthalpy, and $h_{inv}$ is the specific enthalpy calculated from the inviscid contour at the given $x$ location,
\begin{equation}
h_{inv}=h_{tot} - \frac{1}{2}q_{inv}^2
\label{eqn:hinv}
\end{equation}
To calculate the specific adiabatic wall enthalpy Edenfield uses (for turbulent boundary layers),
\begin{equation}
h_{aw} = h_{inv} + \frac{9}{10}(h_{tot} - h_{inv})
\label{eqn:haw}
\end{equation}
With Eqs. \ref{eqn:yvis} - \ref{eqn:haw} one has all the information required to adjust the inviscid contour to account for the presence of a turbulent boundary layer.
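The contour correction of Eqs. \ref{eqn:dispthick}, \ref{eqn:reyref}, \ref{eqn:deltay}, and \ref{eqn:yvis} amounts to only a few operations per wall point. The sketch below is illustrative only; the reference density and viscosity (evaluated at the reference temperature and freestream pressure) are assumed to be supplied by the caller.
\begin{verbatim}
import math

def displacement_thickness(x, re_ref):
    # Eq. (dispthick): Edenfield correlation for the displacement thickness.
    return 0.42 * x / re_ref**0.2775

def viscous_wall_height(y_inviscid, x, rho_ref, q_inv, mu_ref, theta_wall):
    # Shift the inviscid contour normal to the axis to recover the
    # inviscid mass flow (Eqs. (reyref), (deltay), (yvis)).
    re_ref = rho_ref * q_inv * x / mu_ref            # Eq. (reyref)
    delta_star = displacement_thickness(x, re_ref)   # Eq. (dispthick)
    delta_y = delta_star / math.cos(theta_wall)      # Eq. (deltay)
    return y_inviscid + delta_y                      # Eq. (yvis)
\end{verbatim}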
To calculate the boundary layer thickness itself, $\delta_{bound}$, the following relations can be applied,
\begin{equation}
\delta_{bound}=\frac{11}{10}x(\frac{\frac{39}{200}M_{inv}^{\frac{3}{8}}}{Re_{x}^{\frac{83}{500}}})
\label{eqn:deltab}
\end{equation}
where
\begin{equation}
Re_x=\frac{\rho_{inv}q_{inv}x}{\mu_{inv}}
\label{eqn:reyx}
\end{equation}
and finally,
\begin{equation}
y_{bound}=y_{vis} - \frac{\delta_{bound}}{\cos\theta_{wall}}
\label{eqn:ybound}
\end{equation}
%----------------------------------------------------------------------------------------------
\subsection{Subsonic Nozzle Contour}
Although the design of the nozzle has been detailed in full from the throat to the exit, one may wish to add a subsonic portion upstream of the throat. Since uniform flow is desired at the throat, a contoured upper wall can be used to produce the desired results. One example of such an axisymmetric contour is,
\begin{equation}
h_{wall} = \frac{h_{throat}}{ \sqrt{1 - \Big\{1- (\frac{h_{throat}}{h_{chamber}})^2\Big\} \frac{(1-\frac{x^2}{l^2})^2}{1+(\frac{x^2}{3l^2})^3} }}
\label{eqn:subsonic}
\end{equation}
where $l$ is the distance between the combustion chamber exit and the nozzle throat (which can be taken as a percentage of the length of the diverging portion of the nozzle) while $x$ is the distance of the location under consideration as measured from the combustion chamber exit. The value of $h_{chamber}$ can be determined from the isentropic relation for areas in a converging/diverging duct (remembering that for an axisymmetric nozzle, the area is proportional to the \emph{square} of the radius, or nozzle half height $h$),
\begin{equation}
(\frac{h_{chamber}}{h_{throat}})^2 = \frac{A}{A^*} = \frac{1}{M_{chamber}}\Big\{(\frac{2}{\gamma +1}) (1 + \frac{\gamma-1}{2}M_{chamber}^2)\Big\}^\frac{\gamma +1}{2(\gamma -1)}
\label{eqn:area}
\end{equation}
and the assumption of a negligible velocity exiting the combustion chamber (e.g. $M_{chamber} = 0.1$).
%--------------------------------------------------------------------------------------------------
\section{Results}
\subsection{Kerosene/Air Combustion}
The combustion of kerosene with air can be written as follows,
\begin{equation}
\phi C_{12}H_{24} + 18 (O_2 + 3.76 N_2) \longrightarrow 12 H_2O + 12 CO_2 + 67.68 N_2
\label{eqn:kerocombair}
\end{equation}
where air is treated on a kmol basis as composed of $21\% O_2$ and $79\% N_2$ (and the nitrogen is treated as inert during the combustion process). The fuel to air ratio on a mass basis requires knowledge of the molecular weight of air. However, one must be careful to make a distinction between a \emph{kmol of air} and a kmol of an individual species, since air is actually a composite of two more basic species (oxygen and nitrogen). Thus on a kmol basis this value can be calculated as 28.8503 $kg_{air}$/kmol, or, since one kmol of air actually contains 4.76 kmols of primary species (i.e. 4.76 kmol/$kmol_{air}$), the molecular weight of air per \emph{kmol of air} is actually 4.76 x 28.8503 = 137.3274 $kg_{air}/kmol_{air}$. Therefore from Eq. \ref{eqn:kerocombair} for every 1 kmol of fuel there are 18 kmols of air, which is actually 18 x 4.76 = 85.68 kmols, and thus the stoichiometric fuel to air ratio on a molar basis is $(\frac{f}{a})_{st}$ = 1/85.68 or 0.01167. On a mass basis, using the molecular weight calculated above for air of 28.8503 kg/kmol and a molecular weight for kerosene of 168.32256 kg/kmol ($C_{12}H_{24}$), one obtains $(\frac{f}{a})_{st}$ = 0.06809.
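As a quick check of this arithmetic, the stoichiometric ratios quoted above can be reproduced in a few lines; the molecular weights used below are the values given in the text.
\begin{verbatim}
# Molecular weights in kg/kmol, as used in the text.
MW_KEROSENE = 168.32256     # C12H24
MW_AIR      = 28.8503       # per kmol of constituent species (0.21 O2 + 0.79 N2)

kmol_air_per_kmol_fuel = 18.0 * 4.76      # 18 units of (O2 + 3.76 N2)
fa_molar = 1.0 / kmol_air_per_kmol_fuel   # ~0.01167
fa_mass = MW_KEROSENE / (kmol_air_per_kmol_fuel * MW_AIR)  # ~0.06809
print(fa_molar, fa_mass)
\end{verbatim}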
Applying the Gibbs Minimization Technique and solving for the nineteen species shown in Table \ref{table:spmapair} yields the equilibrium composition shown in Fig. \ref{fig:kero_air}(B) (where the equivalence ratio is unity and an initial temperature and pressure of 300 K and 1.5 MPa respectively are used).
\begin{figure}[hb]
\begin{center}
\begin{subfigure}[]{
\psfrag{y}[c][c][0.7][0]{Temperature [K]}
\psfrag{z}[c][c][0.7][0]{$\gamma$}
\psfrag{x}[c][c][0.7][0]{Equivalence Ratio}
\psfrag{a}[l][l][0.45][0]{Equilibrium Temperature}
\psfrag{b}[l][l][0.45][0]{Throat Temperature}
\psfrag{c}[l][l][0.45][0]{$\gamma$}
\includegraphics[width=7cm]{kero_air_tempvsphi.eps}}
\end{subfigure}
\begin{subfigure}[]{
\psfrag{y}[c][c][0.7][0]{Mass Fraction ($c_i$)}
\psfrag{x}[c][c][0.7][0]{Species}
\includegraphics[width=7cm]{kero_air_mass_phi1.eps}}
\end{subfigure}
% One can specify [angle=x,width=y,height=z] for the graphics
\caption{Equilibrium combustion results for (A) variable equivalence ratio and (B) equivalence ratio of 1 (stoichiometric combustion)}
\label{fig:kero_air}
\end{center}
\end{figure}
\begin{table}[h]
\fontsizetable\begin{center}
\begin{threeparttable}
\tablecaption{Species Map for Figure \ref{fig:kero_air}}
\begin{tabular}{ccccccccccccccccc}
\toprule
0 & 1 & 2 & 3 & 4 & 5 & 6 & 7 & 8 & 9\\
\midrule
$H_2$ & $O_2$ & $H$ & $O$ & $OH$ & $H_2O$ & $HO_2$ & $C_2H_2$ & $C_2H_4$ & $CO$ \\
\midrule
10 & 11 & 12 & 13 & 14 & 15 & 16 & 17 & 18\\
\midrule
$CO_2$ & $CH_3$ & $CH_4$ & $H_2CO$ & $C_6H_{12}O$ & $C_{12}H_{24}$ & $N$ & $NO$ & $N_2$\\
\bottomrule
\end{tabular}
%\begin{tablenotes}
%\end{tablenotes}
\label{table:spmapair}
\end{threeparttable}
\end{center}
\end{table}
One can see from Fig. \ref{fig:kero_air}(A) that as the fuel to air ratio is raised from fuel lean conditions (low $\phi$) to a $\phi$ of approximately 1, the equilibrium combustion temperature rises, reaching a peak of approximately 2400 K (which assuming isentropic expansion yields a peak throat temperature of approximately 2100 K). At stoichiometric conditions, the complete combustion mass fractions of each species can be determined directly from Eq. \ref{eqn:kerocombair} by multiplying the number of kmols of each species by its molecular weight and dividing by the total mass of either the products or reactants (as mass is conserved during the combustion process). By doing this one finds that the mass fraction of water ($H_2O$, species 5) is approximately 0.082 while that of carbon dioxide ($CO_2$, species 10) is approximately 0.200. Both of these values are far less than the stoichiometric mass fraction of nitrogen ($N_2$, species 18) of 0.718. Comparing these values with those shown in Fig. \ref{fig:kero_air}(B), one can see that the Gibbs Minimization Technique, although based on the assumption of chemical equilibrium and not complete combustion, matches the complete combustion solution closely. This would indicate that the complete combustion model in Eq. \ref{eqn:kerocombair} is an accurate model of the combustion process in this case.
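The complete combustion mass fractions quoted above follow directly from Eq. \ref{eqn:kerocombair}; a short calculation of this kind is sketched below, with standard reference molecular weights inserted for illustration.
\begin{verbatim}
# Complete-combustion product mass fractions for Eq. (kerocombair), phi = 1.
# Molecular weights (kg/kmol) are standard reference values.
MW = {'H2O': 18.01528, 'CO2': 44.0095, 'N2': 28.0134}
kmols = {'H2O': 12.0, 'CO2': 12.0, 'N2': 67.68}
total_mass = sum(kmols[s] * MW[s] for s in kmols)
mass_fractions = {s: kmols[s] * MW[s] / total_mass for s in kmols}
print(mass_fractions)   # roughly {'H2O': 0.082, 'CO2': 0.200, 'N2': 0.718}
\end{verbatim}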
%----------------------------------------------------------------------------------------------------------
\subsection{Kerosene/Pure Oxygen Combustion}
The complete combustion reaction for kerosene ($C_{12}H_{24}$) burned with pure diatomic oxygen ($O_2$) (which would be the case in a kerosene rocket chamber) can be written as follows,
\begin{equation}
\phi C_{12}H_{24} + 18 O_2 \longrightarrow 12 H_2O + 12 CO_2
\label{eqn:kerocombo2}
\end{equation}
which on a molar basis yields a fuel to air ratio, $(\frac{f}{a})_{st}$, of 1/18 or 0.05556. Using molecular weights of 168.32256 kg/kmol and 31.9988 kg/kmol for kerosene and oxygen respectively yields a $(\frac{f}{a})_{st} = 0.2916$ on a mass basis. Applying the Gibbs Minimization Technique (using sixteen species in this case as there are no species containing nitrogen) yields the equilibrium composition shown in Fig. \ref{fig:kero_oxy}(B) with initial temperature and pressure of 300 K and 1.5 MPa respectively.
\begin{figure}[h]
\begin{center}
\begin{subfigure}[]{
\psfrag{y}[c][c][0.7][0]{Temperature [K]}
\psfrag{z}[c][c][0.7][0]{$\gamma$}
\psfrag{x}[c][c][0.7][0]{Equivalence Ratio}
\psfrag{a}[l][l][0.45][0]{Equilibrium Temperature}
\psfrag{b}[l][l][0.45][0]{Throat Temperature}
\psfrag{c}[l][l][0.45][0]{$\gamma$}
\includegraphics[width=7cm]{kero_oxy_tempvsphi.eps}}
\end{subfigure}
\begin{subfigure}[]{
\psfrag{y}[c][c][0.7][0]{Mass Fraction ($c_i$)}
\psfrag{x}[c][c][0.7][0]{Species}
\includegraphics[width=7cm]{kero_oxy_phi1.eps}}
\end{subfigure}
% One can specify [angle=x,width=y,height=z] for the graphics
\caption{Equilibrium combustion results for (A) variable equivalence ratio and (B) equivalence ratio of 1 (stoichiometric combustion)}
\label{fig:kero_oxy}
\end{center}
\end{figure}
\begin{table}[h]
\fontsizetable
\begin{center}
\begin{threeparttable}
\tablecaption{Species Map for Figure \ref{fig:kero_oxy} and Figure \ref{fig:kero_oxy_multiphi}}
\begin{tabular}{cccccccccccccccc}
\toprule
0 & 1 & 2 & 3 & 4 & 5 & 6 & 7 & 8\\
\midrule
$H_2$ & $O_2$ & $H$ & $O$ & $OH$ & $H_2O$ & $HO_2$ & $C_2H_2$ & $C_2H_4$\\
\midrule
9 & 10 & 11 & 12 & 13 & 14 & 15 & \\
\midrule
$CO$ & $CO_2$ & $CH_3$ & $CH_4$ & $H_2CO$ & $C_6H_{12}O$ & $C_{12}H_{24}$ & \\
\bottomrule
\end{tabular}
%\begin{tablenotes}
%\end{tablenotes}
\label{table:spmap}
\end{threeparttable}
\end{center}
\end{table}
One of the first things to note is the significant presence of $O_2$ (species 1) in the post combustion mixture in Fig. \ref{fig:kero_oxy}(B). Given that the equivalence ratio in this graph is set to stoichiometric conditions ($\phi = 1$), one would expect that all the available oxidizer would be consumed by the available fuel (as was the case in the kerosene/air combustion case). However, the significant difference between the two combustion cases is the equilibrium temperature. When burned with pure $O_2$ as opposed to air, the resulting temperature is increased by approximately 1000 K. Around temperatures of 3500 K, many of the species involved in the combustion process start to dissociate (for example, $O_2$ starts to dissociate around 2000 K and is nearly completely dissociated by 4000 K at a pressure of 1 atm). From Fig. \ref{fig:kero_oxy}(B) one can see that the mass fractions of water and carbon dioxide (species 5 and 10) are approximately 0.21 and 0.30 respectively, which are markedly different from the complete combustion values calculated directly from Eq. \ref{eqn:kerocombo2} of 0.29 and 0.71.
Given the drastic reduction in the presence of $CO_2$ as well as the reduction in $H_2O$ (although not as dramatic), it can be inferred that at the given equilibrium temperature these two combustion products start to dissociate into more basic species such as carbon monoxide ($CO$, species 9) and $OH$ (species 4). This dissociation process allows for the formation of $O_2$ \emph{during combustion} and thus its presence does not reflect a fuel lean mixture but rather a combustion temperature too high to consider the complete combustion model an accurate assessment of the combustion process. Indeed, some of this $O_2$ dissociates further into $O$ (species 3) as shown in Fig. \ref{fig:kero_oxy}(B), which is to be expected given the temperature (although clearly the $O_2$ dissociation process is not as complete as might be expected given the temperature, which is due partly to the pressure at which combustion is taking place. At a pressure of 1.5 MPa, which is approximately 15 atm, dissociation is reduced when compared to lower pressures at the same temperature).
\begin{figure}[!hb]
\begin{center}
\psfrag{y}[c][c][0.7][0]{Mole Fraction}
\psfrag{x}[c][c][0.7][0]{Species}
\psfrag{a}[l][l][0.7][0]{RP1 ($CH_{1.96}$)}
\psfrag{b}[l][l][0.7][0]{Kerosene ($C_{12}H_{24}$)}
\includegraphics[width=8cm]{kero_oxycomprp1.eps}
% One can specify [angle=x,width=y,height=z] for the graphics
\caption{Equilibrium combustion results at an equivalence ratio of 1.33 for RP1, $\phi CH_2 + 1.5O_2 \to H_2O + CO_2$ (taken from Hill and Peterson)}
\label{fig:rp1comp}
\end{center}
\end{figure}
This result is seen with other hydrocarbon fuels as well, as shown in Fig. \ref{fig:rp1comp}. Here, the results (taken from Hill and Peterson) of an equilibrium combustion calculation are shown for RP1 ($CH_{1.96}$), which has approximately the same ratio of carbon to hydrogen as kerosene ($C_{12}H_{24}$). These results are for an equivalence ratio of 1.33 (which is approximately the equivalence ratio at which the peak equilibrium temperature is reached, see Fig. \ref{fig:kero_oxy}(A)) with an initial temperature and pressure of 298 K and 6.89 MPa respectively. As can be seen, the combustion of kerosene at the same conditions matches the results for RP1, with the \emph{mole fraction} (as opposed to mass fraction as used in Fig. \ref{fig:kero_oxy}(B)) of $CO_2$ being significantly depleted, $CO$ amounts being elevated, and residual $O_2$ being present despite the fact that the mixture is fuel rich. Going back to the conditions for which the results in Fig. \ref{fig:kero_oxy}(B) are obtained, Fig. \ref{fig:kero_oxy_multiphi} shows the mass fractions of the various species over an increasing range of equivalence ratios, from a fuel lean mixture of $\phi = 0.5$ to a fuel rich mixture with $\phi = 4.5$. As can be seen, the mass fraction distribution at $\phi = 3$ exhibits a change in composition, from four species to two, as the decreasing amounts of both water and carbon dioxide have finally reached zero, leaving only hydrogen and carbon monoxide. However, past this point at an equivalence ratio of 3.5, there are again four species, only this time the new species are those arising out of the partial oxidation of kerosene, i.e., the kerosene is only partly broken down, leaving behind some lighter unburned hydrocarbons such as $C_2H_2$ and $C_2H_4$.
This sharp change in mixture composition, not merely changing the amounts of given species, but actually changing the species present in the mixture, is responsible for the sharp change in $\gamma$ seen in Fig. \ref{fig:kero_oxy}(A) at an equivalence ratio of 3.
\begin{figure}[hp]
\begin{center}
\psfrag{y}[c][c][0.7][0]{Mass Fraction ($c_i$)}
\psfrag{x}[c][c][0.7][0]{Species}
\subfigure[$\phi=0.5$]{
\includegraphics[width=5cm]{kero_oxy_phi0.5.eps}}
\subfigure[$\phi=1.0$]{
\includegraphics[width=5cm]{kero_oxy_phi1.0.eps}}
\subfigure[$\phi=1.5$]{
\includegraphics[width=5cm]{kero_oxy_phi1.5.eps}}
\subfigure[$\phi=2.0$]{
\includegraphics[width=5cm]{kero_oxy_phi2.0.eps}}
\subfigure[$\phi=2.5$]{
\includegraphics[width=5cm]{kero_oxy_phi2.5.eps}}
\subfigure[$\phi=3.0$]{
\includegraphics[width=5cm]{kero_oxy_phi3.0.eps}}
\subfigure[$\phi=3.5$]{
\includegraphics[width=5cm]{kero_oxy_phi3.5.eps}}
\subfigure[$\phi=4.0$]{
\includegraphics[width=5cm]{kero_oxy_phi4.0.eps}}
\subfigure[$\phi=4.5$]{
\includegraphics[width=5cm]{kero_oxy_phi4.5.eps}}
% One can specify [angle=x,width=y,height=z] for the graphics
\caption{Equilibrium combustion mass fractions at various equivalence ratios for kerosene/oxygen ($T_{inj} = 300$ K, $p^o = 1.5$ MPa)}
\label{fig:kero_oxy_multiphi}
\end{center}
\end{figure}
To summarize, the presence of residual $O_2$ at equivalence ratios of less than 3 and greater than 1 does not indicate a fuel lean mixture, but rather that the combustion process represented by Eq. \ref{eqn:kerocombo2} is no longer valid (as partial dissociation of the combustion products occurs due to the elevated temperature). Also, if one assumes that the combustion process occurs as follows (where the combustion products are now $CO$ and $H_2$),
\begin{equation}
\phi C_{12}H_{24} + 6 O_2 \longrightarrow 12 CO + 12 H_2
\label{eqn:kerocombo2coh}
\end{equation}
then the ($\frac{f}{a}$) is 1/6 or 0.1667 on a molar basis, or 0.8767 on a mass basis. Given molecular weights of 28.0104 kg/kmol and 2.01588 kg/kmol for $CO$ and $H_2$ respectively, this leads to mass fractions of 0.932 for $CO$ and 0.067 for $H_2$ assuming Eq. \ref{eqn:kerocombo2coh}. These are indeed the values in Fig. \ref{fig:kero_oxy_multiphi}(F). Comparing this fuel to air (or oxygen in this case) ratio to the stoichiometric value of 0.2916 determined from Eq. \ref{eqn:kerocombo2}, one realizes that $\frac{f}{a} = 0.8767$ is indeed 3 times this value, and therefore $\phi = 3$. Thus at equivalence ratios higher than 3, the complete combustion model of Eq. \ref{eqn:kerocombo2} is totally inaccurate and must be replaced with Eq. \ref{eqn:kerocombo2coh}.
%---------------------------------------------------------------------------------------------
\subsection{Nozzle Design Results}
The following nozzle designs are for a combustion mixture of kerosene ($C_{12}H_{24}$) and pure oxygen ($O_2$) at an equivalence ratio, $\phi$, of one (based on the complete combustion reaction in Eq. \ref{eqn:kerocombo2}),
\begin{displaymath}
\phi C_{12}H_{24} + 18 O_2 \longrightarrow 12 H_2O + 12 CO_2
\label{eqn:kerocombo2b}
\end{displaymath}
The combustion is assumed to occur at a constant pressure of 1.5 MPa while the total temperature at the end of the combustion process is assumed to be equal to the equilibrium combustion temperature. For an input fuel temperature of 300 K this yields a total temperature of 3533.55 K.
The pertinent combustion variables are summarized in Table \ref{table:mixture} while the resulting mixture composition on both a mass and mole fraction basis is shown in Fig. \ref{fig:mixcomp}. The equilibrium combustion calculations consider a total of sixteen possible species as listed in Table \ref{table:speciesmap}.
\begin{table}[!h]
\fontsizetable
\begin{center}
\begin{threeparttable}
\tablecaption{Mixture Properties}
\begin{tabular}{cc}
\toprule
Chamber Conditions & Value \\
\midrule
Equivalence Ratio, $\phi$ & 1\\
Fuel Input Temperature & 300 K\\
Total Pressure & 1.5 MPa\\
Post Combustion Equilibrium Temperature & 3533.55 K\\
Post Combustion Specific Heat Ratio, $\gamma$ & 1.2175998\\
Post Combustion Mixture Molecular Weight & 24.412 kg/kmol\\
\bottomrule
\end{tabular}
%\begin{tablenotes}
%\end{tablenotes}
\label{table:mixture}
\end{threeparttable}
\end{center}
\end{table}
\begin{table}[!h]
\fontsizetable
\begin{center}
\begin{threeparttable}
\tablecaption{Nozzle Design Parameters}
\begin{tabular}{cc}
\toprule
Nozzle Design Variable & Value \\
\midrule
Maximum Expansion Angle, $\omega$ & $10^\circ$\\
Throat Radius & 1 cm\\
Exit Mach Number, $M_e$ & 3, 4, 5\\
Wall Temperature & 800 K\\
Mach Number at Combustion Chamber Exit & 0.1 \\
Subsonic Section Length (\% of Supersonic Length)& 25\% \\
\bottomrule
\end{tabular}
%\begin{tablenotes}
%\end{tablenotes}
\label{table:nozcomp}
\end{threeparttable}
\end{center}
\end{table}
\begin{table}[!h]
\fontsizetable
\begin{center}
\begin{threeparttable}
\tablecaption{Species Map for Figure \ref{fig:mixcomp}}
\begin{tabular}{cccccccccccccccc}
\toprule
0 & 1 & 2 & 3 & 4 & 5 & 6 & 7 & 8\\
\midrule
$H_2$ & $O_2$ & $H$ & $O$ & $OH$ & $H_2O$ & $HO_2$ & $C_2H_2$ & $C_2H_4$\\
\midrule
9 & 10 & 11 & 12 & 13 & 14 & 15 & \\
\midrule
$CO$ & $CO_2$ & $CH_3$ & $CH_4$ & $H_2CO$ & $C_6H_{12}O$ & $C_{12}H_{24}$ & \\
\bottomrule
\end{tabular}
%\begin{tablenotes}
%\end{tablenotes}
\label{table:speciesmap}
\end{threeparttable}
\end{center}
\end{table}
\begin{figure}[!h]
\begin{center}
\psfrag{y}[c][c][0.7][0]{Mass Fraction ($c_i$)}
\psfrag{x}[c][c][0.7][0]{Species}
\subfigure[Mass Fractions]{
\includegraphics[width=5.5cm]{kero_o2_mass_phi1.eps}}
\psfrag{y}[c][c][0.7][0]{Mole Fraction ($\chi_i$)}
\subfigure[Mole Fractions]{
\includegraphics[width=5.5cm]{kero_o2_mole_phi1.eps}}
% One can specify [angle=x,width=y,height=z] for the graphics
\caption{Post combustion mixture composition entering nozzle}
\label{fig:mixcomp}
\end{center}
\end{figure}
\begin{figure}[!hb]
\begin{center}
\psfrag{x}[c][c][0.7][0]{x [m]}
\psfrag{y}[c][c][0.7][0]{y [m]}
\psfrag{b}[l][c][0.5][0]{Inviscid Contour}
\psfrag{a}[l][c][0.5][0]{Final Contour}
\psfrag{c}[l][c][0.5][0]{Boundary Layer}
\subfigure[Inviscid contour and boundary layer for $M_e = 5$]{
\includegraphics[width=5.5cm]{mach5inv.eps}}
\subfigure[Complete axisymmetric nozzle shape for $M_e = 5$]{
\includegraphics[width=5.5cm]{scalewholenozzle.eps}}
\caption{Axisymmetric Nozzle Design}
\label{fig:inviscid}
\end{center}
\end{figure}
\begin{figure}[ht]
\begin{center}
\psfrag{c}[l][c][0.65][0]{$M_e = 3$}
\psfrag{b}[l][c][0.65][0]{$M_e = 4$}
\psfrag{a}[l][c][0.65][0]{$M_e = 5$}
\psfrag{x}[c][c][0.7][0]{x [m]}
\psfrag{y}[c][c][0.7][0]{y [m]}
\includegraphics[width=10cm]{nozcomp.eps}
\caption{Nozzle designs for various exit Mach numbers}
\label{fig:contcomp}
\end{center}
\end{figure}
For an exit Mach number of 5, Fig.
\ref{fig:inviscid}(A) shows the inviscid contour created and the resulting final contour after being adjusted for the presence of the boundary layer (which is also shown in the figure). As one can see, the resulting boundary layer at the nozzle exit is quite large, which in many cases results in a truncation of the nozzle length to maximize the high velocity core region. Adding a subsonic portion ahead of the throat with a length set at 25\% of the diverging length and a chamber Mach number of 0.1 yields the nozzle shape shown in Fig. \ref{fig:inviscid}(B). Various nozzle designs are shown in Fig. \ref{fig:contcomp} for nozzles with several different exit Mach numbers (while all other input variables remain constant).
%---------------------------------------------------------------------------------------------------------
\subsection{Computational Results}
The preceding nozzle design is used in an axisymmetric, implicit, steady state computational code (which uses the Wilcox $k$-$\omega$ two-equation turbulence model) to verify the predictions made (this code is called WARP, developed by B. Parent). A uniform grid with 150 points in the downstream direction (parallel to the nozzle axis) and 50 points in the cross stream direction is used. Figure \ref{fig:warpcent} shows the Mach number along the nozzle centreline as compared to both the desired Mach number distribution (as found from the method previously described) and the isentropic area/Mach number relation (Eq. \ref{eqn:area}).
\begin{figure}[!h]
\begin{center}
\psfrag{y}[c][c][0.7][0]{Mach Number}
\psfrag{x}[c][c][0.7][0]{Distance along Centreline [m]}
\psfrag{a}[l][c][0.5][0]{Computational Result (150 x 50 Grid)}
\psfrag{b}[l][c][0.5][0]{Pre-specified Mach Number Distribution}
\psfrag{c}[l][c][0.5][0]{Isentropic Mach Number Distribution (Eq \ref{eqn:area})}
\includegraphics[width=9cm]{warpcentcomp.eps}
% One can specify [angle=x,width=y,height=z] for the graphics
\caption{Mach number along nozzle centreline}
\label{fig:warpcent}
\end{center}
\end{figure}
As can be seen, although the computed Mach number at the exit of approximately 5.6 is slightly higher than the design Mach number of 5, it does follow the desired contour as the velocity is increased from Mach one at the throat through the expanding portion of the nozzle. In fact, as can be seen, up to a distance of approximately 0.5 m the predicted increase in Mach number follows the desired increase quite well. It is also noted that the pre-specified Mach number distribution follows the isentropic relation very closely, which is to be expected given the manner in which the nozzle is designed. In using the two-dimensional method of characteristics for irrotational flow, from Crocco's theorem one can conclude that, since the conditions at the end of the combustion chamber are assumed uniform (i.e. constant enthalpy everywhere), the flow must be homentropic (constant entropy everywhere). If the flow is homentropic, then it must also be isentropic (entropy is constant along a given streamline, but not necessarily constant \emph{between} streamlines), and hence Eq. \ref{eqn:area} must apply.
\begin{figure}[p]
\begin{center}
\psfrag{y}[c][c][0.7][0]{$\gamma$[m]}
\psfrag{x}[c][c][0.7][0]{x[m]}
\psfrag{z}[c][c][0.7][0]{Temperature [K]}
\psfrag{a}[l][c][0.5][0]{Nozzle Contour}
\psfrag{b}[l][c][0.5][0]{Computed $\gamma$}
\psfrag{c}[l][c][0.5][0]{Computed Temperature}
\psfrag{d}[l][c][0.5][0]{Isentropic Temperature}
\includegraphics[width=9cm]{warpgamma.eps}
% One can specify [angle=x,width=y,height=z] for the graphics
\caption{Variation of Temperature and Specific Heat Ratio along Nozzle Centreline}
\label{fig:warpgamma}
\end{center}
\end{figure}
The slight decrease in Mach number in the computational solution near the nozzle exit is caused by an increase in temperature at this location, as can be seen in Fig. \ref{fig:warpgamma}. Near the nozzle exit, the wall is nearly parallel with the nozzle axis and the nozzle is thus approximately a constant area duct. Viscous effects in a constant area duct cause the temperature to rise and the Mach number to tend to unity, thereby causing the decrease in Mach number and the increase in temperature seen at this location. Also shown in Fig. \ref{fig:warpgamma} is the isentropic temperature relation, which follows the computed solution very well until approximately the 0.4 m location, at which point the real gas effects become significant, thereby reducing the accuracy of assuming isentropic flow. It is also noted that the isentropic result shows no temperature increase at the nozzle exit, reinforcing the fact that the computed temperature rise is indeed a viscous effect.
\begin{figure}[p]
\begin{center}
\psfrag{y}[c][c][0.7][0]{y[m]}
\psfrag{x}[c][c][0.7][0]{x[m]}
\includegraphics[width=9cm]{warpmachcont.eps}
% One can specify [angle=x,width=y,height=z] for the graphics
\caption{Mach contours within nozzle}
\label{fig:warpmach}
\end{center}
\end{figure}
Also shown in Fig. \ref{fig:warpgamma} is the value for the ratio of specific heats throughout the nozzle. At the combustion chamber exit it starts at the value found from the equilibrium combustion calculations. During the nozzle design, this value is assumed constant throughout the expansion process and as can be seen from the computational results, this assumption is fairly accurate within the subsonic portion of the nozzle. However, as the expansion process continues past a distance downstream of approximately 0.4 m (Fig. \ref{fig:warpgamma}) the constant $\gamma$ assumption starts to become less valid. This accounts for the differences in the computed Mach number in the latter portions of the nozzle as compared to the pre-specified distribution. This change in $\gamma$ is also responsible for the difference between the pre-specified and isentropic Mach number and temperature distributions shown in Figs. \ref{fig:warpcent} and \ref{fig:warpgamma}, since for the isentropic results $\gamma$ is held constant at the combustion chamber exit value (approximately 1.22). Figure \ref{fig:warpmach} shows the Mach contours within the nozzle. Here one can see the approximate shape of the boundary layer (as these contours depend on velocity \emph{and} temperature, hence although the decreasing velocity is shown in the Mach number contours near the wall, this decrease is altered to some extent by the increasing temperature as one approaches the wall as well). Also, one can see the bubble of increased velocity along the nozzle centreline above the design Mach number of 5.
\input{header}
\begin{document}
\begin{center}
{\LARGE DMZ Example}
\vspace{0.1in}\\
\end{center}
\copyrightnotice
\section{Overview}
This lab illustrates a simple DMZ. It is intended as an example network topology definition for Labtainer exercises.
\section{Lab Environment}
This lab runs in the Labtainer framework, available at http://nps.edu/web/c3o/labtainers. That site includes links to a pre-built virtual machine that has Labtainers installed; however, Labtainers can be run on any Linux host that supports Docker containers. From your labtainer-student directory start the lab using:
\begin{verbatim}
labtainer dmz-example
\end{verbatim}
\noindent A link to this lab manual will be displayed.
\section{Network Configuration}
This lab includes several networked computers as shown in Figure~\ref{fig:topology}. Note, however, that your instance of the lab will have different IP addresses for some of the components. When the lab starts, you will get several virtual terminals, one connected to each component. The DMZ is created using an inner gateway and an outer gateway. The former uses NAT to hide internal network addresses. The outer gateway and the remote gateway each reach the Internet via an ISP with address 198.18.0.1. The local site has a network address of 198.18.1.0/24. The remote site has a network address of 203.0.113.0/24.
\begin{figure}[H]
\begin{center}
\includegraphics [width=0.8\textwidth]{dmz-example.jpg}
\end{center}
\caption{Network topology for dmz-example lab}
\label{fig:topology}
\end{figure}
\section{Lab Tasks}
Use nmap on various components within the lab to determine which services are visible from different locations within the network topology. Review the iptables configurations as defined in the /etc/rc.local scripts on the inner and outer gateways.
\section{Submission}
After finishing the lab, go to the terminal on your Linux system that was used to start the lab and type:
\begin{verbatim}
stoplab dmz-example
\end{verbatim}
When you stop the lab, the system will display a path to the zipped lab results on your Linux system. Provide that file to your instructor, e.g., via the Sakai site.
\end{document}
\chapter{\label{chapter:PropagatorStates}The State Vector for Propagation}
\chapauthor{Darrel J. Conway}{Thinking Systems, Inc.}

%\section{\label{section:MissionStateOverview}The Mission State}
% The current values of spacecraft data, the state transition matrix, and other elements that evolve
% in GMAT's model are held, in the propagation subsystem, in the mission state\footnote{Here -- and
% throughout this chapter -- the term ``mission state'' and the word ``state'' represent this
% collection of data elements, unless otherwise specified. Other chapters in the architectural
% specification may use the word ``state'' for different purposes -- for example, the Solver classes
% all function through finite state machines, where the current location of the system in the
% solution process is assigned a specific status, called a state, and move from one of these
% enumerated stated to another as the solution process is executed.}. GMAT's mission state can be
% further decomposed into static components, components that evolve through numerical integration,
% components that have analytic evolution operators, and components that are modeled over time using
% stochastic models.
%
% \section{Classes Supporting the Mission State}
%
% The mission state data is culled from the objects that make use of the data contained in the state,
% and passed into the elements that the propagators use to calculate the state evolution. As an
% example, each Spacecraft object manages data representing the position and velocity of that
% particular Spacecraft. When GMAT needs to model the motion of that spacecraft, it gathers the
% epoch and corresponding position and velocity information in a mission state, and passes that
% mission state to the evolution operator so that the motion associated with the change of epoch can
% be calculated. A more complete example of this process is presented in
% Section~\ref{section:IntegratorExample}.

\section{The MissionState Class}

% The MissionState class plays two roles in GMAT. It acts as a container class that takes pointers
% to the objects that supply state data, providing a central location for the state data for mission
% elements that use it. It also supplies the accumulated state data to those elements in the form
% that they need in order to process it and take actions, and routes any resulting changes in state
% to the objects that receive those changes.
%
% The MissionState class collects data into vectors used by the propagators. These vectors are
% constructed based on the needs of the propagator, and
%
% GMAT's Numerical Integrators

\subsection{Enumerations used in the MissionState}

The MissionState uses several enumerations to identify propagation components efficiently. This section describes each of these enumerations.

\paragraph{PropMode} The PropMode enumeration identifies the type of propagation used with a given set of state elements.
\begin{itemize}
\item \textbf{ANALYTIC\_PROP}
\item \textbf{INTEGRATE}
\item \textbf{PRECALCULATED\_PROP}
\end{itemize}

\paragraph{ElementType} The ElementType enumeration identifies the kind of component contained in a PropVector.

\begin{itemize}
\item \textbf{CARTESIAN\_START}
\item \textbf{CARTESIAN}
\item \textbf{EQUINOCTIAL\_START}
\item \textbf{EQUINOCTIAL}
\item \textbf{STM\_START}
\item \textbf{STM}
\item \textbf{QUARTERNION\_START}
\item \textbf{QUARTERNION}
\item \textbf{MASS}
\item \textbf{USER\_DEFINED}
\item \textbf{UNKNOWN\_ELEMENT\_TYPE}
\end{itemize}

\subsection{MissionState Attributes}

\begin{itemize}
\item\textbf{Real epoch} The epoch of the state data managed by the MissionState. GMAT requires that all such state data in a MissionState use the same epoch.
\item\textbf{ObjectArray dataSource} The vector of objects that are propagated.
\item\textbf{std::vector<PropMode> propModes} The propagation mode for each object that is propagated.
\item\textbf{Integer dimension} The total number of elements that are propagated.
\item\textbf{PropVector thePropVector} The state data to be propagated.
\end{itemize}

\subsection{MissionState Methods}

\begin{itemize}
\item\textbf{bool AddSource(GmatBase* src, PropMode mode, ElementType type, Integer elementId)} Registers an object as a data provider with the MissionState. The mode parameter identifies the type of propagation desired: analytic, numerically integrated, or from an ephemeris source. The type parameter identifies the kind of element that is propagated. The elementId parameter is the ID for the start of the data that is propagated. All propagated data must be accessible using the generic access methods defined for GmatBase objects, so that the elementId can be used to access these data.
\item\textbf{bool Initialize()} Performs preliminary setup of the PropVector prior to propagation.
\item \textbf{bool PrepareToPropagate()} Completes pre-propagation setup.
\end{itemize}

\section{\label{section:propVector}The PropVector Class}

%The propVector component of the MissionState is a Real array of data sized to match the data vector
%needed by GMAT's numerical integrators for propagation. The propVector is constructed by the
%MisisonState when the PrepareToPropagate methos if executed by the command that controls the
%propagation. Figure~\ref{figure:PropVectorComponents} shows a representative layout for a
%propVector.

Figure~\ref{figure:PropVectorComponents} shows a representative layout of the data in a PropVector for a single spacecraft. The vector displayed here is the PropVector used by a numerical integrator that is modeling the evolution of the spacecraft's trajectory, state transition matrix, and attitude during a finite burn maneuver. When a MissionState object assembles a PropVector, it follows a set of ordering rules designed to make the data in the PropVector fall in a specific order so that access from the propagators is simplified. The general order, as shown in this example, is to place trajectory data first in the vector, followed by associated matrices that evolve along with the trajectory, then attitude data followed by associated attitude matrices, then user defined elements, and finally transitory elements like mass, which only changes (through propagation) during maneuvers.
\begin{figure}[htb]
\begin{center}
\includegraphics[scale=0.5]{Images/PropVectorComponents.eps}
\caption{\label{figure:PropVectorComponents}Representative Elements of a PropVector}
\end{center}
\end{figure}

This ordering can be seen more explicitly in Figure~\ref{figure:ThreeSatPropVector}. The PropVector shown in this figure is a vector constructed for three spacecraft, where the mission needs to propagate the trajectory, state transition matrix, and attitude for all three while maneuvering all three simultaneously.

\begin{figure}[htb]
\begin{center}
\includegraphics[scale=0.5]{Images/ThreeSatPropVector.eps}
\caption{\label{figure:ThreeSatPropVector}Element Arrangement of a PropVector for Three Spacecraft}
\end{center}
\end{figure}

Figure~\ref{figure:SelectPropVector} shows another example, where the propagation need not integrate every element of all of the spacecraft. In this example, the trajectory is integrated for all three spacecraft. The state transition matrix is only propagated for the first and third spacecraft, the attitude is propagated for the second, and the first spacecraft is depleting mass during a maneuver.

\begin{figure}[htb]
\begin{center}
\includegraphics[scale=0.5]{Images/ThreeSatActivePropVector.eps}
\caption{\label{figure:SelectPropVector}Three Spacecraft Arrangement for Select Propagation}
\end{center}
\end{figure}

Figure~\ref{figure:AttitudePropVector} \textbf{This figure needs updating to include the second PropVector for the trajectory piece} shows a mixed mode propagation, where the trajectory for our three spacecraft is propagated using a precalculated, ephemeris based propagator and the attitude is propagated numerically.

\begin{figure}[htb]
\begin{center}
\includegraphics[scale=0.5]{Images/ThreeSatAttitudePropVector.eps}
\caption{\label{figure:AttitudePropVector}PropVector for Attitude Only Propagation on Three Spacecraft}
\end{center}
\end{figure}
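To make the ordering rules concrete, the sketch below assembles an index layout for the three spacecraft of Figure~\ref{figure:SelectPropVector}. It is illustrative pseudocode only and is not GMAT source; in particular, the element sizes and the grouping of like elements across spacecraft are assumptions made for the example.

\begin{verbatim}
# Hypothetical sketch of the PropVector ordering rules described above:
# trajectory blocks first, then trajectory-associated matrices (STM),
# then attitude data, and finally transitory elements such as mass.
ELEMENT_SIZES = {
    'CARTESIAN': 6,      # position and velocity (assumed)
    'STM': 36,           # 6 x 6 state transition matrix (assumed)
    'QUARTERNION': 7,    # quaternion plus angular rates (assumed)
    'MASS': 1,
}
ORDER = ['CARTESIAN', 'STM', 'QUARTERNION', 'MASS']

def build_layout(requests):
    # 'requests' maps a spacecraft name to the element types it propagates.
    layout, index = [], 0
    for element in ORDER:
        for sat, elements in requests.items():
            if element in elements:
                layout.append((sat, element, index))
                index += ELEMENT_SIZES[element]
    return layout, index   # 'index' ends up as the total PropVector dimension

layout, dimension = build_layout({
    'Sat1': ['CARTESIAN', 'STM', 'MASS'],
    'Sat2': ['CARTESIAN', 'QUARTERNION'],
    'Sat3': ['CARTESIAN', 'STM'],
})
\end{verbatim}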
\section{Alfredo Sauce}
\label{alfredoSauce}
\setcounter{secnumdepth}{0}
Time: 20 minutes (5 minute prep, 15 minutes cooking)
Serves: 4
\begin{multicols}{2}
\subsection*{Ingredients}
\begin{itemize}
\item 1 pound peeled shrimp or chicken breast
\item Salt and pepper to taste
\item Garlic powder
\item Butter or olive oil to saute
\item \( \frac{1}{2} \) cup butter
\item 2 cups heavy whipping cream (higher fat content is better)
\item 3 cloves garlic, crushed or minced
\item 3 cups fresh-grated Parmigiano-Reggiano
\item \( \frac{1}{4} \) cup fresh chopped oregano, parsley, or marjoram
\end{itemize}
\subsection*{Hardware}
\begin{itemize}
\item Skillet
\end{itemize}
\clearpage
\subsection*{Instructions}
\begin{enumerate}
\item Add salt, pepper, and garlic powder to 1 pound shrimp or chicken.
\item Saute shrimp in butter. Use olive oil for chicken.
\item Set aside protein.
\item In a skillet or saucepan, melt \( \frac{1}{2} \) cup butter at medium-low heat.
\item Add 2 cups heavy cream, and simmer for 5 minutes while stirring.
\item Add 3 cloves minced garlic to sauce, stir briefly.
\item Add 3 cups grated cheese, stir cheese into sauce until the mixture is smooth.
\item Mix in \( \frac{1}{4} \) cup oregano (or other green).
\item Serve over protein source, usually on pasta (fettuccine).
\end{enumerate}
\subsection*{Notes}
\begin{itemize}
\item I tend to use really good heavy cream and actual Parmigiano-Reggiano, as opposed to the faux American Parmesan cheese. The taste of the sauce really benefits from good ingredients.
\end{itemize}
\end{multicols}
\clearpage
%!TEX root = main.tex
\newcommand{\chapdir}{glub}
%
% Chapter image boolean
%
\newif\ifusechapterimage
\usechapterimagetrue
\newcommand{\thechapterimage}{}%
\newcommand{\chapterimage}[1]{\ifusechapterimage\renewcommand{\thechapterimage}{#1}\fi}%
\newcommand{\mychapters}[3]{ % Start chapters and demarcate mini TOCs
\chapterimage{#3}%
\chapter{#1}%
\label{ch:#2}%
\startcontents[chapters]%
\WriteChap{#1}%
}
\newcommand{\marginlessinput}[1]{%
% marginless input
\newgeometry{left=2cm,right=2cm,bottom=1cm,top=2cm}%
\input{#1}%
\restoregeometry%
}
\newcommand{\marginlesspdf}[1]{
\newgeometry{left=2cm,right=2cm,bottom=2cm,top=2cm}
\noindent\makebox[\textwidth]{\includegraphics[width=\paperwidth]{#1}}
\restoregeometry
}
% mini toc
\newcommand\chapterminitoc{%
\printcontents[chapters]{}{1}{\setcounter{tocdepth}{2}}%
}
% objective
\newcommand\objective[1]{%
\vspace{0.2cm}
\begin{derivation}{Objective}
#1
\end{derivation}
\vspace{0.2cm}
}
\newcommand\invisiblesection[1]{%
\refstepcounter{section}%
\addcontentsline{toc}{section}{\protect\numberline{\thesection}#1}%
\sectionmark{#1}}

%----------------------------------------------------------------------------------------
% Math specific stuff
%----------------------------------------------------------------------------------------

%
% https://tex.stackexchange.com/questions/37912/how-to-draw-the-parallel-circuits-sign
%
\newcommand{\pplus}{\mathbin{\!/\mkern-5mu+\mkern-5mu/\!}}
\newcommand{\pminus}{\mathbin{\!/\mkern-5mu-\mkern-5mu/\!}}

% Copyleft
\newcommand{\copyleft}{\reflectbox{\copyright}}

% long division
\newcommand\longdiv[2]{%
$\strut#1$\kern.25em\smash{\raise.3ex\hbox{$\big)$}}$\mkern-8mu
\overline{\enspace\strut#2}$}

% new decimal symbols
\def\0{\mbox{\begin{picture}(11,12)(0,0)
\put(5.5,5){\circle{9}}
\end{picture} }}
\def\6{\mbox{\begin{picture}(11,12)(0,0)
\put(1,0){\line(1,0){10}}
\put(1,0){\line(0,1){10}}
\put(1,10){\line(1,-1){10}}
\end{picture} }}
\def\7{\mbox{\begin{picture}(11,12)(0,0)
\put(1,10){\line(1,0){10}}
\put(11,10){\line(0,-1){10}}
\end{picture} }}
\def\8{\mbox{\begin{picture}(11,12)(0,0)
\put(1,0){\line(0,1){10}}
\put(1,0){\line(1,1){10}}
\put(1,10){\line(1,0){10}}
\end{picture} }}
\def\9{\mbox{\begin{picture}(11,12)(0,0)
\put(1,0){\line(1,0){10}}
\put(11,0){\line(0,1){10}}
\end{picture} }}
\def\5{\mbox{\begin{picture}(11,12)(0,0)
\put(5.5,0){\oval(8,20)[t]}
\end{picture} }}
\def\1{\mbox{\begin{picture}(11,12)(0,0)
\put(1,0){\line(1,0){10}}
\put(1,0){\line(0,1){10}}
\end{picture} }}
\def\2{\mbox{\begin{picture}(11,12)(0,0)
\put(1,10){\line(1,0){10}}
\put(11,10){\line(0,-1){10}}
\put(1,10){\line(1,-1){10}}
\end{picture} }}
\def\3{\mbox{\begin{picture}(11,12)(0,0)
\put(1,0){\line(0,1){10}}
\put(1,10){\line(1,0){10}}
\end{picture} }}
\def\4{\mbox{\begin{picture}(11,12)(0,0)
\put(1,0){\line(1,0){10}}
\put(11,0){\line(0,1){10}}
\put(1,0){\line(1,1){10}}
\end{picture} }}

%%%%%%%%%%%%%%%%
%
% TRIANGLE OF POWER
%
% https://tex.stackexchange.com/questions/307833/how-to-represent-the-triangle-of-power-in-latex
%
%%%%%%%%%%%%%%%

\newcommand{\dotriangle}[1]{%
\raisebox{-.7ex}{$\vcenter{#1\kern.2ex\hbox{$\triangle$}\kern.2ex}$}%
}

\newcommand{\tripow}[3]{% Syntax: \tripow{#1}{#2}{#3} gives you #1 ^ {#2} = #3
\mathop{% We want it to be an operator
\mathchoice% We want different functionality in text and display mode
{% DISPLAY MODE
\vphantom{\dotriangle\LARGE}% \vphantom off-set: places the bottom entries.
\rule[-1.4ex]{0.1em}{0pt}% Syntax: [<vertical drop #1>]{<left margin>}{<Should be 0>}
_{\scriptstyle #1}% style of #1 entry
{\overset{\scriptstyle #2}% style of #2 entry
{\dotriangle\LARGE}}% Size of the displayed operator - should match the \vphantom off-set.
\rule[-1.4ex]{0em}{0pt}% Syntax: [<vertical drop #3>]{<Should be 0>}{<Should be 0>}
_{\scriptstyle #3}% style of #3 entry
\rule[0ex]{0.1em}{0pt}% Syntax: [<Should be 0>]{<right margin>}{<Should be 0>}
}%
{% TEXT MODE
\vphantom{\dotriangle\normalsize}%
\rule[-1.05ex]{-0.7ex}{0pt}%
_{#1}%
\overset{#2}%
{\dotriangle\normalsize}% size in text mode
\rule[-1.05ex]{0pt}{0pt}%
_{#3}%
\rule[0ex]{-0.2em}{0pt}%
}%
{% SCRIPT MODE
\vphantom{\dotriangle\normalsize}%
\rule[-1.05ex]{-0.8ex}{0pt}%
_{\scriptstyle #1}%
{\overset{\scriptstyle #2}%
{\dotriangle\normalsize}}% size in script mode
\rule[-1.05ex]{0pt}{0pt}%
_{\scriptstyle #3}%
\rule[0ex]{-0.3em}{0pt}%
}%
{}% SCRIPTSCRIPT MODE
}%
}
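%
% Usage sketch (illustrative only; the chapter image path, the chapter title
% and the `derivation' environment used by \objective are assumed to be
% provided elsewhere in the class/preamble):
%
%   \mychapters{Sequences and Series}{sequences}{\chapdir/cover.jpg}
%   \chapterminitoc
%   \objective{State the definition of convergence and prove the limit laws.}
%
% In math mode, \tripow{x}{n}{y} typesets the ``triangle of power'' notation
% for $x^n = y$ (e.g. $\tripow{2}{3}{8}$), and \longdiv{13}{339} draws a
% long-division bracket with 13 as the divisor and 339 as the dividend.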
{ "alphanum_fraction": 0.573358271, "avg_line_length": 27.0337078652, "ext": "tex", "hexsha": "92d5ab141ad632e52221fffd381fd77a4ea965cd", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "011c16427ada1b1e3df8e66c02566a5d5ac8abcf", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "aquatiki/AnalysisTextbook", "max_forks_repo_path": "macros.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "011c16427ada1b1e3df8e66c02566a5d5ac8abcf", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "aquatiki/AnalysisTextbook", "max_issues_repo_path": "macros.tex", "max_line_length": 102, "max_stars_count": 2, "max_stars_repo_head_hexsha": "011c16427ada1b1e3df8e66c02566a5d5ac8abcf", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "aquatiki/AnalysisTextbook", "max_stars_repo_path": "macros.tex", "max_stars_repo_stars_event_max_datetime": "2019-07-07T12:32:53.000Z", "max_stars_repo_stars_event_min_datetime": "2017-10-08T15:05:17.000Z", "num_tokens": 1790, "size": 4812 }
\documentclass[a4paper,11pt]{article}

\title{Calorimeter moments analysis}
\author{Luca Baldini ([email protected]),
Johan Bregeon ([email protected])}

\usepackage{ifthen}
\usepackage{graphicx}
\usepackage{amsmath}
\usepackage{hyperref}
\usepackage{bbm}
\usepackage[margin=2cm,bottom=3cm,top=4cm,marginparsep=0pt,marginparwidth=0pt]%
{geometry}
\usepackage[margin=1cm, font=small, labelfont=bf, labelsep=endash]{caption}

\newcommand{\pder}[2]{\frac{\partial#1}{\partial#2}}
\newcommand{\pdersec}[3]%
{\ifthenelse{\equal{#2}{#3}}%
{\frac{\partial^2#1}{\partial#2^2}}%
{\frac{\partial^2#1}{\partial#2\partial#3}}%
}
\newcommand{\itm}{\mathbbm I}
\newcommand{\itc}[1]{\itm_{#1}}
\newcommand{\firstder}[2]{\frac{{\rm d}#1}{{\rm d}#2}}
\newcommand{\secder}[2]{\frac{{\rm d}^2#1}{{\rm d}#2^2}}
\newcommand{\tmax}{t_{\rm max}}
\newcommand{\diff}{{\rm d}}
\newcommand{\xdir}{\ensuremath{x_{\rm dir}}}
\newcommand{\ydir}{\ensuremath{y_{\rm dir}}}
\newcommand{\zdir}{\ensuremath{z_{\rm dir}}}

\begin{document}

\maketitle

\abstract{These are some (more or less) random notes about the calorimeter
moments analysis. The first part is a brief description of the basic code
that I put together while trying to understand it. There's also some stuff
dealing with possible improvements of the moments analysis, namely the
measurement of the shower skewness and the error analysis aimed at the
projection of the calorimeter clusters into the ACD.}

\section{Basics of the CAL moments analysis}

The code for the moments analysis basically calculates the moment of inertia
tensor (using energy as the weight instead of mass) and then diagonalizes
this to get the three principal axes. The basic definitions can be found in
\cite{goldstein,landau} and in our case they read:
\begin{align}
\itc{xx} = \sum_{i=1}^n w_i(r_i^2 - x_i^2),\quad
\itc{yy} &= \sum_{i=1}^n w_i(r_i^2 - y_i^2),\quad
\itc{zz} = \sum_{i=1}^n w_i(r_i^2 - z_i^2)\\
\itc{xy} = -\sum_{i=1}^n w_ix_iy_i,\quad
\itc{xz} &= -\sum_{i=1}^n w_ix_iz_i,\quad
\itc{yz} = -\sum_{i=1}^n w_iy_iz_i
\end{align}
where the index $i$ runs over the $n$ hits in the calorimeter and the $w_i$
are the weights associated with the hits (essentially the energy release).
In addition to the moment of inertia, the sum of weights and the coordinates
of the energy centroid are also used:
\begin{align}
W &= \sum_{i=1}^n w_i\\
\mathbf{r}_c &= \frac{\sum_{i=1}^n w_i\mathbf{r}_i}{W}
\end{align}

In order to reduce the tensor of inertia to the principal axes we have to
solve the secular equation:
\begin{equation}
\det(\itm - \lambda{\mathbbm 1}) =
\det\begin{pmatrix}
\itc{xx} - \lambda & \itc{xy} & \itc{xz}\\
\itc{xy} & \itc{yy} - \lambda & \itc{yz}\\
\itc{xz} & \itc{yz} & \itc{zz} - \lambda
\end{pmatrix} = 0
\end{equation}
which is a cubic equation in $\lambda$ yielding the three eigenvalues.
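Before working through the closed-form solution of this cubic, it is worth
noting that the whole construction is easy to cross-check numerically. The
snippet below is only an illustrative sketch (not the actual reconstruction
code); it assumes that the hit positions and weights are available as
\texttt{numpy} arrays and that the coordinates are measured from the energy
centroid:
\begin{verbatim}
import numpy as np

def inertia_tensor(pos, w):
    # pos: (n, 3) hit positions relative to the centroid; w: (n,) weights
    r2 = np.sum(pos**2, axis=1)
    I = np.zeros((3, 3))
    for k in range(3):
        I[k, k] = np.sum(w * (r2 - pos[:, k]**2))
    for k, l in ((0, 1), (0, 2), (1, 2)):
        I[k, l] = I[l, k] = -np.sum(w * pos[:, k] * pos[:, l])
    return I

rng = np.random.default_rng(0)
xyz = rng.normal(size=(100, 3)) * (1.0, 1.0, 3.0)   # elongated toy "shower"
w = rng.uniform(0.1, 1.0, size=100)
centroid = np.average(xyz, axis=0, weights=w)
I = inertia_tensor(xyz - centroid, w)
eigenvalues, eigenvectors = np.linalg.eigh(I)       # columns = principal axes
\end{verbatim}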
By working out the tedious algebra we can write the equation as:
$$
\lambda^3 + c_2\lambda^2 + c_1\lambda + c_0 = 0
$$
where:
\begin{align}
c_2 &= -(\itc{xx} + \itc{yy} + \itc{zz})\\
c_1 &= \itc{xx}\itc{yy} + \itc{yy}\itc{zz} + \itc{xx}\itc{zz} -
(\itc{xy}^2 + \itc{yz}^2 + \itc{xz}^2)\\
c_0 &= -\itc{xx}\itc{yy}\itc{zz} - 2\itc{xy}\itc{yz}\itc{xz} +
\itc{xx}\itc{yz}^2 + \itc{yy}\itc{xz}^2 + \itc{zz}\itc{xy}^2
\end{align}
If we now define a new variable $\lambda' = \lambda + c_2/3$, the previous
equation becomes:
$$
\lambda'^3 + a\lambda' + b = 0
$$
where:
\begin{align}
a &= \left(\frac{3c_1 - c_2^2}{3}\right)\\
b &= \left(\frac{27c_0 + 2c_2^3 - 9c_1c_2}{27}\right)
\end{align}
(the algebra is fully worked out in \cite{wolfram}). We now set:
\begin{align}
m &= 2\sqrt{\frac{-a}{3}}\\
\psi &= \frac{1}{3}\arccos\left(\frac{3b}{am}\right)
\end{align}
and, finally, we get the three real solutions (guaranteed by the fact that
the tensor of inertia is symmetric):
\begin{align}
\lambda_0 &= m\cos(\psi) - c_2/3\\
\lambda_1 &= m\cos(\psi + 2\pi/3) - c_2/3\\
\lambda_2 &= m\cos(\psi + 4\pi/3) - c_2/3
\end{align}

Once we have the three eigenvalues we can work out the calculation of the
eigenvectors $\mathbf{e}^i$ ($i = 0\ldots2$) defined by:
$$
\itm\mathbf{e}^i = \lambda_i\mathbf{e}^i
$$
Following these conventions, $\lambda_1$ is the smallest eigenvalue and, as a
consequence, $\mathbf{e}^1$ is the principal axis of the cluster (the moment
of inertia is smallest about the axis along which the cluster is elongated).

Once the three principal axes of the cluster have been found, the cluster
$\chi^2$ (normalized to the number of \emph{degrees of freedom}) is
calculated as:
\begin{equation}
\chi^2 = \frac{\sum_{i=1}^n w_i d_i^2}{nW}
\end{equation}
where $d_i$ are the distances from each of the calorimeter hits to the axis
parallel to $\mathbf{e}^1$ and passing through the cluster centroid.

Finally, some well-known Merit quantities are calculated:
\begin{align}
\texttt{CalTransRms} &= \sqrt{\frac{|\lambda_1|}{W}}\\
\texttt{CalLongRms} &= \sqrt{\frac{|\lambda_0| + |\lambda_2|}{2W\log L}}\\
\texttt{CalLRmsAsym} &= \sqrt{\frac{|\lambda_0| - |\lambda_2|}
{|\lambda_0| + |\lambda_2|}}
\end{align}
where $L$ is the number of radiation lengths traversed.

\subsection{Outline of the iterative moments analysis}

Put here some details about the iteration scheme, as they are relevant for
the calculation of the skewness (i.e. you get significantly different answers
at different steps).

\section{The (toy) problem in 2D}

I thought the problem of the diagonalization of the inertia tensor in 2D
could be useful for working out the error analysis, so here is a quick look
at it.
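Before moving on to the two-dimensional toy problem, it is worth noting that
the trigonometric solution above is easy to cross-check against a library
diagonalization; the snippet below is only an illustrative \texttt{numpy}
sketch (not the reconstruction code itself):
\begin{verbatim}
import numpy as np

def eigvals_closed_form(I):
    # trigonometric solution of the characteristic cubic of a symmetric 3x3 tensor
    c2 = -(I[0, 0] + I[1, 1] + I[2, 2])
    c1 = (I[0, 0]*I[1, 1] + I[1, 1]*I[2, 2] + I[0, 0]*I[2, 2]
          - (I[0, 1]**2 + I[1, 2]**2 + I[0, 2]**2))
    c0 = (-I[0, 0]*I[1, 1]*I[2, 2] - 2*I[0, 1]*I[1, 2]*I[0, 2]
          + I[0, 0]*I[1, 2]**2 + I[1, 1]*I[0, 2]**2 + I[2, 2]*I[0, 1]**2)
    a = (3*c1 - c2**2) / 3.
    b = (27*c0 + 2*c2**3 - 9*c1*c2) / 27.
    m = 2*np.sqrt(-a/3.)
    psi = np.arccos(3*b/(a*m)) / 3.
    return np.array([m*np.cos(psi + 2*np.pi*k/3.) - c2/3. for k in range(3)])

A = np.random.default_rng(1).normal(size=(3, 3))
I = A @ A.T                       # any symmetric (positive definite) tensor
assert np.allclose(np.sort(eigvals_closed_form(I)), np.linalg.eigvalsh(I))
\end{verbatim}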
The basic definitions read as:
\begin{align}
\itc{xx} = \sum_{i=1}^n w_i y_i^2,\quad
\itc{yy} = \sum_{i=1}^n w_i x_i^2,\quad
\itc{xy} = -\sum_{i=1}^n w_ix_iy_i
\end{align}
and the secular equation is:
\begin{equation}
\det(\itm - \lambda{\mathbbm 1}) =
\det\begin{pmatrix}
\itc{xx} - \lambda & \itc{xy}\\
\itc{xy} & \itc{yy} - \lambda
\end{pmatrix} = 0
\end{equation}
The eigenvalues are readily found:
\begin{align}
\lambda_0 &= \frac{(\itc{xx} + \itc{yy}) -
\sqrt{(\itc{xx} - \itc{yy})^2 + 4\itc{xy}^2}}{2}\\
\lambda_1 &= \frac{(\itc{xx} + \itc{yy}) +
\sqrt{(\itc{xx} - \itc{yy})^2 + 4\itc{xy}^2}}{2}
\end{align}
At this point, assuming that $\lambda_0$ is the smallest eigenvalue, we're
left with the problem of calculating the corresponding eigenvector
$\mathbf{e}^0$, obeying the equation:
$$
\itm \mathbf{e}^0 = \lambda_0\mathbf{e}^0
$$
Since the problem is two-dimensional, the two eigenvectors can be
parametrized in terms of a single rotation angle $\phi$ (to be determined),
representing the angle of the principal axis with respect to the original
reference frame i.e.:
\begin{equation}
\begin{pmatrix}
e^0_x\\
e^0_y
\end{pmatrix} =
\begin{pmatrix}
\cos\phi\\
\sin\phi
\end{pmatrix},\quad
\begin{pmatrix}
e^1_x\\
e^1_y
\end{pmatrix} =
\begin{pmatrix}
-\sin\phi\\
\cos\phi
\end{pmatrix}
\end{equation}
By putting everything together, from the definition of the principal axis we
get:
$$
\begin{pmatrix}
\itc{xx} & \itc{xy}\\
\itc{xy} & \itc{yy}
\end{pmatrix}
\begin{pmatrix}
\cos\phi\\
\sin\phi
\end{pmatrix} = \lambda_0
\begin{pmatrix}
\cos\phi\\
\sin\phi
\end{pmatrix}
$$
Eliminating $\lambda_0$ between the two equations of this system yields:
$$
\frac{(\itc{xx} - \itc{yy})}{\itc{xy}}\tan\phi = 1 - \tan^2\phi
$$
and eventually, through the trigonometric identity
$$
\tan(2\phi) = \frac{2\tan\phi}{1 - \tan^2\phi}
$$
we get:
\begin{equation}
\phi = -\frac{1}{2} \arctan
\left(  \frac{2\itc{xy}}{\itc{yy} - \itc{xx}} \right)
\end{equation}

The rotation matrix between the original system and the one defined by the
principal axes has the (transposed) eigenvectors as its rows:
\begin{equation}
S = \begin{pmatrix}
e^0_x & e^0_y\\
e^1_x & e^1_y
\end{pmatrix} =
\begin{pmatrix}
\cos\phi & \sin\phi\\
-\sin\phi & \cos\phi
\end{pmatrix}
\end{equation}
and obviously we have:
\begin{equation}\label{eq:rotation}
\lambda =
\begin{pmatrix}
\lambda_0 & 0\\
0 & \lambda_1
\end{pmatrix} = S\;\itm\;S^{-1} = S\;\itm\;S^{\rm T}
\end{equation}

\subsection{Error analysis in 2 dimensions}

As a first ingredient we'll need the derivatives of the components of the
tensor of inertia with respect to the coordinates and the weights:
\begin{align}
\pder{\itc{xx}}{x_i} &= 0,\quad
\pder{\itc{xx}}{y_i} = 2w_iy_i,\quad
\pder{\itc{xx}}{w_i} = y_i^2\\
\pder{\itc{yy}}{x_i} &= 2w_ix_i,\quad
\pder{\itc{yy}}{y_i} = 0,\quad
\pder{\itc{yy}}{w_i} = x_i^2\\
\pder{\itc{xy}}{x_i} &= -w_iy_i,\quad
\pder{\itc{xy}}{y_i} = -w_ix_i,\quad
\pder{\itc{xy}}{w_i} = -x_iy_i
\end{align}
We can then calculate the full covariance matrix of the errors using the
usual formula (see \cite{pdg}, section 32.1.4 for instance). Assuming that
the errors on the two positions and on the weights are mutually uncorrelated
(i.e. their covariance matrix is diagonal), we have:
\begin{equation}
\Sigma_{k-l} = \sum_{i=1}^n
\pder{\itc{k}}{x_i}\pder{\itc{l}}{x_i}(\Delta x_i)^2 +
\pder{\itc{k}}{y_i}\pder{\itc{l}}{y_i}(\Delta y_i)^2 +
\pder{\itc{k}}{w_i}\pder{\itc{l}}{w_i}(\Delta w_i)^2
\end{equation}
where $k$ and $l$ run over the three (double) indexes $xx$, $yy$ and $xy$.
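A quick numerical check of the 2D eigenvalues and of the rotation angle
(again, only an illustrative sketch assuming \texttt{numpy}):
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(2)
x, y = rng.normal(size=(2, 50))
w = rng.uniform(0.1, 1.0, size=50)

Ixx, Iyy, Ixy = np.sum(w*y**2), np.sum(w*x**2), -np.sum(w*x*y)
I = np.array([[Ixx, Ixy], [Ixy, Iyy]])

disc = np.sqrt((Ixx - Iyy)**2 + 4*Ixy**2)
lam = np.array([(Ixx + Iyy - disc)/2, (Ixx + Iyy + disc)/2])
assert np.allclose(lam, np.linalg.eigvalsh(I))

phi = -0.5*np.arctan(2*Ixy/(Iyy - Ixx))
S = np.array([[np.cos(phi), np.sin(phi)], [-np.sin(phi), np.cos(phi)]])
L = S @ I @ S.T     # diagonal up to rounding; which eigenvalue ends up in
                    # L[0, 0] depends on the arctan branch
assert abs(L[0, 1]) < 1e-9*np.trace(I)
\end{verbatim}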
We're ready to work out the details:
\begin{align}
\Sigma_{xx-xx} &= (\Delta\itc{xx})^2 =
\sum_{i=1}^n \left[4w^2_iy^2_i(\Delta y_i)^2 + y_i^4(\Delta w_i)^2\right]\\
\Sigma_{yy-yy} &= (\Delta\itc{yy})^2 =
\sum_{i=1}^n \left[4w^2_ix^2_i(\Delta x_i)^2 + x_i^4(\Delta w_i)^2\right]\\
\Sigma_{xy-xy} &= (\Delta\itc{xy})^2 =
\sum_{i=1}^n \left[w_i^2y_i^2(\Delta x_i)^2 + w_i^2x_i^2(\Delta y_i)^2 +
x_i^2y_i^2(\Delta w_i)^2 \right]\\
\Sigma_{xx-yy} &= \sum_{i=1}^n \left[ x_i^2y_i^2 (\Delta w_i)^2\right]\\
\Sigma_{xx-xy} &= -\sum_{i=1}^n \left[ 2w_i^2x_iy_i(\Delta y_i)^2 +
x_iy_i^3(\Delta w_i)^2 \right]\\
\Sigma_{xy-yy} &= -\sum_{i=1}^n \left[ 2w_i^2x_iy_i(\Delta x_i)^2 +
x_i^3y_i(\Delta w_i)^2 \right]
\end{align}

The rest of this section follows closely the prescription described in
\cite{errors} for the error propagation. We can slice the $2 \times 2$ tensor
of inertia and define a 4-component vector with the two columns one on top of
the other (this is what we call the $vec$ operator):
\begin{equation}
vec(\itm) =
\begin{pmatrix}
\itc{xx}\\
\itc{xy}\\
\itc{xy}\\
\itc{yy}
\end{pmatrix}
\end{equation}
That said, we can rewrite the equation (\ref{eq:rotation}) using the
Kronecker product of the rotation matrix with itself
$$
T = S \otimes S
$$
as:
\begin{equation}\label{eq:tensor_transform}
vec(\lambda) =
\begin{pmatrix}
\lambda_0\\
0\\
0\\
\lambda_1
\end{pmatrix} = T vec(\itm)
\end{equation}
It is useful to rearrange the elements of the $vec$ operator in such a way
that the diagonal elements of the tensor come first, followed by the
non-diagonal ones, getting rid of the duplicated terms. This is accomplished
by introducing the matrix:
\begin{equation}
D =
\begin{pmatrix}
1 & 0 & 0 & 0\\
0 & 0 & 0 & 1\\
0 & \frac{1}{2} & \frac{1}{2} & 0
\end{pmatrix}
\end{equation}
which allows us to define a new operator $v_d$:
\begin{equation}
v_d(\itm) = D vec(\itm) =
\begin{pmatrix}
1 & 0 & 0 & 0\\
0 & 0 & 0 & 1\\
0 & \frac{1}{2} & \frac{1}{2} & 0
\end{pmatrix}
\begin{pmatrix}
\itc{xx}\\
\itc{xy}\\
\itc{xy}\\
\itc{yy}
\end{pmatrix} =
\begin{pmatrix}
\itc{xx}\\
\itc{yy}\\
\itc{xy}
\end{pmatrix}
\end{equation}
In this representation, the covariance matrix of $v_d(\itm)$ reads:
\begin{equation}
\Sigma_{v_d(\itm)} =
\begin{pmatrix}
\Sigma_{xx-xx} & \Sigma_{xx-yy} & \Sigma_{xx-xy}\\
\Sigma_{xx-yy} & \Sigma_{yy-yy} & \Sigma_{xy-yy}\\
\Sigma_{xx-xy} & \Sigma_{xy-yy} & \Sigma_{xy-xy}
\end{pmatrix}
\end{equation}
in terms of the quantities we have calculated a few lines above. In order to
go back from the $v_d$ to the $vec$ representation we need the so-called
pseudo-inverse of $D$:
\begin{equation}
D^+ =
\begin{pmatrix}
1 & 0 & 0\\
0 & 0 & 1\\
0 & 0 & 1\\
0 & 1 & 0
\end{pmatrix}
\end{equation}
The paper \cite{errors} is wrong in this respect (see \cite{errors_corr}).
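A direct transcription of these sums into code is straightforward
(illustrative sketch; \texttt{x}, \texttt{y}, \texttt{w} and the
corresponding errors are assumed to be \texttt{numpy} arrays of the same
length):
\begin{verbatim}
import numpy as np

def cov_vd_2d(x, y, w, dx, dy, dw):
    # 3x3 covariance matrix of (Ixx, Iyy, Ixy) for uncorrelated hit errors
    s_xx_xx = np.sum(4*w**2*y**2*dy**2 + y**4*dw**2)
    s_yy_yy = np.sum(4*w**2*x**2*dx**2 + x**4*dw**2)
    s_xy_xy = np.sum(w**2*y**2*dx**2 + w**2*x**2*dy**2 + x**2*y**2*dw**2)
    s_xx_yy = np.sum(x**2*y**2*dw**2)
    s_xx_xy = -np.sum(2*w**2*x*y*dy**2 + x*y**3*dw**2)
    s_xy_yy = -np.sum(2*w**2*x*y*dx**2 + x**3*y*dw**2)
    return np.array([[s_xx_xx, s_xx_yy, s_xx_xy],
                     [s_xx_yy, s_yy_yy, s_xy_yy],
                     [s_xx_xy, s_xy_yy, s_xy_xy]])
\end{verbatim}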
Using these definitions, equation (\ref{eq:tensor_transform}) can be
rewritten as:
\begin{equation}
v_d(\lambda) =
\begin{pmatrix}
\lambda_0\\
\lambda_1\\
0
\end{pmatrix} = D vec(\lambda) = DT vec(\itm) = DTD^+ v_d(\itm)
\end{equation}
If we define:
\begin{equation}
V = DTD^+
\end{equation}
we can rewrite the previous equation as:
\begin{equation}
v_d(\lambda) = V v_d(\itm)
\end{equation}

The infinitesimal change $\diff S$ in the rotation matrix $S$ when we change
the rotation angle $\phi$ by an infinitesimal amount $\diff\phi$ can be
easily calculated by differentiating $S$ itself:
$$
\diff S =
\begin{pmatrix}
-\sin\phi\;\diff\phi & \cos\phi\;\diff\phi\\
-\cos\phi\;\diff\phi & -\sin\phi\;\diff\phi\\
\end{pmatrix}
$$
If we introduce the antisymmetric tensor:
\begin{equation}
\Omega =
\begin{pmatrix}
0 & -\diff\phi\\
\diff\phi & 0
\end{pmatrix}
\end{equation}
we can rewrite the previous equation as:
\begin{equation}
\diff S = -S\Omega
\end{equation}
and, along the same lines, we have:
\begin{equation}
\diff S^{\rm T} = \Omega S^{\rm T}
\end{equation}
as can be verified by direct matrix multiplication. It seems a bit odd, here,
to call $\Omega$ an infinitesimal quantity (I would have rather named it
$\diff\Omega$), but we'll bravely follow the conventions used in the paper to
avoid confusion. We now define the quantity:
\begin{equation}
\Omega^p = S\Omega S^{\rm T} =
\begin{pmatrix}
0 & -\diff\phi\\
\diff\phi & 0\\
\end{pmatrix} = \Omega
\end{equation}
Putting it all together we have:
\begin{equation}
S \diff \itm S^{\rm T} = \diff\mu =
\Omega^p\lambda - \lambda\Omega^p + \diff\lambda =
\begin{pmatrix}
\diff\lambda_0 & (\lambda_0 - \lambda_1)\diff\phi\\
(\lambda_0 - \lambda_1)\diff\phi & \diff\lambda_1
\end{pmatrix}
\end{equation}
and we can write:
\begin{equation}
vec(\diff\mu) = G\beta
\end{equation}
where:
\begin{equation}
G =
\begin{pmatrix}
1 & 0 & 0\\
0 & 0 & (\lambda_1 - \lambda_0)\\
0 & 0 & (\lambda_1 - \lambda_0)\\
0 & 1 & 0
\end{pmatrix}
\end{equation}
and
\begin{equation}
\beta =
\begin{pmatrix}
\diff\lambda_0\\
\diff\lambda_1\\
-\diff\phi
\end{pmatrix}
\end{equation}
(again, $\beta$ is a differential quantity, so it is a bit odd to name it
without a $\diff$ in front). Further on through the paper:
\begin{equation}
v_d(\diff\itm) =
\begin{pmatrix}
d\itc{xx}\\
d\itc{yy}\\
d\itc{xy}
\end{pmatrix} =
D (S^{\rm T} \otimes S^{\rm T}) vec(d\mu) =
D (S^{\rm T} \otimes S^{\rm T}) G\beta = F\beta
\end{equation}
where we have defined:
\begin{equation}
F = D (S^{\rm T} \otimes S^{\rm T}) G
\end{equation}
All we have to do is invert this equation, namely find $F^{-1}$. If we were
dealing with square matrices, all we would have to do is:
$$
F^{-1} = \left( D (S^{\rm T} \otimes S^{\rm T}) G \right)^{-1} =
G^{-1} \left( S^{\rm T} \otimes S^{\rm T} \right)^{-1} D^{-1} =
G^{-1} \left( S \otimes S \right) D^{-1} = G^{-1} T D^{-1}
$$
But in fact $G$ and $D$ are rectangular matrices, so we again need the
pseudo-inverses. We already have the one for $D$, while for $G$, since
$G^{\rm T}G$ is not singular, the solution is even easier:
\begin{equation}
G^+ = \left( G^{\rm T}G \right)^{-1} G^{\rm T} =
\begin{pmatrix}
1 & 0 & 0 & 0\\
0 & 0 & 0 & 1\\
0 & \frac{1}{2(\lambda_1-\lambda_0)} & \frac{1}{2(\lambda_1-\lambda_0)} & 0
\end{pmatrix}
\end{equation}
(this satisfies $G^+G = I_{3\times3}$, as can be easily verified by direct
multiplication).
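In code, the two-dimensional version of this pseudo-inversion chain boils
down to a few lines (again an illustrative \texttt{numpy} sketch; the
eigenvalues and the rotation angle are those defined above):
\begin{verbatim}
import numpy as np

def f_inverse_2d(phi, lam0, lam1):
    # F^{-1} = G^+ (S x S) D^+ for the two-dimensional problem
    S = np.array([[np.cos(phi), np.sin(phi)], [-np.sin(phi), np.cos(phi)]])
    D_plus = np.array([[1, 0, 0], [0, 0, 1], [0, 0, 1], [0, 1, 0]])
    G_plus = np.array([[1., 0., 0., 0.],
                       [0., 0., 0., 1.],
                       [0., .5/(lam1 - lam0), .5/(lam1 - lam0), 0.]])
    G = np.array([[1., 0., 0.], [0., 0., lam1 - lam0],
                  [0., 0., lam1 - lam0], [0., 1., 0.]])
    assert np.allclose(G_plus @ G, np.eye(3))   # G+ G = I, as claimed above
    return G_plus @ np.kron(S, S) @ D_plus
\end{verbatim}
The covariance matrix of $(\diff\lambda_0, \diff\lambda_1, -\diff\phi)$ then
follows by sandwiching the covariance matrix of the previous snippet between
$F^{-1}$ and its transpose, as summarized next.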
Summarizing:
\begin{equation}
\beta = F^{-1}v_d(\diff\itm)
\end{equation}
and, more importantly, we get the full covariance matrix of the eigenvalues
and rotation angle:
\begin{equation}
\Sigma_\beta = F^{-1} \Sigma_{v_d(\itm)} (F^{-1})^T
\end{equation}
Once we have the error on the rotation angle:
\begin{equation}
(\Delta\phi)^2 = (\Sigma_\beta)_{33}
\end{equation}
the covariance matrix of the two components of the principal axis is:
\begin{equation}
\Sigma_{\mathbf{e}^0} =
\begin{pmatrix}
\pder{e^0_x}{\phi}\pder{e^0_x}{\phi}(\Delta\phi)^2 &
\pder{e^0_x}{\phi}\pder{e^0_y}{\phi}(\Delta\phi)^2\\
\pder{e^0_x}{\phi}\pder{e^0_y}{\phi}(\Delta\phi)^2 &
\pder{e^0_y}{\phi}\pder{e^0_y}{\phi}(\Delta\phi)^2
\end{pmatrix} =
\begin{pmatrix}
\sin^2\phi & -\sin\phi\cos\phi\\
-\sin\phi\cos\phi & \cos^2\phi
\end{pmatrix}(\Delta\phi)^2
\end{equation}

\section{Error analysis in 3 dimensions}

Along the same lines as in the previous subsection, the derivatives of the
components of the inertia tensor are:
\begin{align}
\pder{\itc{xx}}{x_i} &= 0,\quad
\pder{\itc{xx}}{y_i} = 2w_iy_i,\quad
\pder{\itc{xx}}{z_i} = 2w_iz_i,\quad
\pder{\itc{xx}}{w_i} = (y_i^2 + z_i^2)\\
\pder{\itc{yy}}{x_i} &= 2w_ix_i,\quad
\pder{\itc{yy}}{y_i} = 0,\quad
\pder{\itc{yy}}{z_i} = 2w_iz_i,\quad
\pder{\itc{yy}}{w_i} = (x_i^2 + z_i^2)\\
\pder{\itc{zz}}{x_i} &= 2w_ix_i,\quad
\pder{\itc{zz}}{y_i} = 2w_iy_i,\quad
\pder{\itc{zz}}{z_i} = 0,\quad
\pder{\itc{zz}}{w_i} = (x_i^2 + y_i^2)\\
\pder{\itc{xy}}{x_i} &= -w_iy_i,\quad
\pder{\itc{xy}}{y_i} = -w_ix_i,\quad
\pder{\itc{xy}}{z_i} = 0,\quad
\pder{\itc{xy}}{w_i} = -x_iy_i\\
\pder{\itc{xz}}{x_i} &= -w_iz_i,\quad
\pder{\itc{xz}}{y_i} = 0,\quad
\pder{\itc{xz}}{z_i} = -w_ix_i,\quad
\pder{\itc{xz}}{w_i} = -x_iz_i\\
\pder{\itc{yz}}{x_i} &= 0,\quad
\pder{\itc{yz}}{y_i} = -w_iz_i,\quad
\pder{\itc{yz}}{z_i} = -w_iy_i,\quad
\pder{\itc{yz}}{w_i} = -y_iz_i
\end{align}
The elements of the covariance matrix are:
\begin{equation}
\Sigma_{k-l} = \sum_{i=1}^n
\pder{\itc{k}}{x_i}\pder{\itc{l}}{x_i}(\Delta x_i)^2 +
\pder{\itc{k}}{y_i}\pder{\itc{l}}{y_i}(\Delta y_i)^2 +
\pder{\itc{k}}{z_i}\pder{\itc{l}}{z_i}(\Delta z_i)^2 +
\pder{\itc{k}}{w_i}\pder{\itc{l}}{w_i}(\Delta w_i)^2
\end{equation}
where now $k$ and $l$ run over the 6 independent (double) indexes of the
inertia tensor $xx$, $yy$, $zz$, $xy$, $xz$ and $yz$.
Therefore, for the $6\times6$ covariance matrix, we will have
$6(6+1)/2 = 21$ independent components:
\begin{align}
\Sigma_{xx-xx} &= \sum_{i=1}^n \left\{
4w_i^2 \left[ y_i^2(\Delta y_i)^2 + z_i^2(\Delta z_i)^2 \right] +
\left(y_i^2 + z_i^2\right)^2(\Delta w_i)^2
\right\}\\
\Sigma_{xx-yy} &= \sum_{i=1}^n \left\{
4w_i^2z_i^2 (\Delta z_i)^2 +
(x_i^2 + z_i^2)(y_i^2 + z_i^2)(\Delta w_i)^2
\right\}\\
\Sigma_{xx-zz} &= \sum_{i=1}^n \left\{
4w_i^2y_i^2 (\Delta y_i)^2 +
(x_i^2 + y_i^2)(y_i^2 + z_i^2)(\Delta w_i)^2
\right\}\\
\Sigma_{xx-xy} &= -\sum_{i=1}^n x_iy_i \left\{
2w_i^2 (\Delta y_i)^2 + (y_i^2 + z_i^2)(\Delta w_i)^2
\right\}\\
\Sigma_{xx-xz} &= -\sum_{i=1}^n x_iz_i \left\{
2w_i^2 (\Delta z_i)^2 + (y_i^2 + z_i^2)(\Delta w_i)^2
\right\}\\
\Sigma_{xx-yz} &= -\sum_{i=1}^n y_iz_i \left\{
2w_i^2 \left[ (\Delta y_i)^2 + (\Delta z_i)^2 \right] +
(y_i^2 + z_i^2)(\Delta w_i)^2
\right\}\\
\Sigma_{yy-yy} &= \sum_{i=1}^n \left\{
4w_i^2 \left[ x_i^2(\Delta x_i)^2 + z_i^2(\Delta z_i)^2 \right] +
\left(x_i^2 + z_i^2\right)^2(\Delta w_i)^2
\right\}\\
\Sigma_{yy-zz} &= \sum_{i=1}^n \left\{
4w_i^2x_i^2 (\Delta x_i)^2 +
(x_i^2 + y_i^2)(x_i^2 + z_i^2)(\Delta w_i)^2
\right\}\\
\Sigma_{yy-xy} &= -\sum_{i=1}^n x_iy_i \left\{
2w_i^2 (\Delta x_i)^2 + (x_i^2 + z_i^2)(\Delta w_i)^2
\right\}\\
\Sigma_{yy-xz} &= -\sum_{i=1}^n x_iz_i \left\{
2w_i^2 \left[ (\Delta x_i)^2 + (\Delta z_i)^2 \right] +
(x_i^2 + z_i^2)(\Delta w_i)^2
\right\}
\end{align}
\begin{align}
\Sigma_{yy-yz} &= -\sum_{i=1}^n y_iz_i \left\{
2w_i^2 (\Delta z_i)^2 + (x_i^2 + z_i^2)(\Delta w_i)^2
\right\}\\
\Sigma_{zz-zz} &= \sum_{i=1}^n \left\{
4w_i^2 \left[ x_i^2(\Delta x_i)^2 + y_i^2(\Delta y_i)^2 \right] +
\left(x_i^2 + y_i^2\right)^2(\Delta w_i)^2
\right\}\\
\Sigma_{zz-xy} &= -\sum_{i=1}^n x_iy_i \left\{
2w_i^2 \left[ (\Delta x_i)^2 + (\Delta y_i)^2 \right] +
(x_i^2 + y_i^2)(\Delta w_i)^2
\right\}\\
\Sigma_{zz-xz} &= -\sum_{i=1}^n x_iz_i \left\{
2w_i^2 (\Delta x_i)^2 + (x_i^2 + y_i^2)(\Delta w_i)^2
\right\}\\
\Sigma_{zz-yz} &= -\sum_{i=1}^n y_iz_i \left\{
2w_i^2 (\Delta y_i)^2 + (x_i^2 + y_i^2)(\Delta w_i)^2
\right\}\\
\Sigma_{xy-xy} &= \sum_{i=1}^n \left\{
w_i^2 \left[ y_i^2 (\Delta x_i)^2 + x_i^2 (\Delta y_i)^2 \right] +
x_i^2y_i^2 (\Delta w_i)^2
\right\}\\
\Sigma_{xy-xz} &= \sum_{i=1}^n y_iz_i \left\{
w_i^2 (\Delta x_i)^2 + x_i^2 (\Delta w_i)^2
\right\}\\
\Sigma_{xy-yz} &= \sum_{i=1}^n x_iz_i \left\{
w_i^2 (\Delta y_i)^2 + y_i^2 (\Delta w_i)^2
\right\}\\
\Sigma_{yz-yz} &= \sum_{i=1}^n \left\{
w_i^2 \left[ z_i^2 (\Delta y_i)^2 + y_i^2 (\Delta z_i)^2 \right] +
y_i^2z_i^2 (\Delta w_i)^2
\right\}\\
\Sigma_{yz-xz} &= \sum_{i=1}^n x_iy_i \left\{
w_i^2 (\Delta z_i)^2 + z_i^2 (\Delta w_i)^2
\right\}\\
\Sigma_{xz-xz} &= \sum_{i=1}^n \left\{
w_i^2 \left[ z_i^2 (\Delta x_i)^2 + x_i^2 (\Delta z_i)^2 \right] +
x_i^2z_i^2 (\Delta w_i)^2
\right\}
\end{align}
In analogy with the two-dimensional case we define:
\begin{equation}
D =
\begin{pmatrix}
1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\
0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0\\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 1\\
0 & \frac{1}{2} & 0 & \frac{1}{2} & 0 & 0 & 0 & 0 & 0\\
0 & 0 & \frac{1}{2} & 0 & 0 & 0 & \frac{1}{2} & 0 & 0\\
0 & 0 & 0 & 0 & 0 & \frac{1}{2} & 0 & \frac{1}{2} & 0
\end{pmatrix}
\end{equation}
so that
\begin{equation}
v_d(\itm) = D vec(\itm) =
\begin{pmatrix}
\itc{xx}\\
\itc{yy}\\
\itc{zz}\\
\itc{xy}\\
\itc{xz}\\
\itc{yz}
\end{pmatrix}
\end{equation}
and the $6\times6$ covariance matrix of the components of the inertia tensor
is:
\begin{equation}
\Sigma_{v_d(\itm)} =
\begin{pmatrix}
\Sigma_{xx-xx} & \Sigma_{xx-yy} & \Sigma_{xx-zz} &
\Sigma_{xx-xy} & \Sigma_{xx-xz} & \Sigma_{xx-yz}\\ \Sigma_{xx-yy} & \Sigma_{yy-yy} & \Sigma_{yy-zz} & \Sigma_{yy-xy} & \Sigma_{yy-xz} & \Sigma_{yy-yz}\\ \Sigma_{xx-zz} & \Sigma_{yy-zz} & \Sigma_{zz-zz} & \Sigma_{zz-xy} & \Sigma_{zz-xz} & \Sigma_{zz-yz}\\ \Sigma_{xx-xy} & \Sigma_{yy-xy} & \Sigma_{zz-xy} & \Sigma_{xy-xy} & \Sigma_{xy-xz} & \Sigma_{xy-yz}\\ \Sigma_{xx-xz} & \Sigma_{yy-xz} & \Sigma_{zz-xz} & \Sigma_{xy-xz} & \Sigma_{xz-xz} & \Sigma_{xz-yz}\\ \Sigma_{xx-yz} & \Sigma_{yy-yz} & \Sigma_{zz-yz} & \Sigma_{xy-yz} & \Sigma_{xz-yz} & \Sigma_{yz-yz}\\ \end{pmatrix} \end{equation} In the three-dimensional case $D^+$ reads: \begin{equation} D^+ = \begin{pmatrix} 1 & 0 & 0 & 0 & 0 & 0\\ 0 & 0 & 0 & 1 & 0 & 0\\ 0 & 0 & 0 & 0 & 1 & 0\\ 0 & 0 & 0 & 1 & 0 & 0\\ 0 & 1 & 0 & 0 & 0 & 0\\ 0 & 0 & 0 & 0 & 0 & 1\\ 0 & 0 & 0 & 0 & 1 & 0\\ 0 & 0 & 0 & 0 & 0 & 1\\ 0 & 0 & 1 & 0 & 0 & 0 \end{pmatrix} \end{equation} and the rotation matrix is obviously: \begin{equation} S = \begin{pmatrix} e^0_x & e^0_y & e^0_z\\ e^1_x & e^1_y & e^1_z\\ e^2_x & e^2_y & e^2_z \end{pmatrix} \end{equation} The last pieces of information that we need are: \begin{equation} S_p = \begin{pmatrix} 0 & S_{20} & -S_{10}\\ -S_{20} & 0 & S_{00}\\ S_{10} & -S_{00} & 0\\ 0 & S_{21} & -S_{11}\\ -S_{21} & 0 & S_{01}\\ S_{11} & -S_{01} & 0\\ 0 & S_{22} & -S_{12}\\ -S_{22} & 0 & S_{02}\\ S_{12} & -S_{02} & 0 \end{pmatrix} \end{equation} and: \begin{equation} G^+ = \begin{pmatrix} 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\ 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0\\ 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 1\\ 0 & 0 & 0 & 0 & 0 & \frac{1}{2(\lambda_2 - \lambda_1)} & 0 & \frac{1}{2(\lambda_2 - \lambda_1)} & 0\\ 0 & 0 & \frac{1}{2(\lambda_0 - \lambda_2)} & 0 & 0 & 0 & \frac{1}{2(\lambda_0 - \lambda_2)} & 0 & 0\\ 0 & \frac{1}{2(\lambda_1 - \lambda_0)} & 0 & \frac{1}{2(\lambda_1 - \lambda_0)} & 0 & 0 & 0 & 0 & 0 \end{pmatrix} \end{equation} And at this point we just follow the paper (the new one, not the old one): \begin{align} F^{-1} &= G^+ (S \otimes S) D^+\\ K &= \begin{pmatrix} 0_{9\times3} & S_p\\ I_{3\times3} & 0_{3\times3} \end{pmatrix} F^{-1} \end{align} and eventually: \begin{equation} \Sigma_{vec(S), \lambda} = K \Sigma_{v_d(\itm)} K^T \end{equation} \subsection{Simplified error analysis} The main purpose in this section is to recover the expression for the covariance matrix in the $(\xdir, \ydir, \zdir)$ representation when error estimates on $\theta$ and $\phi$ are available---either on an event-by-event basis or through some phenomenological parametrization. We will assume for simplicity that the errors on $\theta$ and $\phi$ are uncorrelated. 
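Before turning to this simplified treatment, a couple of elementary
consistency checks on the bookkeeping matrices introduced above can be done
numerically (an illustrative \texttt{numpy} sketch, not part of any official
implementation):
\begin{verbatim}
import numpy as np

# v_d ordering: (Ixx, Iyy, Izz, Ixy, Ixz, Iyz); vec() stacks the tensor columns
D = np.array([
    [1, 0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0, 1],
    [0, .5, 0, .5, 0, 0, 0, 0, 0],
    [0, 0, .5, 0, 0, 0, .5, 0, 0],
    [0, 0, 0, 0, 0, .5, 0, .5, 0]])
D_plus = np.array([
    [1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 1, 0],
    [0, 0, 0, 1, 0, 0], [0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1],
    [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1], [0, 0, 1, 0, 0, 0]])

A = np.random.default_rng(3).normal(size=(3, 3))
I = A @ A.T                                  # any symmetric tensor will do
assert np.allclose(D @ D_plus, np.eye(6))    # D is a left inverse of D^+
assert np.allclose(D_plus @ D @ I.flatten(), I.flatten())  # lossless on symmetric tensors

# transformation law vec(lambda) = (S x S) vec(I), with S built from the eigenvectors
lam, E = np.linalg.eigh(I)
S = E.T                                      # rows of S are the eigenvectors
assert np.allclose(np.kron(S, S) @ I.flatten(), np.diag(lam).flatten())
\end{verbatim}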
The basic transformation equations are: \begin{align} \xdir & = \sin\theta \cos\phi\\ \ydir & = \sin\theta \sin\phi\\ \zdir & = \cos\theta \end{align} and the elements of the covariance matrix, as a function of $\theta$ and $\phi$, read: \begin{equation} \Sigma_{ij} = E[C_i C_j] \end{equation} where, denoting the central values with the subscript zero: \begin{align} C_x & = (\sin\theta \cos\phi - \sin\theta_0 \cos\phi_0)\\ C_y & = (\sin\theta \sin\phi - \sin\theta_0 \sin\phi_0)\\ C_z & = (\cos\theta - \cos\theta_0) \end{align} We will take advantage of the fact that in the series expansion of the expectation values: \begin{align} E[f(\theta, \phi)] & \approx f(\theta_0, \phi_0) + \left[\pder{f(\theta, \phi)}{\theta}\right]_{\theta_0, \phi_0} \!\!\!\!\!\!\!\!\!\!E[(\theta - \theta_0)] + \left[\pder{f(\theta, \phi)}{\phi}\right]_{\theta_0, \phi_0} \!\!\!\!\!\!\!\!\!\!E[(\phi - \phi_0)] +\\\nonumber & \frac{1}{2}\left[\pdersec{f(\theta, \phi)}{\theta}{\theta} \right]_{\theta_0, \phi_0} \!\!\!\!\!\!\!\!\!\!E[(\theta - \theta_0)^2] + \frac{1}{2}\left[\pdersec{f(\theta, \phi)}{\phi}{\phi} \right]_{\theta_0, \phi_0} \!\!\!\!\!\!\!\!\!\!E[(\phi - \phi_0)^2] +\\\nonumber & \left[\pdersec{f(\theta, \phi)}{\theta}{\phi}\right]_{\theta_0, \phi_0} \!\!\!\!\!\!\!\!\!\!E[(\theta - \theta_0)(\phi - \phi_0)] \end{align} the first three terms vanish for obvious reasons, and so does the last, if the errors on $\theta$ and $\phi$ are not correlated. Therefore: \begin{align} E[f(\theta, \phi)] \approx \frac{1}{2}\left[\pdersec{f(\theta, \phi)}{\theta}{\theta} \right]_{\theta_0, \phi_0} \!\!\!\!\!\!\!\!\!\!\sigma^2_\theta + \frac{1}{2}\left[\pdersec{f(\theta, \phi)}{\phi}{\phi} \right]_{\theta_0, \phi_0} \!\!\!\!\!\!\!\!\!\!\sigma^2_\phi \end{align} In the following we will need: \begin{align} \nonumber \left[\pdersec{C_i(\theta, \phi)C_j(\theta, \phi)}{\theta}{\theta} \right]_{\theta_0, \phi_0} \quad {\rm and}\quad \left[\pdersec{C_i(\theta, \phi)C_j(\theta, \phi)}{\phi}{\phi} \right]_{\theta_0, \phi_0} \end{align} The basic blocks are: \begin{align} \nonumber \pder{C_x}{\theta} &= \cos\theta\cos\phi ,\quad \pder{C_x}{\phi} = -\sin\theta\sin\phi\\\nonumber \pder{C_y}{\theta} &= \cos\theta\sin\phi ,\quad \pder{C_y}{\phi} = \sin\theta\cos\phi\\\nonumber \pder{C_z}{\theta} &= -\sin\theta ,\quad \pder{C_z}{\phi} = 0\\\nonumber \pdersec{C_x}{\theta}{\theta} &= -\sin\theta\cos\phi ,\quad \pdersec{C_x}{\phi}{\phi} = -\sin\theta\cos\phi\\\nonumber \pdersec{C_y}{\theta}{\theta} &= -\sin\theta\sin\phi ,\quad \pdersec{C_y}{\phi}{\phi} = -\sin\theta\sin\phi\\\nonumber \pdersec{C_z}{\theta}{\theta} &= -\cos\theta ,\quad \pdersec{C_z}{\phi}{\phi} = 0\\\nonumber \end{align} And the actual ingredients for the covariance matrix can be easily calculated remembering that all the terms containing a $C_i$ multiplicative factor are zero when calculated in $(\theta_0, \phi_0)$: \begin{align} \left[\pdersec{C^2_x}{\theta}{\theta}\right]_{\theta_0, \phi_0} \!\!\!\!\!\!\!\! = \left[\pder{}{\theta}\left(2C_x\pder{C_x}{\theta}\right) \right]_{\theta_0, \phi_0} \!\!\!\!\!\!\!\! = \left[2\left(\pder{C_x}{\theta}\right)^2 + 2C_x\pdersec{C_x}{\theta}{\theta}\right]_{\theta_0, \phi_0} \!\!\!\!\!\!\!\! 
= 2\cos^2\theta_0\cos^2\phi_0
\end{align}
So, along the same lines (and defining for compactness
$s_{\theta_0} = \sin\theta_0$, $c_{\theta_0} = \cos\theta_0$,
$s_{\phi_0} = \sin\phi_0$, $c_{\phi_0} = \cos\phi_0$):
\begin{equation}
%m[0, 0] = ct2*cp2*dt2 + st2*sp2*dp2
%m[1, 1] = ct2*sp2*dt2 + st2*cp2*dp2
%m[2, 2] = st2*dt2
%m[0, 1] = ct2*cp*sp*dt2 - st2*cp*sp*dp2
%m[0, 2] = -st*ct*cp*dt2
%m[1, 2] = -st*ct*sp*dt2
%m[1, 0] = m[0, 1]
%m[2, 0] = m[0, 2]
%m[2, 1] = m[1, 2]
\Sigma =
\begin{pmatrix}
c_{\theta_0}^2c_{\phi_0}^2\sigma^2_\theta +
s_{\theta_0}^2s_{\phi_0}^2\sigma^2_\phi &
c_{\theta_0}^2 c_{\phi_0}s_{\phi_0}\sigma^2_\theta -
s_{\theta_0}^2c_{\phi_0}s_{\phi_0}\sigma^2_\phi &
-s_{\theta_0}c_{\theta_0}c_{\phi_0} \sigma^2_\theta\\
c_{\theta_0}^2 c_{\phi_0}s_{\phi_0}\sigma^2_\theta -
s_{\theta_0}^2c_{\phi_0}s_{\phi_0}\sigma^2_\phi &
c_{\theta_0}^2s_{\phi_0}^2\sigma^2_\theta +
s_{\theta_0}^2c_{\phi_0}^2\sigma^2_\phi &
-s_{\theta_0}c_{\theta_0}s_{\phi_0} \sigma^2_\theta\\
-s_{\theta_0}c_{\theta_0}c_{\phi_0} \sigma^2_\theta &
-s_{\theta_0}c_{\theta_0}s_{\phi_0} \sigma^2_\theta &
s_{\theta_0}^2\sigma^2_\theta
\end{pmatrix}
\end{equation}

\section{Shower development: basic formul\ae}

The longitudinal profile of an electromagnetic shower is described by:
\begin{equation}
\firstder{E}{t} = E_0 p(t) = E_0 k t^\alpha e^{-bt}
\end{equation}
where
$$
k = \frac{b^{\alpha + 1}}{\Gamma(\alpha + 1)}
$$
(with this definition $p(t)$ is normalized to 1 and is therefore a
probability density) and the Euler $\Gamma$ function, defined by:
$$
\Gamma(\alpha) = \int_{0}^\infty t^{\alpha - 1} e^{-t} \diff t
$$
satisfies the well-known relation:
$$
\Gamma(\alpha + 1) = \alpha \Gamma(\alpha)
$$
The position of the shower maximum is given by the condition:
$$
\left.\firstder{p}{t}\right|_{\tmax} =
k \tmax^{\alpha - 1} e^{-b\tmax} (\alpha - b\tmax) = 0
$$
and therefore:
\begin{equation}
\tmax = \frac{\alpha}{b}
\end{equation}
The other two pieces of necessary information are the dependences of $\alpha$
and $b$ on the energy. These are given by the relations:
\begin{equation}
b \approx 0.5
\end{equation}
and:
\begin{equation}
\tmax = \frac{\alpha}{b} = \ln\left(\frac{E_0}{E_c}\right) + C
\end{equation}
where $C=0.5$ for photons and $C=-0.5$ for electrons and $E_c$ is the
critical energy for the material.

\section{Longitudinal moments}

Let's start from the calculation of the lowest order moments of the shower
longitudinal profile around $t=0$. The first one is the mean:
\begin{align}
\left< t \right> &= \mu = \int_{0}^\infty t p(t) \diff t =
k \int_{0}^\infty t^{\alpha + 1} e^{-bt} \diff t =\nonumber\\
&= \frac{b^{\alpha + 1}}{\Gamma(\alpha + 1)}
\frac{\Gamma(\alpha + 2)}{b^{\alpha + 2}} = \frac{(\alpha + 1)}{b}
\end{align}
(i.e. the mean of the profile is exactly $1/b$ radiation lengths to the right
of the shower maximum).
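These relations are easy to verify numerically; the snippet below (an
illustrative sketch assuming \texttt{scipy}, with made-up values for the
energies) checks the normalization of $p(t)$ and the position of the mean
before moving on to the higher moments:
\begin{verbatim}
import numpy as np
from scipy.integrate import quad
from scipy.special import gamma

b, C = 0.5, 0.5                      # photon case
E0, Ec = 10000., 20.                 # illustrative energies (MeV)
alpha = b * (np.log(E0/Ec) + C)      # from t_max = alpha/b = ln(E0/Ec) + C

k = b**(alpha + 1) / gamma(alpha + 1)
p = lambda t: k * t**alpha * np.exp(-b*t)

assert abs(quad(p, 0, np.inf)[0] - 1) < 1e-8                   # normalization
assert abs(quad(lambda t: t*p(t), 0, np.inf)[0] - (alpha + 1)/b) < 1e-6
\end{verbatim}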
Along the same lines:
\begin{align}
\left< t^2 \right> = \frac{b^{\alpha + 1}}{\Gamma(\alpha + 1)}
\frac{\Gamma(\alpha + 3)}{b^{\alpha + 3}} =
\frac{(\alpha + 2)(\alpha + 1)}{b^2}
\end{align}
and:
\begin{align}
\left< t^3 \right> = \frac{b^{\alpha + 1}}{\Gamma(\alpha + 1)}
\frac{\Gamma(\alpha + 4)}{b^{\alpha + 4}} =
\frac{(\alpha + 3)(\alpha + 2)(\alpha + 1)}{b^3}
\end{align}
We can apply the usual formul\ae\ for the moments $M_n$ centered around the
mean (as opposed to the ones centered around 0):
\begin{equation}
M_2 = \sigma^2 = \left< t^2 \right> - \mu^2 = \frac{(\alpha + 1)}{b^2}
\end{equation}
and
\begin{equation}
M_3 = \left< t^3 \right> - 3\mu\sigma^2 - \mu^3 = \frac{2(\alpha + 1)}{b^3}
\end{equation}
The skewness $\gamma$ is given by:
\begin{equation}
\gamma = \frac{M_3}{\sigma^3} = \frac{2}{\sqrt{\alpha + 1}}
\end{equation}

Let's look at the problem from a different perspective, which will hopefully
turn out to be handy in the following. Integrating by parts, we get:
\begin{align*}
\left< t^n \right> & = k \int_{0}^\infty t^n \cdot t^\alpha e^{-bt} \diff t =
k \int_{0}^\infty t^\alpha e^{-bt} \diff\left(\frac{t^{n+1}}{n+1}\right) =\\
&= k\left.\frac{t^{n+1}}{n+1} t^\alpha e^{-bt}\right|_0^\infty -
k \int_0^\infty \frac{t^{n+1}}{n+1}
\left( \alpha t^{\alpha - 1}e^{-bt} - bt^\alpha e^{-bt} \right) \diff t =\\
&= \frac{kb}{n+1} \int_{0}^\infty t^{\alpha + n + 1} e^{-bt} \diff t -
\frac{k\alpha}{n+1} \int_{0}^\infty t^{\alpha + n} e^{-bt} \diff t =
\frac{b \left< t^{n+1} \right> - \alpha\left< t^n \right>}{n+1}
\end{align*}
from which it follows that:
\begin{equation}
\left< t^{n+1} \right> = \frac{(\alpha + n + 1)}{b}\left< t^n \right>
\end{equation}
For $n = 1$ we get:
$$
\left< t^2 \right> = \frac{(\alpha + 2)}{b}\left< t \right>
$$
or:
\begin{equation}
\sigma^2 = \frac{(\alpha + 2)}{b}\mu - \mu^2
\end{equation}
Whereas for $n = 2$:
$$
\left< t^3 \right> = \frac{(\alpha + 3)}{b}\left< t^2 \right>
$$
which translates into:
\begin{equation}
\gamma = \frac{\mu}{\sigma^3}\left[ \frac{(\alpha + 3)(\alpha + 2)}{b^2} -
3\sigma^2 - \mu^2 \right]
\end{equation}
All these equations can be directly verified by plugging in the expressions
for $\mu$, $\sigma$ and $\gamma$ explicitly obtained before, but the hope is
to generalize them to the case in which we don't sample the entire shower
(see the following section).

\section{Longitudinal moments over a finite interval}

We can generalize the previous relations to the case in which we only sample
a finite fraction of the longitudinal shower development, say between $t_1$
and $t_2$. The formalism is essentially identical, except for the fact that
now we're dealing with a probability density function over a finite interval:
$$
p_{\rm f}(t) = k_{\rm f} t^\alpha e^{-bt}
$$
with $k_{\rm f}$ being:
$$
k_{\rm f} = \frac{1}{\int_{t_1}^{t_2} t^\alpha e^{-bt} \diff t}
$$
(physically, the ratio $k/k_{\rm f}$ is the fraction of the true energy of
the particle that is actually deposited in the calorimeter).
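As a concrete illustration of the effect of the truncation (again a sketch
with arbitrary illustrative numbers), $k_{\rm f}$ and the truncated mean can
be evaluated numerically and compared with the full-profile value before
generalizing the recursion relation:
\begin{verbatim}
import numpy as np
from scipy.integrate import quad

b, alpha = 0.5, 3.4                  # illustrative values, as before
t1, t2 = 2.0, 8.0                    # sampled interval (radiation lengths)

k_f = 1.0 / quad(lambda t: t**alpha * np.exp(-b*t), t1, t2)[0]
p_f = lambda t: k_f * t**alpha * np.exp(-b*t)
mean_full = (alpha + 1) / b
mean_trunc = quad(lambda t: t * p_f(t), t1, t2)[0]
print(mean_full, mean_trunc)         # the truncated mean is pulled into [t1, t2]
\end{verbatim}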
So now we have:
\begin{equation}
\left< t^{n+1} \right> = \frac{(\alpha + n + 1)}{b}\left< t^n \right> -
\left.\frac{k_{\rm f}}{b} t^{(\alpha + n + 1)} e^{-bt}\right|_{t_1}^{t_2}
\end{equation}
and therefore:
\begin{equation}
\left< t^2 \right> = \frac{(\alpha + 2)}{b}\left< t \right> -
\frac{k_{\rm f}}{b}
\left[t_2^{(\alpha + 2)} e^{-bt_2} - t_1^{(\alpha + 2)} e^{-bt_1}\right]
\end{equation}
and:
\begin{equation}
\left< t^3 \right> = \frac{(\alpha + 3)}{b}\left< t^2 \right> -
\frac{k_{\rm f}}{b}
\left[t_2^{(\alpha + 3)} e^{-bt_2} - t_1^{(\alpha + 3)} e^{-bt_1}\right]
\end{equation}

Here are some more formul\ae\ that might turn out to be useful for the
normalization of the skewness to the expected value for electromagnetic
showers. The moments of the longitudinal distribution can be written as a
function of the (regularized) incomplete gamma function, defined as:
\begin{equation}
\gamma(\alpha, t) = \frac{1}{\Gamma(\alpha)}
\int_{0}^{t} t^{\alpha - 1} e^{-t} \diff t
\end{equation}
from which it follows that:
\begin{equation}
\int_{t_1}^{t_2} t^{\alpha} e^{-bt} \diff t =
\frac{\Gamma(\alpha+1)}{b^{\alpha+1}}
\left( \gamma(\alpha+1, bt_2) - \gamma(\alpha+1, bt_1) \right)
\end{equation}
If we define:
$$
{\mathcal G}(\alpha, b, t_1, t_2) = \frac{\Gamma(\alpha)}{b^{\alpha}}
\left( \gamma(\alpha, bt_2) - \gamma(\alpha, bt_1) \right)
$$
we have:
\begin{align}
\left< t^n \right> = \frac{{\mathcal G}(\alpha + n + 1, b, t_1, t_2)}
{{\mathcal G}(\alpha + 1, b, t_1, t_2)}
\end{align}

\clearpage

\appendix

\emph{Caution: the stuff in the appendix is mostly crap, at this time. I'll
move it into appropriate sections as soon as it's in a reasonable shape (and,
of course, this does not mean that people should not take a look).}

Let's go back to the basic equation for the principal eigenvector:
$$
\itm\mathbf{e}^1 = \lambda_1\mathbf{e}^1
$$
Doing a full error propagation is not easy, since in this equation we have
errors on the six independent components of the inertia tensor, as well as on
the eigenvalue $\lambda_1$ we've just calculated. The errors on the
$\itc{ij}$ are reasonably easy to calculate, starting from the errors
associated with the finite dimensions of the crystals. On the other hand, the
propagation of the errors to $\lambda_1$ is not trivial, as the expression is
complicated. On top of that, these different errors are not independent of
each other, as $\lambda_1$ is calculated starting from the components of the
inertia tensor.

The solution to this equation is:
\begin{align}
e^1_x &= \frac{1}{\sqrt{1 + \frac{A^2}{B^2} + \frac{A^2}{C^2}}}\\
e^1_y &= \frac{1}{\sqrt{1 + \frac{B^2}{A^2} + \frac{B^2}{C^2}}}\\
e^1_z &= \frac{1}{\sqrt{1 + \frac{C^2}{A^2} + \frac{C^2}{B^2}}}
\end{align}
where:
\begin{align}
A &= \itc{yz}(\itc{xx} - \lambda_1) - \itc{xy}\itc{xz}\\
B &= \itc{xz}(\itc{yy} - \lambda_1) - \itc{xy}\itc{yz}\\
C &= \itc{xy}(\itc{zz} - \lambda_1) - \itc{xz}\itc{yz}
\end{align}

\begin{thebibliography}{100}
\bibitem{goldstein}H.~Goldstein, \emph{Classical mechanics}.
\bibitem{landau}L.~D.~Landau, E.~M.~Lifshitz, \emph{Mechanics}.
\bibitem{wolfram}\url{http://mathworld.wolfram.com/CubicFormula.html}
\bibitem{pdg}PDG Review of Particle Physics, \emph{Physics Letters B.}
(2004) {\bf 592}
\bibitem{errors}T.~Soler and B.~H.~W.~van~Gelder,
\emph{Geophys. J. Int.} (1991) {\bf 105}, 537--546.
\bibitem{errors_corr}T.~Soler and B.~H.~W.~van~Gelder,
\emph{Geophys. J. Int.} (2006) {\bf 165}, 382.
\end{thebibliography}

\end{document}
{ "alphanum_fraction": 0.6221351668, "avg_line_length": 33.384341637, "ext": "tex", "hexsha": "fa674558d9b28f4cab3fe2022444e4dc9571f38c", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "69e123b523770baa1fc9e8f3b78e211b1064b0c0", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "fermi-lat/CalRecon", "max_forks_repo_path": "doc/moments_analysis.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "69e123b523770baa1fc9e8f3b78e211b1064b0c0", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "fermi-lat/CalRecon", "max_issues_repo_path": "doc/moments_analysis.tex", "max_line_length": 79, "max_stars_count": null, "max_stars_repo_head_hexsha": "69e123b523770baa1fc9e8f3b78e211b1064b0c0", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "fermi-lat/CalRecon", "max_stars_repo_path": "doc/moments_analysis.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 15648, "size": 37524 }
%---------------------------------------------------------------------------- % Magic tutorial number 11 %---------------------------------------------------------------------------- \NeedsTeXFormat{LaTeX2e}[1994/12/01] \documentclass[letterpaper,twoside,12pt]{article} \usepackage{epsfig,times} \setlength{\textwidth}{8.5in} \addtolength{\textwidth}{-2.0in} \setlength{\textheight}{11.0in} \addtolength{\textheight}{-2.0in} \setlength{\oddsidemargin}{0in} \setlength{\evensidemargin}{0pt} \setlength{\topmargin}{-0.5in} \setlength{\headheight}{0.2in} \setlength{\headsep}{0.3in} \setlength{\topskip}{0pt} \def\hinch{\hspace*{0.5in}} \def\starti{\begin{center}\begin{tabbing}\hinch\=\hinch\=\hinch\=hinch\hinch\=\kill} \def\endi{\end{tabbing}\end{center}} \def\ii{\>\>\>} \def\mytitle{Magic Tutorial \#11: Using IRSIM and RSIM with Magic} \def\_{\rule{0.6em}{0.5pt}} %---------------------------------------------------------------------------- \begin{document} \makeatletter \newcommand{\ps@magic}{% \renewcommand{\@oddhead}{\mytitle\hfil\today}% \renewcommand{\@evenhead}{\today\hfil\mytitle}% \renewcommand{\@evenfoot}{\hfil\textrm{--{\thepage}--}\hfil}% \renewcommand{\@oddfoot}{\@evenfoot}} \newcommand{\ps@mplain}{% \renewcommand{\@oddhead}{}% \renewcommand{\@evenhead}{}% \renewcommand{\@evenfoot}{\hfil\textrm{--{\thepage}--}\hfil}% \renewcommand{\@oddfoot}{\@evenfoot}} \makeatother \pagestyle{magic} \thispagestyle{mplain} \begin{center} {\bfseries \Large \mytitle} \\ \vspace*{0.5in} {\itshape Michael Chow} \\ {\itshape Mark Horowitz} \\ \vspace*{0.5in} Computer Systems Laboratory \\ Center for Integrated Systems \\ Stanford University \\ Stanford, CA 94305 \\ \vspace*{0.25in} This tutorial corresponds to Magic version 7. \\ \end{center} \vspace*{0.5in} {\noindent\bfseries\large Tutorials to read first:} \starti \> Magic Tutorial \#1: Getting Started \\ \> Magic Tutorial \#2: Basic Painting and Selection \\ \> Magic Tutorial \#4: Cell Hierarchies \\ \> Magic Tutorial \#8: Circuit Extraction \endi {\noindent\bfseries\large Commands introduced in this tutorial:} \starti \> :getnode, :rsim, :simcmd, :startrsim \endi {\noindent\bfseries\large Macros introduced in this tutorial:} \starti \> {\itshape (None)} \endi \vspace*{0.25in} \section{Introduction} This tutorial explains how to use Magic's interface to the switch-level circuit simulators, RSIM and IRSIM. The interface is the same for both these simulators and, except where noted, RSIM refers to IRSIM as well. This interface eliminates the tedium of mapping node names to objects in the layout and typing node names as RSIM input. It allows the user to select nodes using the mouse and apply RSIM commands to them or to display the node values determined by RSIM in the layout itself. You should already be familiar with using both RSIM and Magic's circuit extractor. Section 2 describes how to prepare the files necessary to simulate a circuit. Section 3 describes how to run RSIM interactively under Magic. Section 4 explains how to determine the node names that RSIM uses. Lastly, section 5 explains how to use the RSIM tool in Magic to simulate a circuit. \section{Preparations for Simulation} Magic uses the RSIM input file when it simulates the circuit. Before proceeding any further, make sure you have the correct versions of the programs {\bfseries ext2sim} and {\bfseries rsim} installed on your system. Important changes have been made to these programs to support simulation within Magic. 
To try out this tool on an example, copy all the {\bfseries tut11{\itshape x}} cells to your current directory with the following command: \starti \ii {\bfseries cp \~{}cad/lib/magic/tutorial/tut11* .} \endi The {\bfseries tut11a} cell is a simple 4-bit counter using the Magic scmos technology file. Start Magic on the cell {\bfseries tut11a}, and extract the entire cell using the command: \starti \ii {\bfseries :extract all} \endi When this command completes, several {\bfseries .ext} files will be created in your current directory by the extractor. The next step is to flatten the hierarchy into a single representation. Return to the Unix c-shell by quitting Magic. The program {\bfseries ext2sim} is used to flatten the hierarchy. Run this program from the C-shell by typing: \starti \ii {\bfseries ext2sim -L -R -c 20 tut11a} \endi This program will create the file {\bfseries tut11a.sim} in your current directory. If you are running IRSIM, the {\bfseries tut11a.sim} can be used directly as input to the simulator and you should skip the next step. Instead, if you will be using RSIM, the last step is to create the binary representation of the flattened hierarchy by using the program {\bfseries presim}. To do this, type: \starti \ii {\bfseries presim tut11a.sim tut11a.rsm \~{}cad/lib/scmos100.prm -nostack -nodrops} \endi The third file is the parameter file used by presim for this circuit. The convention at Stanford is to use the suffix {\itshape .rsm} when naming the RSIM input file. The file {\bfseries tut11a.rsm} can also be used as input for running RSIM alone. \section{Using RSIM} Re-run Magic again to edit the cell {\bfseries tut11a}. We'll first learn how to run RSIM in interactive mode under Magic. To simulate the circuit of tut11a, using IRSIM type the command: \starti \ii {\bfseries :rsim scmos100.prm tut11a.sim} \endi To simulate the circuit of tut11a, using RSIM type the command: \starti \ii {\bfseries :rsim tut11a.rsm} \endi You should see the RSIM header displayed, followed by the standard RSIM prompt ({\bfseries rsim$>$} or {\bfseries irsim$>$}, depending on the simulator) in place of the usual Magic prompt; this means keyboard input is now directed to RSIM. This mode is very similar to running RSIM alone; one difference is that the user can escape RSIM and then return to Magic. Also, the mouse has no effect when RSIM is run interactively under Magic. Only one instance of RSIM may be running at any time under Magic. The simulation running need not correspond to the Magic layout; however, as we shall see later, they must correspond for the RSIM tool to work. All commands typed to the RSIM prompt should be RSIM commands. We'll first run RSIM, then escape to Magic, and then return back to RSIM. Type the RSIM command \starti \ii {\bfseries @ tut11a.cmd} \endi to initialize the simulation. (Note there is a `` '' after the @.) Now type {\bfseries c} to clock the circuit. You should see some information about some nodes displayed, followed by the time. Set two of the nodes to a logic ``1'' by typing {\bfseries h RESET{\_}B hold}. Step the clock again by typing {\bfseries c}, and RSIM should show that these two nodes now have the value ``1''. You can return to Magic without quitting RSIM and then later return to RSIM in the same state in which it was left. Escape to Magic by typing: \starti \ii {\bfseries . } \endi (a single period) to the RSIM prompt. Next, type a few Magic commands to show you're really back in Magic (signified by the Magic prompt). 
You can return to RSIM by typing the Magic command {\bfseries rsim} without
any arguments.  Type:

\starti
\ii {\bfseries :rsim}
\endi

The RSIM prompt will be displayed again, and you are now back in RSIM in the
state you left it in.  Experiment with RSIM by typing some commands.  To quit
RSIM and return to Magic, type:

\starti
\ii {\bfseries q}
\endi

in response to the RSIM prompt.  You'll know you're back in Magic when the
Magic prompt is redisplayed.  If you should interrupt RSIM (typing a
control-C), you'll probably kill it and then have to restart it.  RSIM
running standalone will also be killed if you interrupt it.  If you interrupt
IRSIM (typing a control-C), the simulator will abort whatever it's doing (a
long simulation run, for example) and return to the command interpreter by
prompting again with {\bfseries irsim$>$}.

\section{Node Names}

It's easy to determine node names under Magic.  First, locate the red square
region in the middle right side of the circuit.  Move the cursor over this
region and select it by typing {\bfseries s}.  To find out the name for this
node, type:

\starti
\ii {\bfseries :getnode}
\endi

Magic should print that the node name is {\itshape RESET{\_}B}.  The command
{\bfseries getnode} prints the names of all nodes in the current selection.
Move the cursor over the square blue region in the upper right corner and add
this node to the current selection by typing {\bfseries S}.  Type
{\bfseries :getnode} again, and Magic should print the names of two nodes;
the blue node is named {\itshape hold}.

You can also print aliases for the selected nodes.  Turn on name-aliasing by
typing:

\starti
\ii {\bfseries :getnode alias on}
\endi

Select the red node again, and type {\bfseries :getnode}.  Several names will
be printed; the last name printed is the one RSIM uses, so you should use
this name for RSIM.  Note that {\bfseries getnode} is not guaranteed to print
all aliases for a node.  Only those aliases generated when the RSIM node name
is computed are printed.  However, most of the aliases will usually be
printed.  Printing aliases is also useful to monitor the name search, since
{\bfseries getnode} can take several seconds on large nodes.  Turn off
aliasing by typing:

\starti
\ii {\bfseries :getnode alias off}
\endi

{\bfseries getnode} works by extracting a single node.  Consequently, it can
take a long time to compute the name for large nodes, such as {\itshape Vdd}
or {\itshape GND}.  Select the horizontal blue strip on top of the circuit
and run {\bfseries :getnode} on this.  You'll find that this will take about
six seconds for {\bfseries getnode} to figure out that this is
{\itshape Vdd}.  You can interrupt {\bfseries getnode} by typing
{\bfseries \^{}C} (control-C), and {\bfseries getnode} will return the
``best'' name found so far.  There is no way to tell if this is an alias or
the name RSIM expects unless {\bfseries getnode} is allowed to complete.

To prevent these long name searches, you can tell {\bfseries getnode} to quit
its search when certain names are encountered.  Type:

\starti
\ii {\bfseries :getnode abort Vdd}
\endi

Select the blue strip on top of the circuit and type {\bfseries :getnode}.
You'll notice that the name was found very quickly this time, and
{\bfseries getnode} tells you it aborted the search of {\itshape Vdd}.  The
name returned may be an alias instead of the one RSIM expects.  In this
example, the abort option to {\bfseries getnode} will abort the name search
on any name found where the last component of the node name is {\itshape Vdd}.
That is, {\bfseries getnode} will stop if a name such as
``miasma/crock/{\itshape Vdd}'' or ``hooha/{\itshape Vdd}'' is found.  You
can abort the search on more than one name; now type
{\bfseries :getnode abort GND}.  Select the bottom horizontal blue strip in
the layout, and type {\bfseries :getnode}.  The search will end almost
immediately, since this node is {\itshape GND}.  {\bfseries getnode} will now
abort any node name search when either {\itshape Vdd} or {\itshape GND} is
found.  The search can be aborted on any name; just supply the name as an
argument to {\bfseries getnode abort}.  Remember that only the last part of
the name counts when aborting the name search.  To cancel all name aborts and
resume normal name searches, type:

\starti
\ii {\bfseries :getnode abort}
\endi

{\bfseries getnode} will no longer abort the search on any names, and it will
churn away unless interrupted by the user.

\section{RSIM Tool}

You can also use the mouse to help you run RSIM under Magic.  Instead of
typing node names, you can just select nodes with the mouse, tell RSIM what
to do with these nodes, and let Magic do the rest.  Change tools by typing:

\starti
\ii {\bfseries :tool rsim}
\endi

or hit the space bar until the cursor changes to a pointing hand.  The RSIM
tool is active when the cursor is this hand.  The left and right mouse
buttons have the same function as the box tool.  You use these buttons along
with the select command to select the nodes.  The middle button is different
from the box tool.  Clicking the middle button will cause all nodes in the
selection to have their logical values displayed in the layout and printed in
the text window.

We need to have RSIM running in order to use this tool.  Start RSIM by
typing:

\starti
\ii {\bfseries :startrsim tut11a.rsm}
\endi

The {\bfseries .rsm} file you simulate must correspond to the root cell of
the layout.  If not, Magic will generate node names that RSIM will not
understand and things won't work properly.  If any paint is changed in the
circuit, the circuit must be re-extracted and a new {\bfseries .rsm} file
must be created to reflect the changes in the circuit.  Magic will print the
RSIM header, but you return to Magic instead of remaining in RSIM.  This is
an alternate way of starting up RSIM, and it is equivalent to the command
{\bfseries rsim tut11a.rsm} and typing a period ({\bfseries .}) to the RSIM
prompt, escaping to Magic.  We need to initialize RSIM, so get to RSIM by
typing {\bfseries :rsim} and you'll see the RSIM prompt again.  As before,
type {\bfseries @ tut11a.cmd} to the RSIM prompt to initialize everything.
Type a period ({\bfseries .}) to return to Magic.

We are now ready to use the RSIM tool.  As mentioned earlier,
{\bfseries tut11a} is a 4-bit counter.  We'll reset the counter and then step
it using the RSIM tool.  Locate the square blue area on the top right corner
of the circuit.  Place the cursor over this region and select it.  Now click
the middle button, and the RSIM value for this node will be printed in both
the text window and in the layout.  Magic/RSIM will report that the node is
named {\itshape hold} and that its current value is {\itshape X}.  You may
not be able to see the node value in the layout if you are zoomed out too
far.  Zoom in closer about this node if necessary.  Try selecting other
nodes, singly or in groups, and click the middle button to display their
values.  This is an easy way to probe nodes when debugging a circuit.  Select
{\itshape hold} again (the blue square).
This node must be a ``1'' before resetting the circuit.  Make sure this is
the only node in the current selection.  Type:

\starti
\ii {\bfseries :simcmd h}
\endi

to set it to a ``1''.  Step the clock by typing:

\starti
\ii {\bfseries :simcmd c}
\endi

Click the middle button and you will see that the node has been set to a
``1.''  The Magic command {\bfseries simcmd} will take the selected nodes and
use them as RSIM input.  These uses of {\bfseries simcmd} are like typing the
RSIM commands {\itshape h hold} followed by {\itshape c}.  The arguments
given to {\bfseries simcmd} are normal RSIM commands, and {\bfseries simcmd}
will apply the specified RSIM command to each node in the current selection.
Try RSIM commands on this node (such as {\itshape ?} or {\itshape d}) by
using the command as an argument to {\bfseries simcmd}.

You can enter RSIM interactively at any time by simply typing
{\bfseries :rsim}.  To continue using the RSIM tool, escape to Magic by
typing a period ({\bfseries .}) to the RSIM prompt.

The node {\itshape RESET{\_}B} must be set to a ``0''.  This node is the red
square area at the middle right of the circuit.  Place the cursor over this
node and select it.  Type the Magic commands {\bfseries :simcmd l} followed
by {\bfseries :simcmd c} to set the selected node to a ``0''.  Click the
middle mouse button to check that this node is now ``0''.  Step the clock
once more to ensure the counter is reset.  Do this using the
{\bfseries :simcmd c} command.

The outputs of this counter are the four vertical purple strips at the bottom
of the circuit.  Zoom in if necessary, select each of these nodes, and click
the middle button to check that all are ``0''.  Each of these four nodes is
labeled {\itshape bit{\_}x}.  If they are not all ``0'', check the circuit to
make sure {\itshape hold=1} and {\itshape RESET{\_}B=0}.

Assuming these nodes are at their correct value, you can now simulate the
counter.  Set {\itshape RESET{\_}B} to a ``1'' by selecting it (the red
square) and then typing {\bfseries :simcmd h}.  Step the clock by typing
{\bfseries :simcmd c}.  Using the same procedure, set the node
{\itshape hold} (the blue square) to a ``0''.

We'll watch the output bits of this counter as it runs.  Place the box around
all four outputs (purple strips at the bottom) and zoom in so their labels
are visible.  Select one of the outputs by placing the cursor over it and
typing {\bfseries s}.  Add the other three outputs to the selection by
placing the cursor over each and typing {\bfseries S}.  These four nodes
should be the only ones in the selection.  Click the middle mouse button to
display the node values.  Step the clock by typing {\bfseries :simcmd c}.
Click the middle button again to check the nodes.  Repeat stepping the clock
and displaying the outputs several times, and you'll see the outputs sequence
as a counter.  If you follow the text on the screen, you'll see that the
outputs are also being watched.

You may have noticed that the results are printed very quickly if the middle
button is clicked a second time without changing the selection.  This is
because the node names do not have to be recomputed if the selection remains
unchanged.  Thus, you can increase the performance of this tool by minimizing
selection changes.  This can be accomplished by adding all of the nodes you
intend to check to the current selection at once.

To erase all the RSIM value labels from the layout, clear the selection by
typing:

\starti
\ii {\bfseries :select clear}
\endi

and then click the middle mouse button.
The RSIM labels do not affect the cell modified flag, nor will they be written in the {\bfseries .mag} file. When you're finished using RSIM, resume RSIM by typing {\bfseries :rsim} and then quit it by typing a {\bfseries q} to the RSIM prompt. Quitting Magic before quitting RSIM will also quit RSIM. We've used a few macros to lessen the typing necessary for the RSIM tool. The ones commonly used are: \starti \ii {\bfseries :macro h ``simcmd h''} \\ \ii {\bfseries :macro l ``simcmd l''} \\ \ii {\bfseries :macro k ``simcmd c''} \endi \end{document}
%%%%%%%%%%%%%Article template by C Has%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\documentclass[10pt, a4paper]{article}
%
%Important packages
\usepackage[T1]{fontenc}
\usepackage{times}
\usepackage{amsmath, amssymb, amsfonts}
\usepackage{graphicx}
\usepackage{caption, subcaption}
\usepackage[margin=2cm]{geometry}
\usepackage[onehalfspacing]{setspace}
\usepackage[round, sort&compress]{natbib}
\usepackage{hyperref}
%Title info...
\title{\vspace{-15mm} Article and Manuscript Writing in \LaTeX}
\author{Chandra Has\\ [email protected]}
\begin{document}
\maketitle
\begin{abstract}
Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged. \\ \noindent \textbf{Keywords:} Article, \LaTeXe\ tutorial, manuscript, template
\end{abstract}
%\tableofcontents
%\listoffigures
%\listoftables
\section{Introduction}
Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged. Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged.
\subsection{Title of first subsection}
Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged.
\subsubsection{Title of first subsubsection}
Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged.
\paragraph{Title of paragraph.}
Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged~\citep{Magnetism}.
\section{Figure, table, equation}
Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged~\citep{Magnetism}.
See figure~\ref{fig:logo} and table~\ref{tab: table-1}.
\begin{figure}[tbp] \centering \includegraphics[width=0.35\linewidth]{gfx} \caption{Lorem Ipsum is simply dummy text of the printing and typesetting industry.} \label{fig:logo} \end{figure}
\begin{table}[tbp] \centering \caption{Lorem Ipsum is simply dummy text of the printing and typesetting industry.} \label{tab: table-1} \begin{tabular}{p{1cm}p{1cm}p{1cm}p{1cm}p{1cm}p{1cm}} \hline A & B & C & D & E & F \\ \hline 1 & 2 & 3 & 4 & 5 & 6 \\ 6 & 5 & 4 & 3 & 2 & 1 \\ 3 & 2 & 1 & 2 & 3 & 4 \\ 1 & 2 & 4 & 5 & 6 & 3 \\ 4 & 3 & 2 & 1 & 5 & 6 \\ \hline \end{tabular} \end{table}
\begin{equation} y = \ln x \end{equation}
\appendix
\section{First appendix}
Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged~\citep{Davies1998}.
\begin{equation} Y^2 = 4ax \end{equation}
\bibliographystyle{plainnat}
\bibliography{mybib}
\end{document}
% Created: Enze Chen, July 2017
% Last edited: Enze Chen, December 2017
%
% Chapter 8 of the MSE 142 coursereader. This chapter discusses time-dependent perturbation theory. The various approximations for the probability amplitudes are discussed, and particular focus is drawn to sinusoidal perturbations. These model electric fields, which can be applied to stimulated emission in lasers.
% Uncomment the following three lines and last line to individually compile this chapter
%\documentclass[12pt, english]{book}
%\usepackage{142crstyle}
%\begin{document}
\chapter[Perturbation Theory]{Time-dependent Perturbation Theory} \label{ch:pert}
%{ \doublespacing
For the final topic covered in this text, we would like to understand the interaction of light with matter---how materials absorb light, emit light, and the basic ideas behind the operation of the laser. Many of the phenomena here can be explained using QED, but for the sake of simplicity we will return to the single-particle wave functions used in first quantization. However, one of the key differences between these interactions and our previous examples is that our Hamiltonian is now time-dependent, so instead of solving for a stationary state we must now solve for a superposition state. This framework allows us to model atomic transitions between energy levels due to the emission or absorption of radiation by an atom.
\section{General formalism}
To start off, let's suppose we have a quantum system with known Hamiltonian $H_0(r)$\footnote{We use $r$ here to represent the position vector, which you can view as equivalent to $x$.} which we have already solved the \Sch\ equation for, i.e. \begin{equation*} H_0\Psi_n = i\hbar \pdv{\Psi_n}{t} \end{equation*} with stationary state solutions \begin{equation*} \Psi_n(r,t) = \psi_n(r)e^{-iE_nt/\hbar} \end{equation*} We can now apply a small perturbation $H'(r,t)$ of strength $\lambda$ such that the total Hamiltonian becomes \begin{tcolorbox}[title = Hamiltonian for small perturbations] \vspace{-2ex} \begin{equation} \hat{H}(r,t) = H_0(r) + \lambda H'(r,t) \label{eq:ham-pert} \end{equation} \end{tcolorbox} where all of the time-dependence is accounted for by the second term. Here, $\lambda \ll 1$ characterizes the strength of the perturbation. As we will see, the problem becomes too difficult to solve in general for arbitrary $\lambda$, but in the case when $\lambda$ is small, we can develop a simple approximate theory. The theory will be quite general---only at the end will we apply it to specific examples associated with the interactions between light and matter. In any case, the time-dependent \Sch\ equation that we are now trying to solve becomes \begin{equation} \left(H_0 + \lambda H'\right)\Psi = i\hbar \pdv{\Psi}{t} \label{eq:sch-pert} \end{equation} Since this general equation is impossible to solve exactly, we will employ the method of \textbf{eigenfunction expansion}, which you've already seen for the particle in a box. We expand the general solution $\Psi(r,t)$ we are trying to find in terms of the known solutions of $H_0$: \begin{equation} \Psi(r,t) = \sum_n c_n(t) \Psi_n(r,t) \label{eq:exp-pert} \end{equation} where the individual states $\Psi_n$ are orthonormal basis vectors. This again corresponds to a quantum superposition state with $c_n(t)$ the amplitude of each state in the superposition and $\abs{c_n(t)}^2$ the probability that a measurement finds the system in state $n$ at time $t$ (technically that a measurement of the energy will return $E_n$).
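As a quick side note, since $\Psi$ is normalized and the $\Psi_n$ form an orthonormal set, the coefficients must always satisfy
\begin{equation*}
\sum_n \abs{c_n(t)}^2 = 1
\end{equation*}
so the perturbation can only redistribute probability among the stationary states of $H_0$; it never creates or destroys any.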
Since we are trying to solve for the case where there is only a slight perturbation to the known Hamiltonian, it's a good guess to try to find solutions which can be expressed in terms of the known solutions of $H_0$. Our goal will be to find a general equation for the $c_n$'s, since the probability amplitudes are now changing in time. Once we know these, we know the full time-dependent evolution of the wave function. \par %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \subsection{Probability amplitude} If we substitute the expansion in Equation~\ref{eq:exp-pert} into our time-dependent \Sch\ equation, we find \begin{align*} \left(H_0 + \lambda H'\right)\Psi &= i\hbar \pdv{\Psi}{t} \\ H_0 \sum_n c_n(t) \Psi_n(r,t) + \lambda H'\sum_n c_n(t) \Psi_n(r,t) &= i\hbar \pdv{t}\sum_n c_n(t) \Psi_n(r,t) \\ i\hbar\sum_n c_n(t) \pdv{\Psi_n}{t} + \lambda H'\sum_n c_n(t) \Psi_n(r,t) &= i\hbar \sum_n c_n(t) \pdv{\Psi_n}{t} + i\hbar \sum_n \dv{c_n(t)}{t} \Psi_n(r,t) \\ \lambda H'\sum_n c_n(t) \Psi_n(r,t) &= i\hbar \sum_n \dot{c}_n(t) \Psi_n(r,t) \end{align*} In the last line we used dot notation to represent the time derivative of a quantity, such that $\dot{c_n}=\dv{c_n}{t}$. We take the last line and multiply both sides on the left with $\bra{\Psi_k}$ to obtain \begin{equation*} \lambda \sum_n c_n \braket{\Psi_k}{H'\Psi_n} = i\hbar \sum_n \dot{c}_n \braket{\Psi_k}{\Psi_n} \end{equation*} Why is this useful? Because we claimed that the stationary state solutions were orthogonal, this allows us to pick out just one term in the sum on the right hand side, namely the term with $k=n$. All the other terms are zero. Thus, the final equation we find is \begin{tcolorbox}[title = Relationship for $c_k$] \vspace{-2ex} \begin{equation} i\hbar \dot{c}_k = \lambda \sum_n c_n H_{kn}' \label{eq:cdot} \end{equation} \end{tcolorbox} Here, $H_{kn}' = \braket{\Psi_k}{H'\Psi_n}$. These are typically called \textbf{matrix elements} because the perturbing Hamiltonian is a matrix operator and we are interested in the element in the $k$th row and $n$th column of this matrix. Equation~\ref{eq:cdot} is an infinite series of coupled differential equations and not so easy to deal with. But so far, we have been completely general, i.e. we have not made use of the fact that we are trying to solve for the case of a small perturbation. To do this, we guess the solution \begin{equation} \boxed{c_k(t) = c_k^{(0)} + \lambda c_k^{(1)}(t) + \lambda^2 c_k^{(2)}(t) + \cdots} \label{eq:c-pow} \end{equation} which we obtained by expanding $c_k(t)$ in a \textbf{power series in} $\lambda$. The superscript in parentheses indicates the order of the approximation for $c_n$. Again, this is a reasonable assumption in the limit when $\lambda$ is small.\footnote{This is largely a convergence argument, and a brief explanation is given by \href{http://tutorial.math.lamar.edu/Classes/CalcII/PowerSeriesandFunctions.aspx}{Paul Dawkins}.} In the following, what we'll do is find an equation for the first order correction term $c_k^{(1)}$. 
If we substitute the above equation into both sides of Equation~\ref{eq:cdot} and equate powers of $\lambda$ (since $\lambda$ is arbitrary, those terms with the same power must be equal), we obtain the following equations: \begin{align} i\hbar \dot{c}_k^{(0)} &= 0 \label{eq:cdot0} \\ i\hbar \dot{c}_k^{(1)} &= \sum_n H_{kn}' c_n^{(0)} \label{eq:cdot1} \\ &\vdots \nonumber \\ i\hbar \dot{c}_k^{(n)} &= \sum_m H_{km}' c_m^{(n-1)} \label{eq:cdotn} \end{align} So, if we can find $c_k^{(0)}$, we can use this to find $c_k^{(1)}$, then $c_k^{(2)}$, and so on. \par As an example, let's assume at time $t=0$, the system is sitting in a stationary state of $H_0$, say $\Psi_l$, such that \begin{equation*} \Psi(r,t)|_{t=0} = \Psi_l(r,t) = \sum_n c_n(t) \Psi_n(r,t) \end{equation*} So what is $c_n^{(0)}$? Well, since at $t=0$ the system is entirely in the single state $\Psi_l$, all the $c_n$'s must be zero except when $n=l$, in which case $c_l=1$. This can be written simply in terms of the \textbf{Kronecker delta function} as \begin{equation} c_n^{(0)} = \delta_{n,l} \end{equation} which is 0 if $n\neq l$ and 1 if $n=l$.\footnote{The Kronecker delta function is analogous to the Dirac delta function, just applied to discrete arguments.} So now we have $c_k^{(0)}$ as set by the initial conditions and we can solve for the first order correction term. Using Equation~\ref{eq:cdot1}, \begin{equation} i\hbar \dot{c}_k^{(1)} = \sum_n H_{kn}' \delta_{n,l} = H_{kl}' \end{equation} which leads to \begin{tcolorbox}[title = First order perturbation coefficient] \vspace{-2ex} \begin{equation} c_k^{(1)}(t) = \frac{1}{i\hbar} \int_{0}^{t_0} H_{kl}'(r,t) \dd{t} \label{eq:c1} \end{equation} \end{tcolorbox} The square of this quantity gives the probability of making an atomic transition from state $\Psi_l$ to state $\Psi_k$ after some time $t_0$, at least to first order. If higher accuracy is required, we can easily write down the equation for the second order approximation as follows: \begin{equation*} i\hbar \dot{c}_k^{(2)} = \sum_{m\neq l} H_{km}'c_m^{(1)} \end{equation*} Inserting Equation~\ref{eq:c1} into the right hand side and taking the integral, we get \begin{equation} c_k^{(2)} = -\frac{1}{\hbar^2} \sum_{m\neq l} \int_{0}^{t_0} H_{km}'(r,t') \left[ \int_{0}^{t'} H_{ml}'(r,t'') \dd{t''} \right] \dd{t'} \end{equation} Of course, we could theoretically continue doing these successive approximations to get higher order corrections and more accurate calculations of $c_k(t)$. The second order approximation allows us to model an intermediate transition from $\Psi_l$ to some state $\Psi_m$ and then finally to $\Psi_k$. In most cases, of course, the first order approximation is sufficient, and certainly in our case as a pedagogical example of the theory.\footnote{Now unless otherwise stated, assume $c_k(t)$ refers to the first order approximation $c_k^{(1)}(t)$ because we set $c^{(0)}_{j \neq l} = 0$.}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Separation of variables}
Before we get to the application of this theory, let's take a closer look at Equation~\ref{eq:c1}.
One can often factor the perturbation Hamiltonian as $H'(r,t) = \bar{H}'(r)f(t)$, in which case the matrix element $H'_{kl}$ can be written as \begin{align*} H'_{kl} &= \braket{\Psi_k}{H'\Psi_l} \\ &= f(t) \braket{\psi_ke^{-iE_kt/\hbar}}{\bar{H}'\psi_le^{-iE_lt/\hbar}} \\ &= f(t) \braket{\psi_k}{\bar{H}'\psi_l}e^{i(\omega_k-\omega_l)t} \\ &= f(t) \bar{H}'_{kl} e^{i\omega_{kl}t} \end{align*} where we have defined a new matrix element $\bar{H}'_{kl}$ and the quantity $\omega_{kl} = \omega_k - \omega_l = (E_k - E_l)/\hbar$. Note that the matrix element is in principle easy to calculate, at least numerically. We know the solutions of the unperturbed Hamiltonian and therefore can calculate this integral for arbitrary $k$ and $l$. We then have another form of Equation~\ref{eq:c1} for the time-dependent amplitude: \begin{equation} \boxed{c_k(t) = \frac{\bar{H}'_{kl}}{i\hbar} \int_{0}^{t_0} e^{i\omega_{kl}t} f(t) \dd{t}} \label{eq:c1-sep} \end{equation} The first order approximation of the probability of the system making a transition from an initial state $l$ to state $k$ is therefore \begin{tcolorbox}[title = Transition probability] \vspace{-2ex} \begin{equation} P_{l \rightarrow k} = \abs{c_k}^2 = \abs{\frac{\bar{H}'_{kl}}{\hbar}}^2 \abs{\int_{0}^{t} e^{i\omega_{kl}t'} f(t') \dd{t'}}^2 \end{equation} \end{tcolorbox} This is the fundamental result that we will apply in the next section. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Sinusoidal perturbation} As a simple application of the previous equation for the transition probability, let's suppose that at time $t<0$ the atom is in a stationary state $l$. Starting at $t=0$, we apply a monochromatic electromagnetic wave that gives a sinusoidal perturbation $H'(r,t) = 2H'(r)\cos(\omega t)$. Substituting this expression into Equation~\ref{eq:c1-sep}, we find \begin{align*} c_k(t) &= \frac{H'_{kl}}{i\hbar} \int_{0}^{t_0} e^{i\omega_{kl}t} \left( 2\cos(\omega t) \right) \dd{t} \\ &= \frac{H'_{kl}}{i\hbar} \int_{0}^{t_0} e^{i\omega_{kl}t} \left(e^{i\omega t} + e^{-i\omega t} \right) \dd{t} \\ &= \frac{H'_{kl}}{i\hbar} \int_{0}^{t_0} e^{i(\omega_{kl}+\omega)t} + e^{i(\omega_{kl}-\omega)t} \dd{t} \\ &= -\frac{H'_{kl}}{\hbar} \left[ \frac{e^{i(\omega_{kl}+\omega)t}}{\omega_{kl}+\omega} + \frac{e^{i(\omega_{kl}-\omega)t}}{\omega_{kl}-\omega} \right]\bigg|_0^{t_0} \\ \Aboxed{c_k(t) &= -\frac{H'_{kl}}{\hbar} \left[ \frac{e^{i(\omega_{kl} + \omega)t_0} - 1}{\omega_{kl}+\omega} + \frac{e^{i(\omega_{kl} - \omega)t_0} - 1}{\omega_{kl}-\omega} \right]} \numberthis \end{align*} To dissect this equation, let's consider the case of absorption, i.e. a transition from state $l$ to a higher energy state $k$ such that $\omega_{kl} > 0$. It is clear then from the denominators that for $\omega \approx \omega_{kl}$, the second term on the right hand side dominates over the first term. In addition, we have \begin{align*} \omega &\approx \omega_{kl} = \frac{E_k - E_l}{\hbar} \\ E_k &= E_l + \hbar \omega \end{align*} which shows that energy is conserved for the absorption of a quantum of energy $\hbar\omega$. 
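It is worth making this step explicit before simplifying further. Keeping only the near-resonant term, and writing $t$ for the elapsed time $t_0$, we have approximately
\begin{equation*}
c_k(t) \approx -\frac{H'_{kl}}{\hbar} \left[ \frac{e^{i(\omega_{kl} - \omega)t} - 1}{\omega_{kl}-\omega} \right]
\end{equation*}
The neglect of the rapidly oscillating, off-resonant term is commonly known as the \textbf{rotating-wave approximation}.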
We can further simplify the right hand side using the identity \begin{equation*} e^{i\theta} - 1 = 2ie^{i\theta/2} \sin\left(\frac{\theta}{2}\right) \end{equation*} to obtain a cleaner expression for the transition probability: \begin{align*} c_k(t) &= -\frac{H'_{kl}}{\hbar} \left[ \frac{e^{i(\omega_{kl} - \omega)t} - 1}{\omega_{kl}-\omega} \right] \\ c_k(t) &= -\frac{H'_{kl}}{\hbar} \left[ \frac{2ie^{i(\omega_{kl} - \omega)t/2} \sin(\omega_{kl}-\omega)t/2}{\omega_{kl}-\omega} \right] \\ \Aboxed{P_{l\rightarrow k} &= \abs{c_k}^2 = \frac{\abs{H'_{kl}}^2}{\hbar^2} \left[ \frac{\sin (\omega_{kl}-\omega)t/2}{(\omega_{kl}-\omega)/2} \right]^2} \numberthis \label{eq:prob-abs} \end{align*} This equation is plotted in Figure~\ref{fig:prob-abs} first as a function of $t$ and then as a function of $\omega_{kl}-\omega$. Remarkably, when plotted as a function of time, the transition probability oscillates sinusoidally between a maximum value and 0. At times that are integer multiples of $2\pi/\abs{\omega_{kl}-\omega}$, the particle is guaranteed to be in the lower state; thus if one wants to maximize their chances of causing a transition, they should only leave the perturbation on for odd multiples of $\pi/\abs{\omega_{kl}-\omega}$, which hopefully finds the system in the higher energy state. This cyclic behavior is called \textbf{Rabi flopping} and is an integral component of nuclear magnetic resonance (NMR) and quantum computing. \begin{figure}[!h] \centering \subfloat[]{\includegraphics[width=0.51\linewidth]{absorption-t} \label{fig:prob-abs-t}} \subfloat[]{\includegraphics[width=0.46\linewidth]{absorption-w} \label{fig:prob-abs-w}} \caption{The transition probability from state $l$ to state $k$ plotted as \protect\subref{fig:prob-abs-t} a function of time, which displays oscillatory behavior with nodes at integer multiples of $2\pi/\abs{\omega_{kl}-\omega}$, and \protect\subref{fig:prob-abs-w} a function of frequency, which displays a sharp peak at $\omega=\omega_{kl}$ with width $4\pi/t$.} \label{fig:prob-abs} \end{figure} On the other hand, we see in Figure~\ref{fig:prob-abs-w} that in order to maximize the transition probability, the driving frequency $\omega$ should match the ``natural'' frequency $\omega_{kl}$. The peak has a finite width of $4\pi/t$ and becomes narrower and narrower as time progresses. Thus we see the manifestation of the uncertainty principle in the range of photon energies that are able to drive the transition. On a practical level, the complementarity between time and energy\footnote{Often expressed as $\Delta E \Delta t \ge \frac{\hbar}{2}$, or $\Delta \omega \Delta t \ge \frac{1}{2}$.} is something that we're probably familiar with. If we hear any note (frequency) for a very short amount of time, there is a lot of uncertainty as to what that note is. It's only when the note is extended that the true frequency of the sound wave is determined.
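As a quick sanity check on Equation~\ref{eq:prob-abs}, consider driving the system exactly on resonance. Using $\sin x \approx x$ for small $x$, the limit $\omega \rightarrow \omega_{kl}$ gives
\begin{equation*}
P_{l\rightarrow k} \approx \frac{\abs{H'_{kl}}^2}{\hbar^2}\, t^2
\end{equation*}
which grows quadratically in time. Since a probability can never exceed 1, the first order result should only be trusted for short times or weak perturbations; an exact treatment of the two-level system at resonance simply continues the Rabi flopping described above rather than growing without bound.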
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Application: Stimulated emission} The precise control of the driving frequency to drive atomic transitions was leveraged by Charles Townes to construct the first \textbf{maser} (microwave amplification by stimulated emission of radiation) in 1953.\footnote{See this \href{https://physics.aps.org/story/v15/st4}{focus article} from \emph{Physics} about the history surrounding the maser and Townes' \href{https://journals.aps.org/pr/abstract/10.1103/PhysRev.99.1264}{original work}.} The maser was the precursor to the \textbf{laser}, which was invented seven years later and operates under the same principles to produce coherent radiation at visible wavelengths.\footnote{See T. H. Maiman \href{https://www.nature.com/nature/journal/v187/n4736/pdf/187493a0.pdf}{\emph{Nature}} \textbf{187} (1960).} \par Both devices operate based on \textbf{stimulated emission}, which was predicted by Einstein back in 1917. Einstein knew that when photons of the right frequency strike an atom, the atom can ``absorb'' the photon and transition into a higher energy state (the electrons jump to higher energy levels). He then posited that it must be possible for the excited atom to return to a lower energy state by emitting a photon in a process known as \textbf{spontaneous emission}. Einstein, of course, did not stop there. He went one step further and postulated that if a photon of the right frequency encountered an excited atom, then there would be a probability that the photon will stimulate the excited atom to release a photon of identical frequency that traveled in the same direction. This process is illustrated in Figure~\ref{fig:laser}. Since we derived the probability of absorption in the text, we will leave it as an exercise to the reader to show that the probability of emission is in fact equivalent to Equation~\ref{eq:prob-abs}. \begin{figure}[!h] \centering \subfloat[]{\includegraphics[width=0.20\linewidth]{absorption} \label{fig:absorption}} \qquad \subfloat[]{\includegraphics[width=0.23\linewidth]{spont-e} \label{fig:stimu-e}} \qquad \subfloat[]{\includegraphics[width=0.22\linewidth]{stimu-e} \label{fig:spont-e}} \caption{The three ways in which light interacts with atoms include: \protect\subref{fig:absorption} Absorption, where a photon is absorbed by the atom and an electron transitions into an excited state. \protect\subref{fig:stimu-e} Spontaneous emission, where an excited atom emits a photon and an electron transitions to a lower energy state. \protect\subref{fig:spont-e} Stimulated emission, where an incident photon causes an excited atom to emit a coherent photon.} \label{fig:laser} \end{figure} Where stimulated emission becomes exciting is when we have a population inversion of many atoms in the excited state that all have the possibility of emitting coherent radiation. Then we get an \emph{amplification} effect where a single incident photon leads to two resulting photons, which then turn into four photons, and so on in a chain reaction. This is the basis for how masers and lasers function, and scientists are getting increasingly creative with these optical resonators. In particular, nanowire lasers, pioneered by Peidong Yang at UC Berkeley,\footnote{See M. H. Huang et al. 
\href{http://science.sciencemag.org/content/292/5523/1897.full}{\emph{Science}} \textbf{292}, 5523 (2001).} have recently attracted a lot of excitement in the world of nano-optics because they have demonstrated near 100\% efficiency---every single photon they absorb is used to produce a photon of laser light.\footnote{See H. Zhu et al. \href{http://www.nature.com/nmat/journal/v14/n6/full/nmat4271.html}{\emph{Nature Materials}} \textbf{14} (2015).} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Summary} To recap, in this chapter we analyzed a quantum mechanical system where the Hamiltonian was no longer time-independent, thus exposing us to the framework of time-dependent perturbation theory. It turns out, however, that when the perturbation is small, we can employ our friendly eigenfunction expansion to arrive at a very good first order approximation for the exact solution. We looked at a particular case when the perturbation term is sinusoidal, which is an appropriate model for an electromagnetic field, and the transition probability we derived was sinusoidal in time as well. We finished with an analysis of stimulated emission, which is fundamental to the operation of the laser. %} % for doublespacing %\end{document}
\section{ByLaws} By-Laws of the Foxboro Homeowners Association Revision 1/2007 \subsection{ARTICLE I Name of Association} The Association shall be known as the Foxboro Home Owners’ Association of Brentwood, Tennessee (not for profit organization). \subsection{ARTICLE II MISSION STATEMENT} More than a destination at the end of the day, Foxboro is a community you want to call home and where you feel at home. Our goal will be achieved when: \begin{itemize} \item Being part of our community means sharing with your neighbors a common desire to promote harmony and contentment, and the value of living within the Foxboro community. \item The association strikes a reasonable, logical balance between the best interests of individual homeowners and those of the community as a whole. \item Our homeowners and association leaders recognize and accept their rights and responsibilities. \end{itemize} \subsection{ARTICLE III OFFICES} The principal office of the Association shall be located at the residence of the President. \subsection{ARTICLE IV MEETING OF MEMBERS} \subsubsection{SECTION 1: Annual Meeting.} The annual meeting of the Members of the Association shall be held each year at a date and time fixed by the Board of Directors. The secretary shall give notice of the annual meeting to all members of record at least thirty (30) days prior to such meeting specifically outlining the date, time, and location. The Board of Directors may designate any place within Brentwood as the place of meeting for the annual meeting, or any special meeting of the members of the Association. \subsubsection{SECTION 2: Order of Business for Annual Meeting.} \begin{itemize} \item Call Meeting to Order. \item Proof of notice of meeting. \item Reading minutes of last previous annual meeting. \item Report of Officers. \item Reports of Committees. \item Election of Association Board. \item Such miscellaneous business as may come or be properly brought before the meeting. \item Announcements. \item Adjournment. \end{itemize} \subsubsection{SECTION 3: Special Meetings.} Special meetings of Members for any purpose or purposes, may be called at any time by the President, or by a majority of the Board of Directors, and shall be called by the President upon written request of twenty (20\%) percent of the membership. Business transacted at all special meetings shall be confined to the subjects or purposes stated in the call. \subsubsection{SECTION 4: Quorum.} The presence at the meeting of Members entitled to cast, or of proxies entitled to cast, 1/10 of the total votes shall constitute a quorum. If, however, a quorum shall not be present or represented at any meeting, a majority of the Members so represented may vote to commence the meeting and transact business of the Association until adjournment. \subsubsection{SECTION 5: Proxies.} At all meetings of Members, a Member may vote or give his consent to proxy executed in writing by the Member. Such proxy shall and must bear a date not more than five (5) days prior to said meeting, and must be filed with the secretary of the Association before or at the time of the meeting. \subsubsection{SECTION 6: Voting of Members.} Each active member of the Association shall be entitled to one (1) vote for each household owned. In no event shall more than one vote be cast with respect to any household. Normal voting shall be by a voice vote, proxy, or by any method called for by the (a) President, or by (b) by motion and approval of the majority present. 
\subsection{ARTICLE V BOARD OF DIRECTORS} \subsubsection{SECTION 1: General Powers.} The management of all the affairs and business of the Association shall be vested with the Board of Directors. The general responsibilities of the Board shall include, but not be limited to, the following: \begin{itemize} \item Preparing and adopting an annual budget. \item The establishment of annual dues for the Association. \item Establish and maintain an adequate reserve fund balance for the periodic maintenance, repairs and replacement of the improvements to the common areas or fixed assets, to be maintained out of the annual dues / assessments. \item Providing for maintenance, care, and upkeep of all areas, which are under the responsibility of the Association. \item Keeping all monies of the Association in a bank depository approved by the Board, and using the proceeds to administer the Association. \item Obtaining and carrying insurance against casualties and liabilities, if deemed necessary by the Board. \item Paying the cost of all services rendered to the Association. \item Shall institute, adopt, and maintain policies that protect the Foxboro community as a whole, while protecting the interests of individual homeowners. \item Provide effective community leadership. \item To encourage and promote social relationships, and greater community involvement, among the households of the Foxboro community. \end{itemize} \subsubsection{SECTION 2: Nomination.} Nomination for election to the Board of Directors shall be made by a Nominating Committee, selected by the Board President, or from the floor at the annual meeting of Members. The Nominating Committee shall consist of a chairman, who shall be a member of the Board of Directors, and one or more members of the Association. The committee will nominate a recommended slate of directors at the annual meeting. All candidates shall consent to their nomination. \subsubsection{SECTION 3: Number, Tenure, and Qualifications.} The number of directors of the association shall not be less than four (4) persons who shall be elected at the annual meeting of the Members by a plurality vote of those in attendance with the right to vote in person or by proxy. The term of office is for one (1) calendar year. Each director shall hold office until his successor is elected even though his tenure in office should thereby exceed one (1) year. It shall be a requirement of the office of director that such person be an active member of the Association. \subsubsection{SECTION 4: Election of Officers.} The directors shall elect at the first Board meeting, after each annual meeting of the Association, the following officers, from the elected Board Members, for a term of one (1) calendar year or until their successors are chosen even though their tenure of office would thereby exceed one (1) year: President, Vice President, Secretary, and Treasurer. Officers within the Board of Directors will be rotated. No Director may serve more than three (3) consecutive terms within a specific office. There are no specified term limits for participation on the Board of Directors. \subsubsection{SECTION 5: Quorum.} A majority of the number of active elected directors (i.e., greater than 50 per cent) shall constitute a quorum for the transaction of business. \subsubsection{SECTION 6: Vacancies.} Any vacancy occurring in the Board of Directors, regardless of the manner in which caused, may be filled by the affirmative vote of the majority of the remaining directors.
A director elected to fill a vacancy shall be elected for the unexpired term of his predecessor in office. \subsubsection{SECTION 7: Board Meetings.} Regular meetings of the Board of Directors shall be held at the call of the President. Board meetings shall be held at least quarterly during the calendar year. All meetings of the board are open to all members of the association. \subsubsection{SECTION 8: Officers.} The term and election of officers is set forth in Article V, Section 4. Each officer, whether elected or appointed, shall hold office until his successor shall have been duly elected. It shall be a requirement that an officer be a member of the Association. \begin{itemize} \item President: Shall in general supervise and control all the business and affairs of the association, subject to the approval of the Board of Directors. He shall, when present, preside at all meetings of the Members and of the Board of Directors. The President shall be an ex-officio member of standing committees and shall be responsible for carrying into effect all orders and resolutions of the Board of Directors and Members as required or as good business dictates. \item Vice President: Shall exercise the functions of the President in the event of absence or disability. Shall have such powers and discharge such duties as may be assigned by the President or by the Board of Directors. \item Secretary: Shall keep the minutes of the annual member meeting and of the Board of Directors meetings in one or more books provided for such purpose. See that all notices are duly given in accordance with the Bylaws or as required by the Board of Directors. Be a custodian of all association records. Keep a register of the mailing addresses of all members. In general, perform all duties incident to the office of the Secretary. \item Treasurer: Shall have charge and custody of and be responsible for all monies of the Association; receive and give receipts for monies due and payable to the Association from any source whatsoever and deposit all such monies in the name of the Association in such bank as shall be selected by the Board. Shall render to the Board of Directors, from time to time, as may be required, an account of all transactions of the Treasurer and of the financial condition of the Association. The treasurer shall
disburse funds consistent with the annual budget as approved by the Board of Directors. Any special project, or extraordinary expenses, will require prior Board approval before disbursement of funds by the treasurer. \end{itemize} \subsubsection{SECTION 9: Other Board Members.} \begin{itemize} \item Board Members at Large: Shall represent the community as a whole at all Board Meetings. Will replace any vacancy in the office of Vice President, Secretary, or treasurer. \item Landscape Coordinator: The landscape coordinator shall be responsible for the development and maintenance of the front entrance to the Foxboro Community. Shall render to the Board of Directors proposed annual budgets for approval. All transactions to be approved by Board. \item President of Women’s Club: Shall represent the interests of the Foxboro Women’s Club at all Board Meetings. \end{itemize} \subsubsection{SECTION 10: Standing or Temporary Committees.} The Board of Directors may appoint standing or temporary committees from its own number from time to time as deemed appropriate in carrying out its purpose. All committees so appointed shall keep regular minutes of the transactions of their meetings and shall record them with the Secretary of the Association. \subsubsection{SECTION 11: Official Records.} The association shall maintain each of the following items, as applicable, which constitute the official records of the association: \begin{itemize} \item Minutes. Minutes of all meetings of the members of the association, and board of directors must be maintained in written form. These minutes must be maintained for at least 7 years. \item A copy of the Bylaws of the association and of each amendment. \item A copy of the Covenants of each section within Foxboro Neighborhood. \item A current roster of all members and their mailing addresses. \item All of the association’s insurance policies or a copy thereof, such policies must be retained for at least 7 years. \item A current copy of all contracts to which the association is a party. Bids received by the association for work to be performed are considered official records and must be kept for a period of 1 year. \item The financial and accounting records of the association, kept according to good practices. All financial records must be kept for a period of at least 7 years. This shall include: accurate, itemized, and detailed records of all receipts and expenditures. \end{itemize} \subsection{Article VI Financial Matters of Association} \subsubsection{SECTION 1: Financial Reporting.} The association shall prepare an annual financial report for submission to the membership at the annual meeting. A copy of the financial report is available upon request, and shall consist of: \begin{enumerate}[label=\alph*)] \item The amount of receipts and expenditures by classification; and \item The beginning and ending cash balances of the association. \end{enumerate} \subsubsection{SECTION 2: Dues.} \begin{enumerate}[label=\alph*)] \item All annual fees are payable on or before March 31st. \item The Board of Directors is empowered to set the annual membership fee based upon the financial needs of the association. The Board at the annual meeting shall communicate notice of such changes in the membership dues. \item New residents with occupancy prior to July 1 of the calendar year are required to pay 100\% of the annual dues. New residents with occupancy after July 1 will be required to pay 50\% of the annual dues.
New residents will be credited with payment, if previous owner has already paid the annual dues. \end{enumerate} \subsubsection{SECTION 3: Deposits.} All funds of the association shall be deposited to the credit of the association in a bank depository as selected and approved by the Executive Board. It is further agreed that the association shall establish and maintain an adequate reserve fund balance for the periodic maintenance, repairs and replacement of the improvements to the common areas or fixed assets, to be maintained out of the annual dues/assessments. Such minimum balance will be determined and adjusted annually by the Board of Directors. \subsubsection{SECTION 4: Assessments.} The Board of Directors is empowered to issue an assessment to the membership for specified projects not included within the annual budget that serve in the best interest of the Foxboro community as a whole. Prior to the issuance of the assessment, the Board will: \begin{enumerate}[label=\alph*)] \item Communicate to the community of homeowners the proposed project to be included within the assessment. \item A preliminary budget for the project will be submitted to each member. \item A special meeting will be conducted with the membership. \end{enumerate} \subsubsection{SECTION 5: Contracts.} The Board of Directors shall receive competitive bids on all projects before entering into any contract in the name of the Association. Members of the Board shall vote in the plurality on the terms of such contract before execution. \subsubsection{SECTION 6: Checks, Drafts, etc.} The Treasurer shall sign all checks, drafts, or other orders for payment of money, notes or other evidence of indebtedness issued in the name of the association. The Board may, at its discretion, place financial limits on the ability of the Treasurer to issue checks. \subsubsection{SECTION 7: Gifts.} The Board of Directors may accept on behalf of the Association any contribution, gift, bequest or devise for the general purposes or for any special purposes of the Association. \subsubsection{SECTION 8: Contributions.} The Association shall not embark upon any program of the issuance of any funds to charities, special interest groups, clubs, etc. without the approval by two-thirds vote of the households eligible to vote. \subsection{Article VII Miscellaneous} \subsubsection{SECTION 1:} Any question as to the meaning or proper interpretation of any of the provisions of these by-laws shall be determined by the Board of Directors of the Association. \subsubsection{SECTION 2: Definition of Membership.} A member of the Association shall be defined as a legal owner of a household in Foxboro that has paid the current annual Association fees and is entitled to one (1) vote as outlined in Article IV, Sections 5 \& 6. \subsubsection{SECTION 3: Major Capital Expenditures.} Major capital expenditures shall be defined as that expenditure of funds for the construction of new amenities and/or the replacement of existing amenities or repairs. The Board of Directors is empowered to approve such expenditures pursuant to Article V, Section 1 and Article VI, Section 5. \subsubsection{SECTION 4:} The Board of Directors of the Association is empowered to amend the By-Laws of the Association provided approval is by two-thirds of the active Board of Directors.
\subsection{Types, Sets, and Concepts} \p{In formal/computational contexts, types can be defined as sets of both values and \q{expectations} \cite{MathieuBouchard} (meaning assumptions which may be made about all values covered by the type); alternatively, we can (perhaps better) consider types as \i{spaces} of values. Types' extensions have internal structure; there can be \q{null} or \q{invalid} values, default-constructed values, and so forth, which are \q{regions} of the conceptual space spanned by or encompassing types.\footnote{Conceptual Space theory is outside the scope of this paper, but I'll note that it suggests a promising link between natural linguistics and formal/computational type theory, as suggested by computational or scientific expositions of the original Conceptual Space account developed by Peter \Gardenfors{}: cf. \cite{Zenker}, \cite{RaubalAdams}, \cite{RaubalAdamsCSML}, \cite{Strle}. Meanwhile, projects to develop formal models for Cognitive Grammar have also adopted Conceptual Space theory as an underlying semantics: \cite{KennethHolmqvist}, \cite{HolmqvistDiss}, \cite{MattSelway}, \cite{InteractingConceptualSpaces}. } There is definitional interdependence between types and functions: a function is defined in terms of the types it accepts as parameters and returns \mdash{} rather than its entire set of possible inputs and outputs, which can vary across computing environments. \footnote{Moreover, expectations in a particular case may be more precise than what is implied by the type itself \mdash{} it is erroneous to assume that a proper type system will allow a correct \q{set of values} to be stipulated for each point in a computation (the kind of contract enforced via documentation and unit testing). So state-space in a given context may include many \q{unreasonable} values, implying that within the overall space there is a \q{reasonable} subspace, except that this subspace may not be crisply defined. } These are some reasons why in theoretical Computer Science types are not \q{reduced} to underlying sets; instead, extensions are sometimes complex spaces that model states of, or internal organization of comparisons among, type instances. } \p{An obvious paradigm is organizing type-extensions around prototype/borderline cases \mdash{} there are instances which are clear examples of types and ones whose classification is dubious. I contend, however, that common resemblance is not always a good marker for types being well-conceived \mdash{} many useful concepts are common precisely because they cover many cases, which makes defining \q{prototypes} or \q{common properties} misleading. Also, sometimes the clearest \q{representative} example of a type or concept is actually not a \i{typical} example: a sample letter or model home is actually not (in many cases) a real letter or home. So resemblance-to-prototype is at best one kind of \q{inner organization} of concepts' and types' spaces of extension. } \p{Sets, concepts, and types represent three different primordial thought-vehicles for grounding notions of logic and meaning. To organize systems around \i{sets} is to forefront notions of inclusion, exclusion, extension, and intersection, which are also formally essential to mathematical logic and undergird the classical interdependence of sets, logic, and mathematics. To organize systems around \i{concepts} is to forefront practical engagement and how we mold conceptual profiles, as collections of ideas and pragmas, to empirical situations.
To organize systems around \i{types} is to forefront \q{functions} or transformations which operate on typed values, the interrelationships between different types (like subtypes and inclusion \mdash{} a type can itself encompass multiple values of other types), and the conceptual abstraction of types themselves from the actual sets of values they may exhibit in different environments. Sets and types are formal, abstract phenomena, whereas concepts are characterized by gradations of applicability, and play flexible roles in thought and language. The cognitive role of concepts can be discussed with some rigor, but there is a complex interplay of cognitive schema and practical engagements which would have to be meticulously sketched in many real-world scenarios, if our goal were to translate conceptual reasoning to formal structures on a case-by-case basis. We can, however, consider in general terms how type-theoretic semantics can capture conceptual structures as part of the overall transitioning of thoughts to language. } \p{A concept does not merely package up a definition, like \q{restaurant} as \q{a place to order food}; instead concepts link up with other concepts as tools for describing and participating in situations. Concepts are associated with \q{scripts} of discourse and action, and find their range of application through a variegated pragmatic scope. We should be careful not to overlook these pragmatics, and assume that conceptual structures can be simplistically translated to formal models. Cognitive Linguistics critiques Set-Theoretic or Modal Logic reductionism (where a concept is just a set of instances, or an extension across different possible worlds) \mdash{} George Lakoff and Mark Johnson, prominently, argue for concepts' organization around prototypes (\cite[p. 18]{LakoffJohnson}; \cite[p. 171, or p. \textit{xi}]{Johnson}) and embodied/enactive patterns of interaction (\cite[p. 90]{LakoffJohnson}; \cite[p. 208]{Johnson}). Types, by contrast, at least in linguistic applications of type theory, are abstractions defined in large part by quasi-functional notions of phrase structure. Nevertheless, the \i{patterns} of how types may inter-relate (mass-noun or count-noun, sentient or non-sentient, and so forth) provide an infrastructure for conceptual understandings to be encoded in language \mdash{} specifically, to be signaled by which typed articulations conversants choose to use. A concept like \i{restaurant} enters language with a collection of understood qualities (social phenomena, with some notion of spatial location and being a \q{place}, etc.) that in turn can be marshaled by sets of allowed or disallowed phrasal combinations, whose parameters can be given type-like descriptions. Types, in this sense, are not direct expressions of concepts but vehicles for introducing concepts into language. } \p{Concepts (and types also) are not cognitively the same as their extension \mdash{} the concept \i{restaurant}, I believe, is distinct from concepts like \i{all restaurants} or \i{the set of all restaurants}. This is for several reasons. First, concepts can be pairwise different not only through their instances, but because they highlight different sets of attributes or indicators. The concepts \q{American President} and \q{Commander in Chief} refer to the same person, but the latter foregrounds a military role.
Formal Concept Analysis considers \i{extensions} and \q{properties} \mdash{} suggestive indicators that inhere in each instance \mdash{} as jointly (and co-dependently) determinate: concepts are formally a synthesis of instance-sets and property-sets \cite{YiyuYao}, \cite{Belohlavek}, \cite{Wille}. Second, in language, clear evidence for the contrast between \i{intension} and \i{extension} comes from phrase structure: certain constructions specifically refer to concept-extension, triggering a mental shift from thinking of the concept as a schema or prototype to thinking of its extension (maybe in some context). Compare: \begin{sentenceList}\sentenceItem{} \label{itm:rhinor} Rhinos in that park are threatened by poachers. \sentenceItem{} Young rhinos are threatened by poachers. \end{sentenceList} Both sentences focus a conceptual lens in greater detail than \i{rhino} in general, but the second does so more intensionally, by adding an extra indicative criterion; while the former does so extensionally, using a phrase-structure designed to operate on and narrow our mental construal of \q{the set of all rhinos}, in the sense of \i{existing} rhinos, their physical place and habitat, as opposed to the \q{abstract} (or \q{universal}) type. So there is a familiar semantic pattern which mentally transitions from a lexical type to its extension and then extension-narrowing \mdash{} an interpretation that, if accepted, clearly shows a different mental role for concepts of concepts' \i{extension} than the concepts themselves.\footnote{There is a type-theoretic correspondence between intension and extension \mdash{} for a type \Tnoindex{} there is a corresponding \q{higher-order} type of \i{sets} whose members are \Tnoindex{} (related constructions are the type of \i{ordered sequences} of \Tnoindex{}; unordered collections of \Tnoindex{} allowing repetition; and stacks, queues, and deques \mdash{} double-ended queues \mdash{} as \Tnoindex{}-lists that can grow or shrink at their beginning and/or end). If we take this (higher-order) type gloss seriously, the extension of a concept is not its \i{meaning}, but a different, albeit interrelated concept. Extension is not definition. \i{Rhino} does not mean \i{all rhinos} (or \i{all possible rhinos}) \mdash{} though arguably there are concepts \i{all rhinos} and \i{all restaurants} (etc.) along with the concepts \i{rhino} and \i{restaurant}. } } \p{Concepts, in short, do not mentally signify sets, or extensions, or sets-of-shared-properties. Concepts, rather, are cognitive/dialogic tools. Each concept-choice, as presentation device, invites its own follow-up. \i{Restaurant} or \i{house} have meaning not via idealized mental pictures, or proto-schema, but via kinds of things we do (eat, live), of conversations we have, of qualities we deem relevant. Concepts do not have to paint a complete picture, because we use them as part of ongoing situations \mdash{} in language, ongoing conversations. Narrow concepts \mdash{} which may best exemplify \q{logical} models of concepts as resemblance-spaces or as rigid designators to natural kinds \mdash{} have, in practice, fewer use-cases \i{because} there are fewer chances for elaboration. Very broad concepts, on the other hand, can have, in context, too \i{little} built-in \i{a priori} detail. (We say \q{restaurant} more often than \i{eatery}, and more often than \i{diner}, \i{steakhouse}, or \i{taqueria}). 
Concepts dynamically play against each other, making \q{spaces} where different niches of meaning, including levels of precision, converge as sites for one or another. Speakers need freedom to choose finer or coarser grain, so concepts are profligate, but the most oft-used trend toward middle ground, neither too narrow nor too broad. \i{Restaurant} or \i{house} are useful because they are noncommittal, inviting more detail. These dynamics govern the flow of inter-concept relations (disjointness, subtypes, partonymy, etc.). } \p{Concepts are not rigid formulae (like instance-sets or even attributes fixing when they apply); they are mental gadgets to initiate and guide dialog. Importantly, this contradicts the idea that concepts are unified around instances' similarity (to each other or to some hypothetical prototype): concepts have avenues for contrasting different examples, invoking a \q{script} for further elaboration, or for building temporary filters. In, say, \begin{sentenceList}\sentenceItem{} Let's find a restaurant that's family-friendly. \end{sentenceList} allowing such one-off narrowing is a feature of the concept's flexibility. } \p{In essence: no less important than acknowledged similarities across all instances are well-rehearsed ways \visavis{} each concept to narrow scope by marshaling lines of \i{contrast}, of \i{dissimilarity}. A \i{house} is obviously different from a \i{skyscraper} or a \i{tent}, and better resembles other houses; but there are also more nontrivial \i{comparisons} between houses than between a house and a skyscraper or a tent. Concepts are not only spaces of similarity, but of \i{meaningful kinds of differences}. } \p{To this account of conceptual breadth we can add the conceptual matrix spanned by various (maybe overlapping) word-senses: to \i{fly}, for example, names not a single concept, but a family of concepts all related to airborne travel. Variations highlight different features: the path of flight (\i{fly to Korea}, \i{fly over the mountain}); the means (\i{fly Korean Air}, \i{that model flew during World War II}); the cause (\i{sent flying (by an explosion)}, \i{the bird flew away (after a loud noise)}, \i{leaves flying in the wind}). Words allow different use-contexts to the degree that their various \i{senses} offer an inventory of aspects for highlighting by \i{morphosyntactic} convention. Someone who says \i{I hate to fly} is not heard to dislike hang-gliding or jumping off mountains.\footnote{People, unlike birds, do not fly \mdash{} so the verb, used intransitively (not flying \i{to} somewhere in particular or \i{in} something in particular), is understood to refer less to the physical motion and more to the socially sanctioned phenomenon of buying a seat on a scheduled flight on an airplane. The construction highlights the procedural and commercial dimension, not the physical mechanism and spatial path. But it does so \i{because} we know human flight is unnatural: we can poetically describe how the sky is filled with flying leaves or birds, but not \q{flying people}, even if we are near an airport. } Accordant variations of cognitive construal (attending more to mode of action, or path, or motives, etc.), which are elsewhere signaled by grammatic choices, are also spanned by a conceptual space innate to a given word: senses are finer-grained meanings availing themselves to one construal or another.
} \p{So situational construals can be signaled by word- and/or syntactic form choice (locative, benefactive, direct and indirect object constructions, and so forth). Whereas conceptual organization often functions by establishing classifications, and/or invoking \q{scripts} of dialogic elaboration, cognitive structure tends to apply more to our attention focusing on particular objects, sets of objects, events, or aspects of events or situations. So the contrast between singular, mass-multiples, and count-multiples, among nouns, depends on cognitive construal of the behavior of the referent in question (if singular, its propensity to act or be conceived as an integral whole; if multiple, its disposition to either be divisible into discrete units, or not). Or, events can be construed in terms of their causes (their conditions at the outset), or their goals (their conditions at the conclusion), or their means (their conditions in the interim). Compare \i{attaching} something to a wall (means-focused) to \i{hanging} something on a wall (ends-focused); \i{baking} a cake (cause-focus: putting a cake in the oven with deliberate intent to cook it) to \i{burning} a cake (accidentally overcooking it).\footnote{We can express an intent to bake someone a cake, but not (well, maybe comedically) to \i{burn} someone a cake (\q{burn}, at least in this context, implies something not intended); however, we \i{can} say \q{I burnt your cake}, while it is a little jarring to say \q{I baked your cake} \mdash{} the possessive implies that some specific cake is being talked about, and there is less apparent reason to focus on one particular stage of its preparation (the baking) once it is done. I \i{will} bake a cake, in the future, uses \q{bake} to mean also other steps in preparation (like \q{make}), while, in the present, \q{the cake \i{is} baking} emphasizes more its actual time in the oven. I \i{baked your cake} seems to focus (rather unexpectedly) on this specific stage even after it is completed, whereas \i{I baked you a cake}, which is worded as if the recipient did not know about the cake ahead of time, apparently uses \q{bake} in the broader sense of \q{made}, not just \q{cooked in an oven}. Words' senses mutate in relation to the kinds of situations where they are used \mdash{} why else would \i{bake} mean \q{make}/\q{prepare} in the past or future tense but \q{cook}/\q{heat} in the present? } These variations are not random assortments of polysemous words' senses: they are, instead, rather predictably distributed according to speakers' context-specific knowledge and motives. } \p{I claim therefore that \i{concepts} enter language complexly, influenced by conceptual \i{spaces} and multi-dimensional semantic and syntactic selection-spaces. Concepts are not simplistically \q{encoded} by types, as if for each concept there is a linguistic or lexical type that just disquotationally references it \mdash{} that the type \q{rhino} means the concept \i{rhino} (\q{type} in the sense that type-theoretic semantics would model lexical data according to type-theoretic rules, such as \i{rhino} as subtype of \i{animal} or \i{living thing}). Cognitive schema, at least in the terms I just laid out, select particularly important gestalt principles (force dynamics, spatial frames, action-intention) and isolate these from a conceptual matrix. 
On this basis, we can argue that these schemata form a precondition for concept-to-type association; or, in the opposite logical direction, that language users' choices to employ particular type articulations follow from their prelinguistic cognizing of practical scenarios as this emerges out of collections of concepts used to form a basic understanding of and self-positioning within them. } \p{In this sense I called types \q{vehicles} for concepts: not that types \i{denote} concepts but that they (metaphorically) \q{carry} concepts into language. \q{Carrying} is enabled by types' semi-formal rule-bound interactions with other types, which are positioned to capture concepts' variations and relations with other concepts. } \p{To express a noun in the benefactive case, for example, which can be seen as attributing to it a linguistic type consistent with being the target of a benefactive, is to capture the concept in a type-theoretic gloss. It tells us: \q{I'm thinking about this thing in such a way that it \i{can} take a benefactive} (the type formalism attempting to capture that \q{such a way}). A concept-to-type \q{map}, as I just suggested, is mediated (in experience and practical reasoning) by cognitive organizations; when (social, embodied) enactions take linguistic form, these organizing principles can be encoded in how speakers apply morphosyntactic rules. } \p{So the linguistic structures, which I propose can be formally modeled by a kind of type theory, work communicatively as carriers and thereby signifiers of cognitive attitudes. The type is a vehicle for the concept because it takes part in constructions which express conceptual details \mdash{} the details don't emerge merely by virtue of the type itself. I am not arguing for a neat concept-to-type correspondence; instead, a type system provides a \q{formal substrate} that models (with some abstraction and simplification) how properties of individual concepts translate (via cognitive-schematic intermediaries) to their manifestation in both semantics and syntax. } \p{Continuing with declension as a case study, consider how an \q{ontology} of word senses can interrelate with the benefactive. A noun as a benefactive target most often is a person or some other sentient/animate being; an inanimate benefactive is most likely something artificial and constructed (cf., \i{I got the car new tires}). How readily hearers accept a sentence \mdash{} and the path they take to construing its meaning so as to make it grammatically acceptable \mdash{} involves interlocking morphological and type-related considerations; in the current example, the mixture of benefactive case and which noun \q{type} (assuming a basic division of nouns into e.g. animate/constructed/natural) forces a broader or narrower interpretation. A benefactive with an \q{artifact} noun, for example, almost forces the thing to be heard as somehow in disrepair: \begin{sentenceList}\sentenceItem{} I got glue for your daughter. \sentenceItem{} I got glue for your coffee mug. \end{sentenceList} We gather (in the second case) that the mug is broken \mdash{} but this is never spelled out by any lexical choice; it is implied indirectly by using the benefactive case. It is easy to design similar examples with other cases: a locative construction rarely targets \q{sentient} nouns, so in \begin{sentenceList}\sentenceItem{} We're going to Grandma! \sentenceItem{} Let's go to him right now. \sentenceItem{} Let's go to the lawyers. \sentenceItem{} Let's go to the press.
\end{sentenceList} we mentally substitute the person with the place where they live or work. } \p{Morphosyntactic considerations are also at play: \i{to the lawyers} makes \q{go} sound more like \q{consult with}, partly because of the definite article (\i{the} lawyers implies conversants have some prior involvement with specific lawyers or else are using the phrase metonymically, as in \q{go to court} or \q{to the courts}, for legal institutions generally; either reading draws attention away from literal spatial implications of \q{go}). \i{Go to him} implies that \q{he} needs some kind of help, because if the speaker just meant going to wherever he's at, she probably would have said that instead. } \p{Similarly, the locative in \i{to the press} forces the mind to reconfigure the landmark/trajector structure, where \i{going} is thought not as a literal spatial path and \i{press} not a literal destination \mdash{} in other words, the phrase must be read as a metaphor. But the \q{metaphor} here is not \q{idiomatic} or removed from linguistic rules (based on mental resemblance, not language structure); here it clearly works off of formal language patterns: the landmark/trajector relation is read abstracted from literal spatial movement because the locative is applied to an expression (\i{the press}) which does not (simplistically) meet the expected interpretation as \q{designation of place}. } \p{In short, there are two different levels of \i{granularity} where we can look for agreement requirements: a more fine-grained level where e.g. \i{locative} draws in a type-specification of a \i{place} or \i{location}; and a coarser level oriented toward Parts of Speech, and typologies of phrasal units. On the finer scale, what linguistics can draw from type theory gravitates toward type-coercions, \q{dependent types}, and topics based on programming language type systems, like \q{monads} \mdash{} \cite{ZhaohuiLuo}, \cite{LuoSoloviev}, \cite{ChatzikyriakidisLuo}, \cite{AsherPustejovsky}, \cite{BarkerShan}, \cite{ShanMonads}, \cite{ShanThesis}. On the coarser scale, on the other hand, analyses can focus on the interconnections between types and functions: most top-level linguistic types fit the formal model of (in computational contexts) \q{functional} types, associated with types of \q{inputs} and \q{outputs}. For instance, assuming we have a primordial \i{noun} type and a \i{proposition} type (the type assigned to complete sentences), a \i{verb} is then at some abstract reading a \q{function} from nouns to propositions \mdash{} insofar as verbs produce propositions when combined with nouns. Similarly an adjective maps nouns to other nouns (in a conceptual sense; noun-phrases, literally speaking); adverbs map verbs to other verbs, and so forth. } \p{The proposition type (say, \q{\Prop{}}) provides a type attribution for sentences, but also for sentence parts: \i{he is at school}, for example, presents a complete idea, either as its own sentence or part of a larger one. In the latter case, a \Prop{} phrase would typically be preceded with a word like \i{that}; \i{syntactically}, \q{that} is essentially a connector, helping sentence-parts link with each other: \begin{sentenceList}\sentenceItem{} I think he is at school. \sentenceItem{} \label{itm:bt} I believe that he is at school. \end{sentenceList} Type-theoretically, however, we may want to assign types to every word, even those \mdash{} like \i{that} in (\ref{itm:bt}) which seem auxiliary and lacking much or any semantic content of their own. 
Arguably, \i{that} serves to \q{package} an assertion, encapsulating a proposition as a presumed fact designated as one idea, for the sake of making further comments, as if \q{making a noun} out of it: \PropToN{}. Perhaps our intuitions are more as if \i{that he is at school} is also a proposition, maybe a subtly different kind, by analogy to how questions and commands are also potentially \Prop{} variants. Since \thatPhrases{} are \q{arguments} for verbs, the choice then becomes whether it is useful to expand our type picture of verbs so that they may act on propositions as well as nouns, or rather type \q{encapsulated} propositions as just nouns (maybe special kinds of nouns). } \p{In either case, \i{I know that ...} clearly involves a verb with subject and direct object: so either \VisNNtoProp{} or \VisNProptoProp{}. In Link Grammar, the connection between a verb and its assertorial direct object is labeled \TS{}.\footnote{Link Grammar models parses via interword relations classified according to some four dozen recognized syntactic and semantic connectors; a complete parse yields a labeled \q{graph} of each sentence, similar to graphs derived from Dependency Grammars, with link-kinds notated via labels like \q{\TS{}} (the exact vocabulary of link kinds establishes specific grammars for specific natural languages) \mdash{} cf. \cite{SleatorTamperley}; \cite{Debusmann}, \cite{DebusmannThesis}, \cite{DebusmannDuchierRossberg}, \cite{Nivre}. Link Grammar is distinguished from Dependency Grammar in that the former does not directly model head/dependent relations within word-pairs, but it \i{does} develop a theory of individual words carrying incompleteness or expectations which define \q{compatibility} between words, allowing links to be formed; the link/dependency comparison is investigated in \cite{KongRushSmith}, \cite{HongEisner}; and other writings compare both grammatic schools to more hierarchical phrase-structure grammars: \cite{Schneider}, \cite{XiaPalmer}. } Consider the role of a \TS{}-link here \mdash{} the purely formal consideration is ensuring that types are consistent: either the \TS{} target is \Prop{}, with the verb type modified accordingly; or the \TS{} target is a noun, though here it is fair to narrow scope. For this particular kind of link, the target must express a proposition: either typed directly as such or typed as, say, a noun \q{packaging} a proposition, which would then be a higher-order type relation (just as \q{redness} is a noun \q{packaging} an adjective, or \q{running} is an adjective packaging a verb). In other words, it is difficult to state the type restrictions on the link-pair without employing more complex or higher-order type formations. } \p{On the other hand, this is another example of the fuzzy boundary between syntax and semantics: given a sentence which seems to link a verb calling for a belief or assertion (like \q{know}, \q{think}, \q{suggest}, \q{to be glad}) to something that is not proposition-like, is such a configuration ungrammatical, or just hard to understand? Clearly, the \i{semantic} norms around verbs like \q{know} are that their \i{subject} has some quality of sentience (or can be meaningfully attributed belief-states, even if speakers know not to take it literally: \q{The function doesn't know that this number will never be zero}); and their \i{object} should be somehow propositional.
But applying type theory (or type theory in conjunction with Dependency Grammar) leaves open various analytic preferences: these requirements can be presented as rigid grammatic rules or as \q{post-parsing} semantic regulations. How to model the qualities of sentience (or at least of having propositional attitudes broadly conceived), for the noun, and of propositionality, for the direct object, is again at the discretion of the analysis (subtypes, quality-associations, etc.) \mdash{} Figure~\ref{fig:Iknow} shows one potential, rather simplified unpacking of the sentence; from this structure details can be added perhaps as extra syntax constraints or perhaps more as cues to interpretation.\input{figure.tex} If these requirements are seen as more syntactic, so qualities are incorporated into data like Part of Speech (say, a noun designating something with propositional attitudes being a subtype of a generic \N{} type), then we are more likely to analyze violations as simply incorrect (recall \q{The tree wants to run away from the dog} \mdash{} ungrammatical or just somehow \q{exotic}?). Some examples read less as incorrect than as clever or poetic usage \mdash{} so a richer analysis may recognize expressions as type- and link-wise acceptable, but showing incongruities (which is not the same as impropriety) at a more fine-grained type level. That \i{to want} takes a subject \i{associated} with sentience does not force type annotations to inscribe this in grammatic or lexical laws; instead, these associations can be introduced as potential \q{side effects}, \i{triggering} re-associations such as forcing hearers to ascribe sentience to something (like a tree) where such ascription is not instinctive. The type effect in this case lies more at the conceptual level, the language-user sifting conceptual backgrounds to find a configuration proper to the type requirements (in what sense can a tree \q{want} something?). In this \q{tree} case we probably appeal to concepts of \q{as if}: if the tree \i{were} sentient, it would be nervous of the dog sniffing around \mdash{} a humorous way of calling attention to the dog's actions (obliquely maybe alluding to people's background knowledge that dogs sometimes do things, like pee, in inconvenient places, from humans' perspectives). } \p{In brief, it is certainly possible \mdash{} though by no means mandatory \mdash{} to model type requirements with greater flexibility at a provisional grammatical layer, and then narrow in on subtypes or extra accumulations of qualifications on type-instances in a transition from grammar to semantics. Perhaps cognitive schema occupy an intermediary role: progressing from basic recognition of grammaticality, through cognitive schema, to conceptual framing, with type machinery capturing some of the thought-processes at each \q{step} (not that such \q{steps} are necessarily in a temporal sequence). The basic verb-subject-direct object articulation sets up an underlying cognitive attitude (represented by a basic type-framing of verb, noun, and proposition, like the \VisNNtoProp{} signature). Cognitive ascriptions fill this out by adding detail to the broader-hewed typing, associating sentience with the subject and propositionality with the object (sub- or higher-order typing modeling this stage).
And how the actual lexical choices fit these cognitive expectations \mdash{} I call them cognitive because they are intrinsically tied to structurational schema in the type, morphology, and word-order givens in the encountered language \mdash{} compels conversants to dip into background beliefs, finding concepts for the signified meanings that hew to the intermediary cognitive manipulations (finding ways to conceptualize the subject as sentient, for example). This also has a potential type model, perhaps as forcing a type conversion from a lexical element which does not ordinarily fit the required framing (such as giving inanimate things some fashion of sentience). Type theory can give a window onto unfolding intellection at these multiple stages, although we need not conclude that the mind subconsciously doing this thinking mimics a computer that churns through type transformations mechanically and exactly. } \p{I envision the unfolding that I have just sketched out as something Phenomenological \mdash{} it arises from a unified and subjective consciousness, one marked by embodied personal identity and social situation. If there are structural stases that can be found in this temporality of experience, these are not constitutive of conscious reality but a mesh of rationality that supports it, like the veins in a leaf. Structural configurations can be lifted from language insofar as it is a conscious, formally governed activity, and lifted from the ambient situations which lend language context and meaning intents. So any analytic emphasis on structural fixpoints threaded through the lived temporality of consciousness is an abstraction, but one that is deliberate and necessary if we want to make scientific or in any other manner disputable claims about how language and cognition work. In that spirit, then, I will try to condense the three \q{layers} of unfolding understanding, which as I have sketched them are posited in the metaphysical order of temporal experience \mdash{} \q{unfolding} in likely overlapping, blending ways \mdash{} by \q{reading into} them a more static and logically stacked meta-structure. Where I have sketched three layers or stages of unfolding language understanding, I will transition to proposing three \q{tiers} of language organization, in particular three levels where type-theoretic models can be applied. }
{ "alphanum_fraction": 0.7911178807, "avg_line_length": 65.5244618395, "ext": "tex", "hexsha": "458500c1278c58753c201b19b9f76745274ff462", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "8fd304d7df709d32367e49a98fb99f16162c5477", "max_forks_repo_licenses": [ "BSL-1.0" ], "max_forks_repo_name": "ScignScape-RZ/phcg", "max_forks_repo_path": "tth/section4a.ngml.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "8fd304d7df709d32367e49a98fb99f16162c5477", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "BSL-1.0" ], "max_issues_repo_name": "ScignScape-RZ/phcg", "max_issues_repo_path": "tth/section4a.ngml.tex", "max_line_length": 139, "max_stars_count": null, "max_stars_repo_head_hexsha": "8fd304d7df709d32367e49a98fb99f16162c5477", "max_stars_repo_licenses": [ "BSL-1.0" ], "max_stars_repo_name": "ScignScape-RZ/phcg", "max_stars_repo_path": "tth/section4a.ngml.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 8025, "size": 33483 }
% !TEX program = pdflatex
\documentclass{tufte-handout}

\title{\centering PHY202: Statistical Physics}

\author{Aditya Dev}

\date{\today} % without \date command, current date is supplied

%\geometry{showframe} % display margins for debugging page layout

\usepackage{graphicx} % allow embedded images
\setkeys{Gin}{width=\linewidth,totalheight=\textheight,keepaspectratio}
\usepackage{amsmath} % extended mathematics
\usepackage{booktabs} % book-quality tables
\usepackage{units} % non-stacked fractions and better unit spacing
\usepackage{multicol} % multiple column layout facilities
\usepackage{lipsum} % filler text
\usepackage{fancyvrb} % extended verbatim environments
\fvset{fontsize=\normalsize}% default font size for fancy-verbatim environments

% Standardize command font styles and environments
\newcommand{\doccmd}[1]{\texttt{\textbackslash#1}}% command name -- adds backslash automatically
\newcommand{\docopt}[1]{\ensuremath{\langle}\textrm{\textit{#1}}\ensuremath{\rangle}}% optional command argument
\newcommand{\docarg}[1]{\textrm{\textit{#1}}}% (required) command argument
\newcommand{\docenv}[1]{\textsf{#1}}% environment name
\newcommand{\docpkg}[1]{\texttt{#1}}% package name
\newcommand{\doccls}[1]{\texttt{#1}}% document class name
\newcommand{\docclsopt}[1]{\texttt{#1}}% document class option name
\newenvironment{docspec}{\begin{quote}\noindent}{\end{quote}}% command specification environment
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% add numbers to chapters, sections, subsections
\setcounter{secnumdepth}{2}
\usepackage{xcolor}
\definecolor{g1}{HTML}{077358}
\definecolor{g2}{HTML}{00b096}
% chapter format %(if you use tufte-book class)
%\titleformat{\chapter}%
%{\huge\rmfamily\itshape\color{red}}% format applied to label+text
%{\llap{\colorbox{red}{\parbox{1.5cm}{\hfill\itshape\huge\color{white}\thechapter}}}}% label
%{2pt}% horizontal separation between label and title body
%{}% before the title body
%[]% after the title body

% section format
\titleformat{\section}%
{\normalfont\Large\itshape\color{g1}}% format applied to label+text
{\llap{\colorbox{g1}{\parbox{1.5cm}{\hfill\color{white}\thesection}}}}% label
{1em}% horizontal separation between label and title body
{}% before the title body
[]% after the title body

% subsection format
\titleformat{\subsection}%
{\normalfont\large\itshape\color{g2}}% format applied to label+text
{\llap{\colorbox{g2}{\parbox{1.5cm}{\hfill\color{white}\thesubsection}}}}% label
{1em}% horizontal separation between label and title body
{}% before the title body
[]% after the title body
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\usepackage{color-tufte}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{document}

\maketitle% this prints the handout title, author, and date

\begin{abstract}
\noindent
I made these notes while following the lectures by Prof. Susskind.
\end{abstract}

%\printclassoptions

\section{Entropy}\label{sec:page-layout}
We define entropy as follows:
\begin{definition}[Entropy]\label{def:entropy}
Suppose a system can take values from the set \(\mathcal{S} = \{x_1, x_2, x_3, \ldots x_n\}\), with the probability of \(x_i\) being \(\mu(x_i)\). Then the (\textit{Boltzmann}) entropy \(S\) is defined as
\[S = -k_b \sum _{i} \mu(x_i) \log(\mu(x_i))\]
\end{definition}
Entropy can be thought of as the amount of ignorance/uncertainty that you have.
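For a concrete feel for this definition, here is a quick numerical illustration (a minimal Python sketch; I assume \texttt{numpy} is available and set \(k_b = 1\), so \(S\) comes out in natural units):
\begin{verbatim}
import numpy as np

def entropy(mu, k_b=1.0):
    # S = -k_b * sum_i mu_i * log(mu_i), with the
    # convention that 0 * log(0) = 0.
    mu = np.asarray(mu, dtype=float)
    mu = mu[mu > 0]
    return -k_b * np.sum(mu * np.log(mu))

print(entropy([1, 0, 0, 0]))     # 0.0: no ignorance
print(entropy([0.7, 0.1, 0.1, 0.1]))
print(entropy([0.25] * 4))       # log(4): maximal
\end{verbatim}
The uniform distribution, about which we are most ignorant, gives the largest entropy, while a certain outcome gives zero.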
It's obvious that if the probability is 1 or 0 then you are \(100 \%\) sure that a particular result will or will not be observed.
\begin{assume}
If \(S_a\) and \(S_b\) are the respective entropies of systems A and B, then \[S_{\mathrm{net}} = S_a + S_b\]
\label{assum:entropy}
\end{assume}
Using the above assumption, if we have \(N\) identical systems, then the net entropy of the system is given as \(S = -Nk_b \sum \mu_i\log(\mu_i)\).
\subsection{*On the existence of Entropy}
Does entropy exist? Or is it just a mathematical formalism? I'll try to answer this in my own way. But for that I need to make an assumption, and it's a fairly good assumption. Let me give an example to support my argument: suppose we toss a fair coin, say, \(n\) times, and we are asked for the probabilities of getting a given number of heads or tails. Since the above experiment follows a binomial distribution, we know that the most likely outcome is an equal number of heads and tails (\textit{because the number of arrangements with equal numbers of heads and tails is the largest}). So, the assumption that we'll make is:
\begin{assume}
Given a constraint on a system, in equilibrium the system will exist in the state with the maximum number of configurations (\textit{or microstates}) consistent with the constraint.
\end{assume}
We'll see that in the microcanonical ensemble the constraints on the system are the energy \(E\), the volume \(V\) and the number of particles \(N\). Let \(\Omega(E, V, N)\) denote the number of possible microstates of the system. Let's sub-divide this system into two non-interacting parts, called \(A\) and \(B\). So we have:
\[\begin{gathered}
\Omega(E, V, N) = \Omega_A(E_A, V_A, N_A)\Omega_B(E_B, V_B, N_B)\\
E = E_A + E_B\\
V = V_A + V_B\\
N = N_A + N_B
\end{gathered}\]
By assumption 1.2 we need to maximize \(\Omega\) over the ways the energy can be shared between the two parts. Since \(E\) is constant, we can formulate the whole argument in terms of either \(E_A\) or \(E_B\); we use \(E_A\):
\[\begin{gathered}
\dfrac{\partial (\Omega_A(E_A) \Omega_B(E - E_A))}{\partial E_A} = 0\\
\dfrac{\partial (\Omega_A(E_A))}{\partial E_A}\Omega_B(E - E_A) - \dfrac{\partial (\Omega_B(E - E_A))}{\partial E_A}\Omega_A(E_A) = 0\\
\frac{1}{\Omega_A(E_A)}\dfrac{\partial (\Omega_A(E_A))}{\partial E_A} = \frac{1}{\Omega_B(E - E_A)}\dfrac{\partial (\Omega_B(E - E_A))}{\partial E_A}\\
\dfrac{\partial \log(\Omega_A(E_A))}{\partial E_A} = \dfrac{\partial \log(\Omega_B(E - E_A))}{\partial E_A}
\end{gathered}\]
(here \(\partial (\Omega_B(E - E_A))/\partial E_A\) is understood as the derivative of \(\Omega_B\) with respect to its own argument, evaluated at \(E_B = E - E_A\)). We define the quantity \(k_b\log(\Omega)\)\footnote{the factor \(k_b\) has something to do with units} as ``entropy'' and denote it by \(S\):
\[S = k_b\log(\Omega)\]
Hence, the entropy comes before any of the physical quantities like pressure, temperature etc. We'll see in the next section how the two definitions of entropy are related.
\section{Microcanonical Ensemble}
In the \textit{microcanonical ensemble}\footnote{\textit{An \textbf{ensemble} is a large number of points in the phase space that can be described by a density function \(\mu (x, p)\)}} we consider an isolated system with \(N\) particles and energy \(E\) in a volume \(V\). By definition, such a system exchanges neither particles nor energy with the surroundings. Let a system consist of {\(N\) particles}, for which each particle can have \(M\) configurations\footnote{Configurations can be the rotation angle about some axis or spin being up or down etc}.
Also, if a particle is in the \(i^{th}\) state, then let the energy associated with that particle be \(\epsilon _i\). For this system the total energy is \(E = \sum n_i \epsilon_i\), where \(n_i\) denotes the number of particles in state (\textit{or configuration}) \(i\). Hence the constraint equations on the system are:
\[\begin{gathered}
E = \sum n_i \epsilon_i\\
\sum n_i = N
\end{gathered}\]
In the thermodynamic limit of statistical mechanics we usually assume \(N\) and \(V\) to be very large (i.e.\ \(N, V \to \infty\)) such that \(\rho = \frac{N}{V}\) is constant. In that case, we can assume that the number of particles in each state increases proportionally to \(N\). Let the probability of finding a particle in the \(i^{th}\) configuration be \(\mu_i\); by the definition of probability we have \(\mu_i = \lim_{N \to \infty} \frac{n_i}{N}\), or \(n_i = \mu_i N\). Now we can reformulate the above constraint equations in terms of probabilities:
\[\begin{gathered}
\sum \mu_i = 1 \Longleftrightarrow \sum n_i = N\tag*{1}\\
\sum_i N\mu_i \epsilon_i = E \Longleftrightarrow \sum_i \mu_i \epsilon_i = \epsilon \\
\end{gathered}\]
We define \(\epsilon = \frac{E}{N} = \langle E \rangle\) as the average energy per particle. Let \(\Omega\) be the number of possible microstates of the system\footnote{in our case it's the number of possible arrangements of \(N\) particles with exactly \(n_i\) particles in the \(i^{th}\) configuration}. If you know some basic combinatorics, then for our case \(\Omega\) is given as
\[\begin{gathered}
\Omega = \frac{N!}{\Pi n_i !}\\
\log(\Omega) = \log(N!) - \sum \log(n_i!)
\end{gathered}\]
Now we need to introduce \textit{Stirling's approximation}, which says that for large \(N\)
\[\begin{gathered}
N! \sim e^{-N} N^N\\
\log(N!) \sim N \log(N) - N
\end{gathered}\]
Therefore we have
\[\begin{gathered}
\log(\Omega) = N \log(N) - N - \sum n_i \log(n_i) + \sum n_i \\
\log(\Omega) = N \log(N) - N - \sum n_i \log(n_i) + N \\
\log(\Omega) = N \log(N) - \sum N \mu _i\log(N) - \sum N \mu _i\log(\mu_i)\\
\log(\Omega) = N \log(N) - N \log(N) \sum \mu _i - \sum N \mu_i \log(\mu_i) \\
\log(\Omega) = -N\sum \mu_i \log(\mu_i)
\end{gathered}\]
By assumption 1.1, observe that \(\boxed{S = k_b\log(\Omega) = -N k_b \sum \mu_i\log(\mu_i)}\). Hence, the entropy of the system can also be defined as \(S = k_b \log(\Omega)\)\footnote{\(\Omega\) is the total number of accessible microstates of the system}. Also, note that \(-k_b \sum \mu_i\log(\mu_i)\) is the entropy associated with each particle.
\section{Boltzmann Distribution}
\textit{In statistical mechanics and mathematics, a Boltzmann distribution (also called Gibbs distribution) is a probability distribution or probability measure that gives the probability that a system will be in a certain state as a function of that state's energy and the temperature of the system.}

We'll try to derive the Boltzmann distribution from the microcanonical ensemble. The Boltzmann distribution is the distribution that maximizes the entropy \(S = -k_b \sum \mu_i\log(\mu_i)\), or equivalently \(-\sum \mu_i\log(\mu_i)\). We need to find the probability distribution of the system, under the given constraints, such that the entropy is maximal. Hence, we use \href{https://en.wikipedia.org/wiki/Lagrange_multiplier}{Lagrange multipliers}.
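Before doing the maximization analytically, here is a numerical cross-check (a rough Python sketch; I assume \texttt{numpy} and \texttt{scipy} are available, take \(k_b = 1\), and make up a small spectrum \(\epsilon_i\)); the distribution it finds can be compared with the closed form \(e^{-\beta\epsilon_i}/z\) derived below:
\begin{verbatim}
import numpy as np
from scipy.optimize import minimize

eps_i = np.array([0.0, 1.0, 2.0, 3.0])  # spectrum
eps = 1.2                    # prescribed mean energy

def neg_entropy(mu):
    mu = np.clip(mu, 1e-12, None)
    return np.sum(mu * np.log(mu))

cons = [{"type": "eq",
         "fun": lambda mu: np.sum(mu) - 1.0},
        {"type": "eq",
         "fun": lambda mu: mu @ eps_i - eps}]
res = minimize(neg_entropy, np.full(4, 0.25),
               constraints=cons,
               bounds=[(0, 1)] * 4)
mu = res.x

# Fit beta from two states and rebuild
# exp(-beta * eps_i) / z for comparison.
beta = np.log(mu[0] / mu[1]) / (eps_i[1] - eps_i[0])
z = np.sum(np.exp(-beta * eps_i))
print(mu)
print(np.exp(-beta * eps_i) / z)  # matches mu above
\end{verbatim}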
Let
\[\begin{gathered}
F(\{\mu_i\}) = - \sum \mu_i \log(\mu_i) + \alpha (1 - \sum \mu_i) + \beta (\epsilon - \sum \mu_i \epsilon_i) \\
= - \sum \mu_i \log(\mu_i) + \alpha G(\{\mu_i\}) + \beta G'(\{\mu_i\}) \tag*{2}
\end{gathered}\]
where \(G'(\{\mu_i\}) = 0 \ \& \ G(\{\mu_i\}) = 0\) are the constraint equations\footnote{ there was no need for them, but it looks trippy, and that's how mathematicians do it. }. To maximize, differentiate (2) w.r.t.\ \(\mu_i\) and equate to \(0\); we get
\[\begin{gathered}
\dfrac{d F}{d \mu_i}= - \log(\mu_i) - 1 - \alpha - \beta \epsilon_i =0 \\
\mu_i = e^{-1 - \alpha} e^{-\beta \epsilon_i}\\
\boxed{\mu_i(\vec{r}, \vec{p}) = \frac{e^{-\beta \epsilon_i(\vec{r}, \vec{p})}}{z}}
\end{gathered}\]
where \(z = e^{1 + \alpha}\). \(z\) is called the \textit{partition function}. \textbf{Notice the dependence of the energy on the phase-space coordinates \((\vec{r}, \vec{p})\).} Substitute \(\mu_i\) in the above equations and you will see that
\[\begin{gathered}
\sum \mu_i = 1 \Longleftrightarrow \boxed{z(\beta) = \sum e^{-\beta \epsilon_i}}\\
\sum \epsilon_i \mu_i = \epsilon \Longleftrightarrow \sum \epsilon_i e^{-\beta \epsilon_i} = z \epsilon
\end{gathered}\]
Also, observe that
\[\begin{gathered}
\dfrac{\partial z(\beta)}{\partial \beta} = - \sum \epsilon_i e^{-\beta \epsilon_i}= -z\epsilon\\
\boxed{\dfrac{\partial \log(z)}{\partial \beta} = - \epsilon}
\end{gathered}\]
Let's look at what the entropy looks like. We had
\[\begin{gathered}
S = -k_b \sum \mu_i \log(\mu_i)\\
= -\frac{k_b}{z} \sum \{ e^{-\beta \epsilon_i} \log(e^{-\beta \epsilon_i}) - e^{-\beta \epsilon_i} \log(z)\}\\
= \frac{k_b}{z} \sum \{ e^{-\beta \epsilon_i}(\beta \epsilon_i) + e^{-\beta \epsilon_i} \log(z)\}\\
= \beta k_b \sum \frac{e^{-\beta \epsilon_i}}{z} \epsilon_i + k_b \log(z) \sum \frac{e^{-\beta \epsilon_i}}{z}\\
\boxed{S = k_b\beta \epsilon + k_b \log(z)}
\end{gathered}\]
We define temperature (the \textit{statistical definition of temperature}) as\footnote{don't question why it is so; it is what it is}
\[\dfrac{\partial S}{\partial E} = \frac{1}{T}\]
So,
\[\begin{gathered}
\frac{1}{T} = \dfrac{\partial S}{\partial \epsilon} = k_b \beta\\
\beta = \frac{1}{k_b T}
\end{gathered}\]
So finally everything unveils itself and we get
\begin{main_equations}
\[ \begin{gathered}
\mu_i = \frac{e^{\frac{-\epsilon_i}{k_b T}}}{z}\\
z(T) = \sum_{i} e^{\frac{-\epsilon_i}{k_b T}}\\
\epsilon = -\frac{\partial \log(z)}{\partial \beta} = k_B T^2 \frac{\partial \log(z)}{\partial T}\\
S = \frac{\epsilon}{T} + k_b \log(z)
\end{gathered} \]
\end{main_equations}
\subsection{Relation to thermodynamic variables}
We know the variance is defined as:
\[\begin{gathered}
\langle (\Delta X)^2 \rangle= \left\langle (X - \langle X\rangle)^2\right \rangle\\
= \langle X^2 \rangle - (\langle X \rangle)^2
\end{gathered}\]
We calculate:
\[\begin{gathered}
\langle E \rangle = \epsilon = -\frac{\partial \log(z)}{\partial \beta}\\
\langle E^2 \rangle = \frac{1}{z}\sum_i e^{-\beta \epsilon_i} \epsilon_i ^2 \\
= \frac{1}{z} \frac{\partial^2 z}{\partial \beta^2}
\end{gathered}\]
So, \(\langle (\Delta E)^2 \rangle\) is:
\[\begin{gathered}
\langle (\Delta E)^2 \rangle = \frac{1}{z} \frac{\partial^2 z}{\partial \beta^2} - \left(-\frac{1}{z} \frac{\partial z}{\partial\beta}\right)^2\\
= \frac{\partial^2 \log(z)}{\partial \beta^2} + \left(\frac{1}{z} \frac{\partial z}{\partial\beta}\right)^2 -\left(\frac{1}{z} \frac{\partial z}{\partial\beta}\right)^2\\
= \frac{\partial^2 \log(z)}{\partial \beta^2}\\
= - \dfrac{\partial \langle E \rangle}{\partial \beta}
\end{gathered}\]
or we can write it as
\[\begin{gathered}
\langle (\Delta E)^2 \rangle = - \dfrac{\partial \langle E \rangle}{\partial \beta}\\
= - \dfrac{\partial \langle E \rangle}{\partial T}\dfrac{\partial T}{\partial \beta}\\
= k_b T^2\dfrac{\partial \langle E \rangle}{\partial T}\\
= k_b T^2 C_v\\
\text{or}\\
\boxed{C_v = \frac{1}{k_b T^2} \langle (\Delta E)^2 \rangle}
\end{gathered}\]
where \(C_v\) is the heat capacity at constant volume\footnote{it's the heat capacity at ``constant volume" because the volume \(V\) (and \(N\)) are held fixed throughout.}. Observe that:
\[A = E - TS= -k_b T \log(z)\]
where \(A\) is the Helmholtz free energy\footnote{this is its definition}. We define the pressure \(P\) to be:
\[\begin{gathered}
P = \left. -\dfrac{\partial E}{\partial V} \right|_{S} = \left. -\dfrac{\partial A}{\partial V} \right|_{T}\\
\boxed{P = k_b T \left (\dfrac{\partial \log(z)}{\partial V} \right ) _{T} } \tag*{i}
\end{gathered}\]
\subsection{*Meaning of Partition Function}
\textit{Source: Wikipedia}

It may not be obvious why the partition function, as we have defined it above, is an important quantity. First, consider what goes into it. The partition function is a function of the temperature \(T\) and the microstate energies \(\epsilon_1, \epsilon_2, \epsilon_3, \) etc. The microstate energies are determined by other thermodynamic variables, such as the number of particles and the volume, as well as microscopic quantities like the mass of the constituent particles. This dependence on microscopic variables is the central point of statistical mechanics. With a model of the microscopic constituents of a system, one can calculate the microstate energies, and thus the partition function, which will then allow us to calculate all the other thermodynamic properties of the system. The partition function can be related to thermodynamic properties because it has a very important statistical meaning. The probability \(\mu_i\) that the system occupies microstate \(i\) is
\[\mu_i = \frac{1}{z} \mathrm{e}^{- \beta \epsilon_i}\]
Thus, as shown above, the partition function plays the role of a normalizing constant (note that it does not depend on \(i\)), ensuring that the probabilities sum up to one:
\[\sum_i \mu_i = \frac{1}{z} \sum_i \mathrm{e}^{- \beta \epsilon_i} = \frac{1}{z} z = 1\]
This is the reason for calling \(z\) the ``partition function": it encodes how the probabilities are partitioned among the different microstates, based on their individual energies. The letter \(z\) stands for the German word \textit{Zustandssumme}, ``sum over states". The usefulness of the partition function stems from the fact that it can be used to relate macroscopic thermodynamic quantities to the microscopic details of a system through the derivatives of its partition function. Finding the partition function is also equivalent to performing a Laplace transform of the density of states function from the energy domain to the \(\beta\) domain, and the inverse Laplace transform of the partition function reclaims the state density function of energies.
\section{The Ideal Gas}
\subsection{Introduction}
Suppose a system is made of \(N\) sub-systems (\textit{particles}) with negligible interaction energy, that is, we can assume the particles are essentially non-interacting.
If the partition functions of the sub-systems are \(\zeta_1, \zeta_2, \ldots \zeta _N,\) respectively, then the partition function of the entire system is the product of the individual partition functions:
\[z =\prod_{j=1}^{N} \zeta_j.\]
If the sub-systems have the same physical properties, then their partition functions are equal, \(\zeta_1 = \zeta_2 = \dots = \zeta_N \equiv \zeta\), in which case
\[z = \zeta^N\]
However, there is a well-known exception to this rule. If the sub-systems are actually identical particles, in the quantum mechanical sense that they are impossible to distinguish even in principle, the total partition function must be divided by \(N!\)\footnote{This is to ensure that we do not ``over-count" the number of microstates. }:
\[z = \frac{\zeta^N}{N!}\]
While this may seem like a strange requirement, it is actually necessary to preserve the existence of a thermodynamic limit for such systems; the inconsistency that arises without this factor is known as the Gibbs paradox.
\subsection{The partition function for the ideal gas}
If you have taken an introductory probability theory course, then you may know that in the continuous case the PMF (probability mass function) is replaced by a PDF (probability density function). In classical mechanics, the position and momentum variables of a particle can vary continuously, so the set of microstates is actually uncountable. In classical statistical mechanics, it is rather inaccurate to express the partition function as a sum of discrete terms. In this case we must describe the partition function using an integral rather than a sum. We'll assume the system to be non-interacting. The partition function for a single particle (treating it as a subsystem) is defined as\footnote{ To make it into a dimensionless quantity, we must divide by \(h\) for each pair of conjugate position and momentum variables (hence the \(h^3\) here); this also has to do with the precision with which we can measure positions and momenta in phase space. }:
\[\begin{gathered}
\zeta = \frac{1}{h^3}\int_p \int_x e^{-\beta E(x, p)} d^3x d^3p
\end{gathered}\]
For non-interacting particles the energy is due to momentum only, so:
\[\begin{gathered}
\zeta = \frac{1}{h^3}\int_p \int_x e^{-\beta \frac{p^2}{2m}} d^3x d^3p\\
= \frac{1}{h^3} \int_x d^3 x \int_p e^{-\beta \frac{p^2}{2m}}d^3p\\
= \frac{V}{h^3} \int_p e^{-\beta \frac{p^2}{2m}}d^3p \tag*{a}
%%\left(\frac{2\pi m}{\beta}\right)^{3/2}
\end{gathered}\]
Observe that \(p^2 = p_x ^2 + p_y ^2 + p_z ^2\) and \(d^3 p= d p_x dp_y dp_z\). We'll make an assumption:
\begin{assume}[Equipartition theorem]
Energy is distributed equally among all the degrees of freedom. In other words, if the total energy is \(E\) and there are \(d\) degrees of freedom, then each degree of freedom carries \(\frac{E}{d}\) of the energy.
\end{assume}
The above form of \(p^2\) becomes \(p^2 = 3 \bar{p} ^2\) and \(d^3 p = (d \bar{p})^3\), and equation (a) becomes
\[\begin{gathered}
\zeta = \frac{V}{h^3} \int_p e^{-\beta \frac{p^2}{2m}}d^3p \\
= \frac{V}{h^3} \left(\int_p e^{-\beta \frac{\bar{p}^2}{2m}}d \bar{p}\right)^3\\
\zeta = \frac{V}{h^3}\left(\frac{2\pi m}{\beta}\right)^{3/2}
\end{gathered}\]
Now take an ideal gas consisting of \(N\) identical particles. The partition function for the whole system is
\[\begin{gathered}
z = \frac{(\zeta)^N}{N!}\\
z = \frac{V^N}{h^{3N}N!}\left(\frac{2\pi m}{\beta}\right)^{3N/2}\\
\log(z) = N \log(V) + \frac{3N}{2} \log(2\pi m)- \frac{3N}{2}\log(\beta) - \log(N!)
- 3N\log(h)\end{gathered}\] Energy for the system is \[\begin{gathered} E = - \frac{\partial \log(z)}{\partial \beta} \\ = \frac{3N}{2\beta} \\ \boxed{E = \frac{3N}{2} k_b T} \end{gathered}\] Don't forget the Pressure from equation (i): \[\begin{gathered} P = k_b T \left (\dfrac{\partial \log(z)}{\partial V} \right ) _{T}\\ P = k_b T \frac{N}{V} \implies \boxed{PV = N k_b T} \end{gathered}\] \end{document}
{ "alphanum_fraction": 0.6866779406, "avg_line_length": 44.2577962578, "ext": "tex", "hexsha": "20d83f1b4a594dc30609d0f4cd272ee9d4493b28", "lang": "TeX", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2021-08-14T19:35:18.000Z", "max_forks_repo_forks_event_min_datetime": "2021-08-14T19:35:18.000Z", "max_forks_repo_head_hexsha": "1674ab3fe5ec2873af740ac3b5c334fad6d2f000", "max_forks_repo_licenses": [ "CC-BY-4.0" ], "max_forks_repo_name": "dev-aditya/course-notes-core", "max_forks_repo_path": "PHY202: Statistical-Mechanics and Thermodynamics/PHY202_-_Statistical Mechanics.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "1674ab3fe5ec2873af740ac3b5c334fad6d2f000", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "CC-BY-4.0" ], "max_issues_repo_name": "dev-aditya/course-notes-core", "max_issues_repo_path": "PHY202: Statistical-Mechanics and Thermodynamics/PHY202_-_Statistical Mechanics.tex", "max_line_length": 144, "max_stars_count": 7, "max_stars_repo_head_hexsha": "1674ab3fe5ec2873af740ac3b5c334fad6d2f000", "max_stars_repo_licenses": [ "CC-BY-4.0" ], "max_stars_repo_name": "dev-aditya/course-notes-core", "max_stars_repo_path": "PHY202: Statistical-Mechanics and Thermodynamics/PHY202_-_Statistical Mechanics.tex", "max_stars_repo_stars_event_max_datetime": "2022-03-10T16:49:11.000Z", "max_stars_repo_stars_event_min_datetime": "2021-08-13T08:54:30.000Z", "num_tokens": 6604, "size": 21288 }
\documentclass[10pt, a4paper, twoside]{basestyle} \usepackage{tikz} \usepackage[Mathematics]{semtex} \usepackage{chngcntr} \counterwithout{equation}{section} \renewcommand{\thesubsection}{\arabic{subsection}} \setcounter{MaxMatrixCols}{20} % TODO(egg): move to semtex. \newlength{\medianToZeroTop} \newcommand\zeroBL[1]{\multicolumn{1}{l}{\rlap{\smash{\text{{#1$0$}}}}}} \newcommand\zeroBR[1]{\multicolumn{1}{r}{\llap{\smash{\text{{#1$0$}}}}}} \newcommand\zeroTR[1]{\multicolumn{1}{r}{\llap{\smash{\setlength{\medianToZeroTop}{\heightof{#1$0$}-1ex}\raisebox{-\medianToZeroTop}{\text{{#1$0$}}}}}}} % TODO(egg): move to basestyle. \usepackage[all]{nowidow} \usepackage{tabls} \usepackage{changepage} \renewcommand*{\raggedleftmarginnote}{} \renewcommand*{\raggedrightmarginnote}{} %%%% Shorthands. \DeclareMathOperator{\bias}{\mathit{bias}} % Rounding brackets will be heavily nested, and reading the nesting depth is critically important, % so we make them grow for readability. \newcommand{\round}[1]{\doubleSquareBrackets*{#1}} \newcommand{\roundTowardZero}[1]{\doubleSquareBrackets{#1}_0} \newcommand{\roundTowardPositive}[1]{\doubleSquareBrackets{#1}_+} \newcommand{\roundTowardNegative}[1]{\doubleSquareBrackets{#1}_-} \newcommand{\hex}[1]{{_{16}}\mathrm{#1}} \newcommand{\bin}[1]{{_{2}}\mathrm{#1}} % For alignment of tabular rounding error analyses. \DeclareDelimiter\doubleSquareBracketEmptyUnkerned{\lBrack}{.} \newcommand{\leftRound}[1]{\doubleSquareBracketEmptyUnkerned*{#1\kern-\nulldelimiterspace}} \newcommand{\bigradient}[4]{\det\begin{pmatrix}\pa{#1}_{#2}\\\pa{#3}_{#4}\end{pmatrix}} %%%% Title and authors. \title{A correctly rounded binary64 cube root} \date{\printdate{2021-06-21}} \author{Robin~Leroy (eggrobin)} \begin{document} \maketitle \noindent This document describes the implementation of correctly-rounded binary64 cube root function {\texttt{principia::numerics::Cbrt}} defined in \href{https://github.com/mockingbirdnest/Principia/blob/master/numerics/cbrt.cpp}{\texttt{numerics/cbrt.cpp}}. \part*{Outline} Our computation of the cube root uses root-finding methods that have fallen into disuse, and thus may be unfamiliar to the reader. To avoid confusion on the properties and names of these methods, this document comprises two parts. The first one is about a family of root-finding methods for arbitrary functions, wherein all methods considered for the cube root can be found. The second is about the computation of a binary64 cube root and its error analysis. The methods considered in the first part originate from a series of works by Thomas Fantet de~Lagny\footnote{Thomas Fantet de~Lagny (1660--1734), professor of hydrography in Rochefort, 1697--1714, subdirector then director of Law's \foreign{\textfrench{Banque générale}}. Member of the \foreign{\textfrench{Académie Royale des Sciences}}. See \cite{Fontenelle1734}, reprinted in \volcite{6}[557\psqq]{Fontenelle1758}.}. Lagny's methods may be used to find a root of any polynomial; curiously, they are of convergence order $p$ for polynomials of degree $p$. We give a generalization of these methods which applies to arbitrary functions (and in particular to polynomials of degree other than the order). Throughout the centuries, some of Lagny's methods have been rediscovered, whether from Lagny's works or from first principles, and generalized to arbitrary functions. 
The reader may be familiar with these special cases, as some of them have remained in use, and are found in more modern literature; most famous of these is perhaps Halley's (rational) method. We thus show that these are indeed special cases of the generalization of Lagny's method, and describe the names we use for specific cases, depending on the order of their discovery. Our path through more than three centuries of literature to rediscovering---and then generalizing---Lagny's methods led us to many remarkable works; we mention those in a bibliographic note. The second part starts with the treatment of a faithfully rounded---and very nearly correctly rounded---cube root. For each step in the computation of the cube root, we consider multiple alternatives, each with its error analysis; the one that should be chosen is revealed by the final error analysis. We then describe how correct rounding is achieved, by determining whether the faithful result might be misrounded, in which case the correct digit needs to be more carefully ascertained. In order not to interrupt the flow of reading, in both parts, we relegate to the appendices any miscellaneous considerations that are independent of the matter currently being discussed, long-winded proofs, and lists of examples. For the reader's convenience, we have translated in the margin all quotations in languages other than English, or quoted a translation in the margin when a satisfactory one was available. \pagebreak \part{Abridged root-finding methods} \label{Abridged} We recall and generalize a family of root-finding methods from the late 17th century. In \cite{FantetdeLagny1691a}, Lagny first presents the following iterations for the computation of the cube root $\cuberoot{a^3+b}$:\begin{align} \FunctionBody*{a}{\tfrac{1}{2}a+\sqrt{\tfrac{1}{4}a^2+\frac{b}{3a}}}\text, \label{LagnyIrrationalCubeRootIteration} \intertext{hereafter the \emph{(quadratic) irrational method}, and} \FunctionBody*{a}{a+\frac{ab}{3a^3+b}}\text, \label{LagnyRationalCubeRootIteration} \end{align} the \emph{rational method}, mentioning the existence of similar methods for higher powers. In \cite{FantetdeLagny1691b} the above methods are again given, with an outline of the general method for higher powers, and a mention of their applicability to finding roots of polynomials other than $z^p-r$. That general method is given in detail in \cite[19]{FantetdeLagny1692}. Modernizing the notation, the general rule is as follows for finding a root of the monic polynomial of degree $p\geq2$\[ f\of{z}\DefineAs z^p + c_1 z^{p-1} + \dotsb + c_{p-1} z + c_p \DefinitionOf z^p - R\of{z} \] with an initial approximation $a$. Separate alternate terms of the binomial expansion of $\pa{x+\tfrac{1}{2}a}^p$ into sums of degree $p$ and $p-1$ in $x$,\begin{align} S_1\DefineAs\sum{\substack{k=0\\2\Divides k}}[p]\binom{p}{k}x^{p-k}\pa{\tfrac{1}{2}a}^k&\text{ and } S_2\DefineAs\sum{\substack{k=1\\2\DoesNotDivide k}}[p]\binom{p}{k}x^{p-k}\pa{\tfrac{1}{2}a}^k\text, \nonumber \intertext{and consider the following polynomials, of degree $p$ and $p-1$ in $x$ for almost all $a$:} E_{p}\DefineAs S_1-\tfrac{1}{2}R\of{x+\tfrac{1}{2}a}&\text{ and } E_{p-1}\DefineAs S_2-\tfrac{1}{2}R\of{x+\tfrac{1}{2}a}\text. 
\label{LagnyEquations}
\end{align}
Let $E_{n-1}$ be the remainder of the polynomial division\footnote{While the rest of the method is a straightforward translation, this step bears some explanation; its description in \cite{FantetdeLagny1692} is
% TODO(egg): Right Single Quotation Mark leads to incorrect spacing in French, so we use Apostrophe here (which
% gets converted to Right Single Quotation Mark by the typesetting engine). Figure out what is going on.
\marginnote{From these two equalities, either taken separately or compared together according to the method of more-than-determined problems, extract a rational value of $x$, or simply one of a convenient degree.}
\begin{quote}\textfrench{De ces deux égalitez, ou priſes ſéparément, ou comparées enſemble ſelon la methode des problêmes plus que déterminez tirez en une valeur d'$x$ rationelle, ou ſimplement d'un degré commode.}
\end{quote}
It is assumed that the reader is familiar with this ``comparison according to the method of more-than-determined problems''. While the application of the root-finding method is described in painstaking detail in \cite{FantetdeLagny1733}, which outlines the treatment of overdetermined problems, it is perhaps this remark from \cite[494]{FantetdeLagny1697} which lays it out most clearly:
\marginnote{There is nothing new to remark on more-than-determined problems of the fourth degree. The general rule is to equate everything to zero, and to divide the higher equation by the lower, or the equally high one by the other, repeatedly until one finds the simplest remainder or divisor.}
\begin{quote}\textfrench{Il n'y a rien de nouveau à remarquer ſur les Problemes plus que déterminez du quatriéme degré. La Regle générale eſt d'égaler tout à zero, \& de diviſer la plus haute équation par la moins élevée, ou l'également élevée l'une par l'autre, continuellement juſques à ce que l'on trouve le reſte ou le diviſeur le plus ſimple.}
\end{quote}} of $E_{n+1}$ by $E_{n}$; its degree is $n-1$ for almost all $a$. The iteration for finding a root of $f$ is $\FunctionBody{a}{x+\frac{1}{2}a}$, where $x$ is a root of $E_{2}$ in the quadratic irrational method, and the root of $E_{1}$ in the rational method. Its order is $p$.
\section*{Multiplicity of the irrational methods}
Lagny does not require that the polynomial division be carried out all the way to $E_2$, merely until one gets \marginnote{\footnotesize a [...] value of $x$ [...] of a convenient degree}%
\foreign{\textfrench{une valeur d'$x$ \textnormal{[...]} d'un degré commode}}, by which he likely means one that is constructible. When $f$ is a cubic, he uses the term \foreign{\textfrench{formule irrationnelle}} for $x+\frac{1}{2}a$ where $x$ is a root of $E_2$, but when it comes to computing the fifth root, the same term is used to refer to the case where $x$ is a root of $E_4$. In order to avoid confusion, we use the term \emph{quadratic irrational method} when $x$ is a root of $E_2$, and we call the irrational formula for $\sqrt[5]{a^5+b}$ from \cite[43]{FantetdeLagny1692}\footnote{The formula has a misprint in \cite[43]{FantetdeLagny1692}, $-\tfrac{1}{2}a^2$ instead of $-\tfrac{1}{4}a^2$ under the radical. Halley remarks on it and gives the corrected formula in \cite[137,140]{Halley1694}. The misprint remains forty years later in \cite[\pno~440 misnumbered 340]{FantetdeLagny1733}.
Bateman writes in \cite{Bateman1938} ``we must not infer that [these expressions] are not correct simply because they differ from Halley's expression'', but with Lagny's construction, which was seemingly unknown to Bateman, the error is plain.}\[
\FunctionBody{a}{\tfrac{1}{2}a+\sqrt{\sqrt{\tfrac{1}{4}a^4+\tfrac{b}{5a}}-\tfrac{1}{4}a^2}}
\] Lagny's \emph{quartic irrational method} for the fifth root; the quadratic irrational method for the same fifth root would be\footnote{Both are of order $5$, but the reader who wishes to compute a fifth root should note that the leading term of the error of the quartic method is $\frac{2}{7}$ of that of the quadratic. See appendix~\ref{ExampleGeneralizedLagnyMethods}.}\[
\FunctionBody{a}{\frac{a\pa{7b-\sqrt{100a^{10}+100a^{5}b-7b^2}}}{4b-10a^5}}\text.
\] More generally, we call $\FunctionBody{a}{x+\frac{1}{2}a}$ \emph{Lagny's method of degree $d$} when $x$ is a root of $E_{d}$. Note however that when $p=3$, \idest, when finding a root of a cubic, Lagny's only irrational method is the quadratic one; we can thus unambiguously refer to (\ref{LagnyIrrationalCubeRootIteration}) as \emph{Lagny's irrational method for the cube root}.
\section*{Generalization to arbitrary functions}
Lagny's method of degree $d$ and convergence order $p$ may be generalized to functions $f$ other than polynomials of degree $p$, by defining $E_p$ and $E_{p-1}$ in terms of Taylor polynomials for $\FunctionBody x {f\of{x+\frac{1}{2}a}}$ around $x=\frac{1}{2}a$:\begin{equation}
E_p\DefineAs T_{p}-\tfrac{1}{2}T_{p-1}\text{ and }
E_{p-1}\DefineAs \tfrac{1}{2}T_{p-1}\text,
\label{GeneralizedLagnyEquations}
\end{equation} where\[
T_{n}\DefineAs\sum{k=0}[n]\frac{f^{\pa{k}}\of{a}}{\Factorial k}\pa{x-\tfrac{1}{2}a}^k\text.
\] The rest of the method remains unchanged; the iteration is given by $\FunctionBody{a}{x+\frac{1}{2}a}$ for a root $x$ of $E_{d}$. When $f$ is a monic polynomial of degree $p$, the definitions (\ref{GeneralizedLagnyEquations}) are equivalent\footnote{The Taylor polynomials are then $T_p=f\of{x+\tfrac{1}{2}a}=\pa{x+\tfrac{1}{2}a}^p-R\of{x+\tfrac{1}{2}a}=S_1+S_2-R\of{x+\tfrac{1}{2}a}$ and $T_{p-1}=T_p-\pa{x-\tfrac{1}{2}a}^p =T_p-\pa{S_1-S_2}=2S_2-R\of{x+\tfrac{1}{2}a}$.} to (\ref{LagnyEquations}), so that we recover Lagny's method. When $f$ is not a polynomial of degree $p$, we thus call the method defined by (\ref{GeneralizedLagnyEquations}) the \emph{generalized Lagny method of degree $d$ and order $p$}; we use the terms ``rational'', ``quadratic irrational'', etc.\ for $d=1$, $d=2$, etc.\ respectively. Note that while the $E_n$ defined in this fashion may not have degree $n$ if the higher derivatives of $f$ vanish, \exempligratia, for a polynomial of degree less than $p$, the calculation may be carried out formally for an arbitrary $f$, and the offending function substituted in the result, taking limits as needed to remove singularities; the generalized methods of high order can thus be applied to polynomials of low degree. These methods may equivalently be constructed using the Maclaurin series for $\FunctionBody Δ {f\of{a+Δ}}$ in the correction term $Δ$. Let\[
M_n \DefineAs\sum{k=0}[n] \frac{f^{\pa{k}}\of{a}}{\Factorial k}Δ^k\text,\] and consider the polynomials $\tilde E_p \DefineAs M_p$ and $\tilde E_{p-1} \DefineAs M_{p-1}$ of degree $p$ and $p-1$ in $Δ$. Let $\tilde E_{n-1}$ be the remainder of the polynomial division of $\tilde E_{n+1}$ by $\tilde E_{n}$. The iteration is then $\FunctionBody{a}{a+Δ}$, where $Δ$ is a root of $\tilde E_d$.
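This construction lends itself to a direct symbolic sketch. The following Python fragment (using \texttt{sympy}; the helper \texttt{lagny\_step}, and the choice of the root of $\tilde E_d$ closest to $0$, \idest, of the smallest correction, are ours for illustration, the construction itself not prescribing which root to take) performs one iteration of the generalized method of order $p$ and degree $d$; with $p=3$ and $d=1$ applied to $z^3-2$ it reproduces (\ref{LagnyRationalCubeRootIteration}), and with $d=2$ it reproduces (\ref{LagnyIrrationalCubeRootIteration}).
\begin{verbatim}
# Sketch of the generalized Lagny method of order p and
# degree d, via the Maclaurin construction above; taking
# the root of E_d closest to 0 is a heuristic.
import sympy as sp

def lagny_step(f, z, a, p, d):
    Delta = sp.Symbol('Delta')
    M = lambda n: sum(
        sp.diff(f, z, k).subs(z, a) / sp.factorial(k)
        * Delta**k for k in range(n + 1))
    E = {p: sp.expand(M(p)), p - 1: sp.expand(M(p - 1))}
    for n in range(p - 1, d, -1):
        # E_{n-1} is the remainder of E_{n+1} by E_n.
        E[n - 1] = sp.rem(E[n + 1], E[n], Delta)
    roots = sp.Poly(E[d], Delta).nroots()
    real = [r for r in roots if abs(sp.im(r)) < 1e-9]
    return sp.Float(a + sp.re(min(real, key=abs)))

z = sp.Symbol('z')
a = sp.Float(1)
for _ in range(3):
    a = lagny_step(z**3 - 2, z, a, p=3, d=1)
print(a, sp.root(2, 3).evalf())
\end{verbatim}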
Examples of the generalized Lagny method for specific values of the function $f$, the order $p$, and the degree $d$ are given in appendix~\ref{ExampleGeneralizedLagnyMethods}. \section*{Names} The generalized Lagny methods form a broad family; many of the methods therein are well-known. Since these special cases are better known under the names of their discoverers, we give a list of such occurences, and use the appropriate names in the remainder of this document. \subsection*{Names of the irrational methods} Halley generalized Lagny's (quadratic) irrational method for the cubic to arbitrary\footnote{\marginnote{Translations from \cite{Halley1809}.}% Lagny's method is general, in that an iteration is given for any polynomial, albeit one whose order changes with the degree. However, while he refers to its results---and even corrects a misprint therein---, Halley did not have access to a copy of \cite{FantetdeLagny1692}, \marginnote{These rules were communicated to me by a friend, as I have not seen the author's book} \begin{quote} Has Regulas, cum nondum librum videram, ab amico communicatas habui \end{quote} and it appears that said friend communicated only the formulæ for the cube and fifth root, as opposed to the general method and its proof, as Halley writes \marginnote{[...] but being almost wholly occupied in extracting the roots of pure powers, especially the cubic, [M.~De Lagney] adds but little to the extraction of the roots of affected equations; and that rather perplexed too, and not sufficiently demonstrated.} \begin{quote} [...] \emph{D.~de Lagney} [...] qui cum totus fere ſit in eliciendis Poteſtatum purarum radicibus, præfertim Cubicâ, pauca tantum eaque perplexa nec ſatis demonſtrata de affectarum radicum extractione ſubjungit. \end{quote} or, about the quartic irrational method for the fifth root, whereon Lagny does not elaborate as it is a direct application of the general method, \begin{quote} \marginnote{But the author has no where given his method of investigation or demonstration, though it seemed most to require it [...].}Author autem nullibi inveniendi methodum ejuſve demonſtrationem concedit, etiamſi maxime deſiderari videatur [...]. \end{quote} Being unaware of this generality, Halley sets out to generalize (\ref{LagnyIrrationalCubeRootIteration}) and (\ref{LagnyRationalCubeRootIteration}) to arbitrary polynomials, and does so by keeping the order constant.} polynomials, retaining cubic convergence; when $f$ is not a polynomial of degree $3$, we thus call the generalized Lagny quadratic irrational method of order $3$ \emph{Halley's irrational method}. This method was given in terms of derivatives by Bateman in \cite[12]{Bateman1938}:\[ \FunctionBody{a}{a-\frac{f'\of{a}}}{f''\of{a}}+\frac{\sqrt{{f'}^2\of{a}-2f\of{a}f''\of{a}}}{f''\of{a}}\text. \] \subsection*{Names of the rational method} Both special cases and generalizations of Lagny's rational method have been discovered multiple times and extensively studied; constructions that take advantage of modern calculus allow us to give a more straightfoward expression for the rational method than was available to Lagny. The proof of the following equivalence is given in appendix~\ref{ProofOfTheProposition}. 
\begin{proposition} The iteration of the generalized Lagny rational method of order $p$ for a root of the function $f$ is\begin{equation} a\mapsto a + \pa{p-1}\frac{\pa{1/f}^{\pa{p-2}}\of{a}}{\pa{1/f}^{\pa{p-1}}\of{a}}\text.\label{LagnyRationaliteration} \end{equation} \end{proposition} The iteration (\ref{LagnyRationaliteration}) is a special case of the \foreign{Algorithmen $\pa{A^λ_ω}$} defined by Schröder for an arbitrary polynomial $f$ in \cite[349\psq]{Schröder1870}, equation (69); specifically, it is $\pa{A^0_{p-1}}$. As seen in the proof of the proposition, it is also a special case of Householder’s equation (14) from \cite[169]{Householder1970}, which generalizes it by substituting $f/g$ for $f$. The case $g\Identically1$ is mentioned in theorem 4.4.2, and that expression is given explicitly in \cite{SebahGourdon2001}. For $p=2$ and $f$ an arbitrary polynomial, (\ref{LagnyRationaliteration}) is Newton's method, presented by Wallis in \cite[338]{Wallis1685}. For $p=3$ and $f$ an arbitrary polynomial, it is Halley's rational method, given in \cite[142--143]{Halley1694} in an effort to generalize Lagny's (\ref{LagnyRationalCubeRootIteration}). It is usually simply known as Halley's method, as his aforementioned irrational method has comparatively fallen into obscurity; see \cite{ScavoThoo1995}. Considering, as remarked by \cite[334]{Schröder1870}, that a method can often be generalized from arbitrary polynomials or rational functions to arbitrary analytic functions, we call the iteration (\ref{LagnyRationaliteration}) \nowidow[1] % Not really a widow, the list is part of the same paragraph; breaking before the list looks worse. \begin{itemize}[nosep] \item Newton’s method when $p=2$, for arbitrary $f$; \item Lagny’s rational method when $p>2$ and $f$ is a polynomial of degree $p$; \item Halley’s rational method when $p=3$ and $f$ is not a polynomial of degree $3$; \item the Lagny--Schröder rational method of order $p$ otherwise. \end{itemize} We do not simply call this last case ``Schröder’s method'', as it is only a special case of the methods defined in \cite{Schröder1870}, so that the expression would be ambiguous. Note that we avoid the name ``Householder’s method'' which appears in \cite{SebahGourdon2001} and ulterior works (notably \emph{MathWorld} and \emph{Wikipedia}, both citing \cite{SebahGourdon2001}), as it is variably used to refer to either (\ref{LagnyRationaliteration}) or to a method from a different family, namely $\gj_{p+1}$ from \cite[168]{Householder1970}, equation (7), taking $γ_{p+1}\Identically0$ in the resulting iteration; $\gj_3$ is\footnote{We are grateful to Peter Barfuss for this observation.} the iteration given in section~3.0.3 of \cite{SebahGourdon2001}. As mentioned by Householder, both of those were described by Schröder a century prior anyway: Householder’s (7) is Schröder’s (18) from \cite[327]{Schröder1870}. \section*{Bibliographic note} Our foray into the history of these methods was prompted by finding the ``historical background'' section of \cite{ScavoThoo1995} while looking for a reference for Halley’s method: it is mentioned therein that this method, as applied to the cube root, is due to Lagny. 
Searching for Lagny’s work led us to the historical note \cite{Cantor1861}, wherein a note by the editors Terquem and Gerono reads \begin{quote}\marginnote{\footnotesize Naturally, in mathematics, land of incontrovertible propositions identical in every language and in every country, such encounters cannot fail to be somewhat frequent; nowhere are \emph{actual} plagiarisms so rare, and \emph{apparent} plagiarisms so common than in the exact science by excellence; but to report them is a duty, a service rendered to scientific history.}\textfrench{% Naturellement, en mathématiques, séjour des propositions irréfragables, identiques en toute langue, en tout pays, ces rencontres ne peuvent manquer d'être assez fréquentes; nulle part les plagiats \emph{effectifs} sont si rares, et les plagiats \emph{apparents} si communs que dans la science exacte par excellence; mais les signaler est un devoir, un service rendu à l'histoire scientifique.} \end{quote} The editors then quote a letter by Prouhet, wherein he gives a reference to \cite{FantetdeLagny1692}. Lagny’s work proved far more extensive than we expected: besides the above root finding methods for arbitrary polynomials, it contains an error analysis, and even a discussion of the principles of performance analysis based on a decomposition into elementary operations on---and writing of---decimal digits \cite[5--9]{FantetdeLagny1692}, with a remark on applicability to bases other than ten: a 17th century MIX. Observing that the higher-order examples correspond to the well-known higher order method attributed to Householder in \cite{SebahGourdon2001}, we looked for its properties in \cite{Householder1970} so as to prove that observation, and found that Householder attributes them to Schröder. As mentioned in the translator’s note by Stewart in \cite{SchröderStewart1993}, \begin{quote} A.~S.~Householder used to claim you could evaluate a paper on root finding by looking for a citation of Schröder’s paper. If it was missing, the author had probably rediscovered something already known to Schröder. \end{quote} It is possible that the irrational methods could be expressed using Schröder’s results in one way or another, although most of his methods seem to be rational; in any case, such a formulation is unlikely to be something well-known, as irrational methods are far less popular nowadays---unjustifiedly so, as we shall see in part~\ref{CubeRoot} and appendix~\ref{Performance}. Our generalization of Lagny's irrational methods to arbitrary $f$, which, in the polynomial case, decouples the degree of $f$ from the convergence order, was inspired by Gander's rephrasing in \cite{Gander1985} of Halley's construction from \cite{Halley1694}, wherein the correction term of Halley's irrational method is defined as a root of $M_2$. This change of variables with respect to Lagny's construction drastically simplifies the proof of the proposition. Prouhet’s letter in \cite{Cantor1861} ends with these words: \begin{quote}\marginnote{\footnotesize This is all very abridged; but who will free us from the neverending abridged methods?}\textfrench{% Tout cela est fort abrégé; mais qui nous délivrera des méthodes abrégées, qui n'en finissent pas?}\end{quote} \part{Computing a real cube root} \label{CubeRoot} We now turn to the computation of the cube root of in \texttt{numerics/cbrt.cpp}. 
\section*{A faithfully rounded cube root} \subsection*{Overview} Our general approach to computing a faithfully rounded cube root of $y>0$ is the one described in \cite{KahanBindel2001}: \begin{enumerate} \item integer arithmetic is used to get a an initial quick approximation $q$ of $\cuberoot y$; \item a root finding method is used to improve that that to an approximation $ξ$ with a third\footnote{When FMA is available, this step and the next one use half the precision, and the last step uses a correctly-rounded $y-x^3$ instead of an exact $x^3$; see appendix~\ref{FMA}.} of the precision; \item $ξ$ is rounded to a third of the precision, resulting in the rounded approximation $x$ whose cube $x^3$ can be computed exactly; \item a single high order iteration of a root finding method is used to get the faithfully rounded result $r_0$. \end{enumerate} \subsection*{Notation} We define the fractional part as $\FractionalPart a\DefineAs a-\Floor a\in\intclop{0}{1}$, regardless of the sign of $a$. The floating-point format used throughout is binary64; the quantities $p\in\N$ (precision in bits) and $\bias\in\N$ are defined as in IEEE 754-2008, $p=53$ and $\bias=1023$. Some of the individual methods discussed may be of general use; we thus give all inexact constants used in such methods, as well as the error bounds, rounded to forty decimal places and thirty-three hexadecimal places, which amply suffices for decimal128, binary128, and all smaller formats. A superscript sign after the last digit serves as the sticky bit\footnote{We learned of this practice from Steve Canon, who found it in a re-edition of \cite[VIII]{Bruhns1870}; there it is only present on the digit $5$, to guard against double-rounding to the nearest decimal place. As mentioned in Hoüel's foreword to \cite[II]{Schrön1873}, this practice, originally seen as a way to convey another bit of precision rather than a way to ensure correct rounding, dates back to at least 1827; see \cite[X]{Babbage1827}, 8th rule. Like Babbage and Schrön, we give this bit regardless of the last digit; this allows for directed rounding.}: the unrounded quantity is in excess of the rounded one if the sign is $+$, and in default if it is $-$. The results of numerical experiments are reported with combined standard uncertainties as specified by JCGM~100:2008, 7.2.2. Unless otherwise noted, they are obtained by sampling a billion values uniformly at random from the set $\intclop{1}{8}\Intersection\text{binary64}$. We use capital Latin letters for fixed-point numbers involved in the computation, and $A>0$ for the normal floating-point number $a>0$ reinterpreted as a binary fixed-point\footnote{The implementation uses integers (obtained by multiplying the fixed-point numbers by $2^{p-1}$). For consistency with \cite{KahanBindel2001} we work with fixed-point numbers here. Since we do not multiply fixed point numbers together, the expressions are unchanged.} number with $p-1$ bits after the binary point,\begin{align} A &\DefineAs \bias + \Floor{\log_2 a} + \FractionalPart \pa{2^{-\Floor{\log_2 a}}a}\label{defA}\\ &= \bias + \Floor{\log_2 a} + 2^{-\Floor{\log_2 a}}a - 1\nonumber\\ \intertext{since $2^{-\Floor{\log_2 a}}a\in\intclop{1}{2}$, and \emph{vice versa},} a &\DefineAs 2^{\Floor A-\bias} \pa{1+\FractionalPart{A}}\label{defa}\text. \end{align} This corresponds to \cite{KahanBindel2001}'s $B+K+F$. 
For both fixed- and floating-point numbers, given $α\in\R$, we write: \begin{itemize}[nosep] \item $\round{α}$ for the nearest representable number, rounding ties to even: IEEE 754-2008 rounding-direction attribute roundTiesToEven; \item $\roundTowardPositive{α}$ for the nearest representable number no smaller than $α$: roundTowardPositive; \item $\roundTowardNegative{α}$ for the nearest representable number no larger than $α$: roundTowardNegative; \item $\roundTowardZero{α}$ for the nearest representable number no larger in magnitude than $α$: roundTowardZero. \end{itemize} We write the unit roundoff $u\DefineAs2^{-p}$ (for rounding to nearest), and, after \cite[63]{Higham2002}, $γ_n\DefineAs\frac{nu}{1-nu}$. To quote \cite{Trefethen1997}, ``If rounding errors vanished, 95\% of numerical analysis would remain''. While we keep track of rounding errors throughout, they are of very little importance until the last step; when it is convenient to solely study the truncation error, we work with ideal quantities affected with a prime, which correspond to their primeless counterparts by removal of all intervening roundings. The input $y$ and all intervening floating-point numbers are taken to be normal; the rescaling performed to avoid overflows also avoids subnormals. We work only with correctly rounded addition, subtraction, multiplication, division, and square root; FMA is treated separately in appendix~\ref{FMA}. \subsection{Quick approximation}\label{QuickApproximation} The quick approximation $q$ is computed using fixed-point arithmetic as\[ Q\DefineAs C + \roundTowardZero{\frac{Y}{3}}\text, \] where the fixed-point constant $C$ is defined as\footnote{Note that there is a typo in the corresponding expression $C\DefineAs\pa{B-0.1009678}/3$ in \cite{KahanBindel2001}; a factor of $2$ is missing on the bias term.}\[ C\DefineAs \round{\frac{2\bias-Γ}{3}} \] for some $Γ\in\R$. As we will now show, this step is effectively an argument reduction; we will discuss the choice of the free parameter $Γ$ below. Let $ε_q \DefineAs \frac{q}{\sqrt[3] y}-1$, % TODO(egg): Figure out why \cuberoot does not render here. so that $\cuberoot y\pa{1+ε_q}=q$; the relative error of $q$ as an approximation of $\cuberoot y$ is $\abs{ε_q}$. Considering $Y$, $Q$, $q$, and $ε_q$ as functions of $y$, we have\begin{align*} Y\of{8y} &= Y\of{y} + 3\text,\\ Q\of{8y} &= Q\of{y} + 1\text,\\ q\of{8y} &= 2q\of{y}\text,\\ ε_q\of{8y} &= ε_q\of{y}\text, \end{align*} so that the properties of $ε_q$ need only be studied on some interval of the form $\intclop{η}{8η}$. Pick $η\DefineAs2^{\Floor{Γ}}$, and $y\in\intclop{η}{8η}=\intclop{2^{\Floor{Γ}}}{2^{\Floor{Γ}+3}}$, so that $\log_2 y\in\intclop{\Floor{Γ}}{\Floor{Γ}+3}$. Let $k\DefineAs\Floor{\log_2 y}-\Floor{Γ}$; note that $k\in\set{0,1,2}$. Let $f\DefineAs\FractionalPart\pa{2^{-\Floor{\log_2 y}}y}\in\intclop{0}{1}$, so that\[ y=2^{\Floor{\log_2 y}}\pa{1+\FractionalPart\pa{2^{-\Floor{\log_2 y}}y}}=2^{\Floor{Γ}+k}\pa{1+f}\text. \] Up to at most $3$ half-units in the last place from rounding ($2$ from the directed rounding of the division by three and $1$ from the definition of $C$), we have, using the definition (\ref{defA}) of $Y$,\begin{align*} % See https://tex.stackexchange.com/a/41091 for the ={}&. Q\approx Q'\DefineAs{}&\bias+\frac{\Floor{\log_2 y}}{3}+\frac{\FractionalPart\pa{2^{-\Floor{\log_2 y}}y}-Γ}{3}\text,\\ ={}&\bias+\frac{\Floor{Γ}+k}{3}+\frac{f-Γ}{3}\text,\\ ={}&\bias+\frac{k+f-\FractionalPart Γ}{3}\text. 
\end{align*} Since $k\in\intclos{0}{2}$, the numerator $k+f-\FractionalPart Γ$ lies in $\intopen{-1}{3}$. Further, it is negative only if $k=0$, so that\begin{align*} \Floor{Q'}&=\begin{cases} \bias - 1 & \text{if }k = 0\text{ and }\FractionalPart Γ > f\text, \\ \bias & \text{otherwise,} \end{cases}\text{ and}\\ \FractionalPart{Q'}&=\begin{cases} 1+\frac{f-\FractionalPart Γ}{3} & \text{if }k = 0\text{ and }\FractionalPart Γ > f\text, \\ \frac{k+f-\FractionalPart Γ}{3} & \text{otherwise.} \end{cases} \end{align*} Accordingly, for the quick approximation $q$, we have, again up to at most $3$ half-units in the last place, by the definition (\ref{defa}) of $q$,\begin{align*} q\approx q' &= \begin{cases} 1+\frac{f-\FractionalPart Γ}{6} & \text{if }k = 0\text{ and }\FractionalPart Γ > f\text, \\ 1+\frac{k+f-\FractionalPart Γ}{3} & \text{otherwise,} \end{cases} \end{align*} so that for each $k$, $q'$ is a piecewise affine function of $f$. With $\cuberoot y = 2^{\frac{\Floor{Γ}+k}{3}}\cuberoot{1+f}$, we express $ε_q' \DefineAs \frac{q'}{\sqrt[3] y}-1$ piecewise as a function of $f$ and $k$. The maximum of $ε_q'$ gives us a bound on the relative error of $q$, as\[ \abs ε_q \leq \abs{ε_q'}\pa{1+3u}\text. \] The values $Γ=0.1009678$ and $ε_q<3.2\%$ from \cite{KahanBindel2001} may be recovered by choosing $Γ$ minimizing the maximum of $\abs{ε_q'}$ over $y\in\intclop{η}{8η}$, or equivalently.\begin{align*} Γ_{\mathrm{Kahan}}\DefineAs\argmin_{Γ\in\R}\max_{y\in\intclop{η}{8η}}\abs{ε_q'} &=\argmin_{Γ\in\R}\max_{\tuple{f, k}}\abs{ε_q'}\\ \intertext{where the maximum is taken over $\tuple{f, k}\in\intclop{0}{1}\Cartesian\set{0,1,2}$,} &=\argmin_{Γ\in\R}\max_{\tuple{f, k}\in\mathscr{E}\Union\mathscr{L}}\abs{ε_q'}\text, \end{align*} where $\mathscr{E}\DefineAs\set{\tuple{\FractionalPart Γ, 0}}\Union\setst{\tuple{0, k}}{k\in\set{0,1,2}}$ is the set of the endpoints of the intervals whereon $q'$ is piecewise affine, and $\mathscr{L}\DefineAs\setst{\tuple{\frac{k-\FractionalPart Γ}{2},k}}{k\in\set{1,2}}$ are the smooth local extrema. We get more precisely\footnote{These values may be computed formally, but the expression is unwieldy.}\begin{align*} Γ_{\mathrm{Kahan}} &\approx 0.10096\,78121\,55802\,88786\,36993\,42643\,55358\,06490^-\\ &\approx \hex{0.19D9\,06CB\,2868\,81F4\,88FD\,38DF\,E7F6\,98DD\,B^+} \intertext{with} \max_{y}\abs{ε_q'} &\approx 3.15546\,32773\,62480\,60611\,78973\,32817\,13558\,9400^+\%\\ &\approx \hex{1.027E\,DC79\,99AB\,08D3\,928D\,83B0\,17CC\,E876^-}\Multiply 2^{-5}\\ \end{align*} yielding the constant\[C_{\mathrm{Kahan}}=\hex{2A9F\,7625\,3119\,D328}\Multiply 2^{-52}\]for IEEE 754-2008 binary64. However, as we will see in the next section, this value does not optimize the final error. \subsection{Getting to a third of the precision}\label{ThirdPrecision} We now consider multiple methods for the refinement of $q$ to $ξ$. The rounding error in this step being both negligible and tedious to bound, its analysis is relegated to appendix~\ref{LagnyStepTwoRounding}. Here we will study only the truncation error, and thus work only with the primed quantities. \subsubsection*{Lagny's rational method} One way to compute $ξ'$ is Lagny's rational method applied to $\cuberoot y =\cuberoot{q^3+\pa{y-q^3}}$ with the starting estimate $q'$, \[ ξ'=q'+\frac{q'\pa{y-{q'}^3}}{2{q'}^3+y}\text, \] with the error\[ ε_ξ' \DefineAs \frac{ξ'}{\sqrt[3] y}-1\text. 
\] Substituting $q'=\cuberoot{y}\pa{1+ε_q'}$, we can express $ε_ξ'$ using the transformation of the relative error by one step of Lagny’s rational method on the cube root,\[ ε_ξ' = \frac{2{ε_q'}^3+{ε_q'}^4}{3+6ε_q'+6{ε_q'}^2+2{ε_q'}^3} = \tfrac{2}{3}{ε_q'}^3 + \BigO\of{{ε_q'}^4}\text. \] If $q'$ is computed using $Γ=Γ_{\mathrm{Kahan}}$, we get $\max_{y} \abs{ε_ξ'} \approx 21.96\Multiply10^{-6}$, $\log_2 \max_{y} \abs{ε_ξ'} \approx -15.47$. However, $Γ_{\mathrm{Kahan}}$, which minimizes $\max_{y} \abs{ε_q'}$, does not minimize $\max_{y} \abs{ε_ξ'}$. This is because while $ε_ξ'$ is monotonic as a function of $ε_q'$, it is not odd: positive errors are reduced more than negative errors are, so that the minimum is attained for a different value of $Γ$. Specifically, we have\begin{align*} Γ_{\mathrm{L^1}}&\DefineAs\argmin_{Γ\in\R}\max_{y}\abs{ε_ξ'}\\ &\approx 0.09918\,74615\,29855\,99525\,66149\,20761\,31234\,34720\,2^{+}\\ &\approx \hex{0.1964\,5977\,71A9\,4DE0\,A8AF\,47A0\,0B1B\,C052\,B^+} \intertext{with $\max_{y} \abs{ε_q'} \approx 3.203\%$, but} \max_{y} \abs{ε_ξ'} &\approx 20.86863\,55363\,95934\,87709\,20083\,98441\,02541\,483^+\Multiply10^{-6}\\ &\approx\hex{1.5E1E\,1B6D\,9718\,42F4\,89C2\,EC7B\,2EC0\,ECC1^-}\Multiply 2^{-16}\text, \end{align*} $\log_2 \max_{y} \abs{ε_ξ'} \approx -15.55$. The corresponding fixed-point constant is\[C_{\mathrm{L^1}}\DefineAs\hex{2A9F\,7893\,782D\,A1CE} \Multiply 2^{-52}\]for binary64. While it is not far from the seventeen bits to which we will round in the next step, this error is still larger, and in any case is not comparatively negligible. As a result, it significantly contributes to misrounding, see (\ref{MisroundingError}). Lagny's lesser-known irrational method provides us with a way to improve it. \subsubsection*{Lagny's irrational method} As written in (\ref{LagnyIrrationalCubeRootIteration}), Lagny's irrational method \begin{equation}ξ' = \tfrac{1}{2}q'+\sqrt{\tfrac{1}{4}{q'}^2+\frac{y-{q'}^3}{3q'}}\label{IrrationalNoRewrite}\end{equation} seems prohibitively computationally expensive in comparison to the rational one: it adds a square root on the critical path, dependent on the result of a division. However, rewriting\footnote{This kind of rewriting is generally effective for quadratic irrational methods; see the discussion in appendix~\ref{Performance}.} it as, \exempligratia,\[ ξ' = \pa{\sqrt{3}{q'}^2+\sqrt{4yq'-{q'}^4}}\frac{1/\sqrt{12}}{q'}\text, \] one can evaluate it with similar performance to the rational method. Its error is\[ ε_ξ' = \frac{-{ε_q'}^3}{3\pa{\tfrac{1}{2}+\sqrt{\tfrac{1}{2}-2{ε_q'}^2-\tfrac{4}{3}{ε_q'}^3-\tfrac{1}{3}{ε_q'}^4}- {ε_q'}^2}} = -\tfrac{1}{3}{ε_q'}^3 + \BigO\of{{ε_q'}^4}\text, \] whose leading term is half that of the rational method; indeed we find that with $Γ=Γ_{\mathrm{Kahan}}$, we have $\max_{y} \abs{ε_ξ'} \approx 10.48\Multiply10^{-6}$, $\log_2 \max_{y} \abs{ε_ξ'} \approx -16.54$, gaining one bit with respect to the rational method. Here $Γ=Γ_{\mathrm{Kahan}}$ is very close to optimal; with the optimal value\begin{align*} Γ_{\mathrm{L^2}}&\approx0.10096\,82076\,65096\,37285\,40885\,52460\,33434\,63385^-\\ &\approx \hex{0.19D9\,0D6D\,DB79\,AE1F\,D556\,591B\,78EF\,F3DD\,B^+\text,} \intertext{the error bound} \max_{y} \abs{ε_{ξ}'}&\approx 10.48337\,57985\,85309\,87229\,03375\,83237\,37064\,369^+\Multiply 10^{-6}\\ &\approx \hex{1.5FC3\,832D\,82FF\,67E3\,E4A4\,C2FD\,A877\,7C2E^-}\Multiply 2^{-17} \end{align*} improves only in its sixth decimal place. 
However, we have other ways of improving the error at no cost to performance. \subsubsection*{Canon optimization of Lagny's irrational method} The idea for this optimization comes from \cite{Canon2018a}, reproduced here with the author’s permission: \begin{quotation} % https://twitter.com/stephentyrone/status/1016283784067665920 A trick I’ve used for years and should write up: you can apply optimization to the iteration, not just the starting guess: $x' = x p\of x$, select $p\of x$ to be minimax error on bounded initial error in $x$. This yields a nice family of tunable approximations. % https://twitter.com/stephentyrone/status/1016284058438062081 Everyone else seems to worry about starting estimate, but use standard iterations, which is appropriate for arbitrary precision, but silly with a fixed precision target. % https://twitter.com/stephentyrone/status/1016328842296864770 Note that as $p$ gets to be high-order, it converges quickly to the Taylor series for the correction, but there's a nice space with cheap initial approximations and order $2$--$5$ or so, because we can evaluate these polynomials with lower latency [than] serially-dependent iterations. \end{quotation} Canon later elaborated on this in \cite{Canon2018b}: \begin{quotation} % https://twitter.com/stephentyrone/status/1057788315699687424 Quick version: we want to compute $1/\sqrt{y}$, we have an approximation $x_0$, we want to improve it to $x_1 = x_0 p\of{x_0, y}$. For efficiency, we want $p$ to be a polynomial correction.\\ \textit{Handwavy motivation for brevity} make $p$ a polynomial in $x_0^2y$, which is approximately $1$. % https://twitter.com/stephentyrone/status/1057789050810232834 Specifically, if $x_0$ has relative error $e$, $x_0^2y$ is bounded by something like $1 \pm 2e$. So, we want to find $p$ that minimizes $\abs{x/x_0 - p\of{x_0^2y}}$ on $\intclos{1-2e}{1+2e}$. NR\footnote{Newton--Raphson, \idest, Newton's method; Raphson described a different formulation of it in \cite{Raphson1690}. Lagrange notes in \volcite{8}[161]{Lagrange1867} that Raphson was likely aware of Newton's method, and thus regarded his as entirely different; as Lagrange points out, they are equivalent.} uses the $p = 1$st order Taylor. We know that we can do better via usual approximation theory techniques. % https://twitter.com/stephentyrone/status/1057789379052232704 We can also use higher-order approximations to hit any specific accuracy target in a single step. This isn't always better than iterating, but sometimes it is. \end{quotation} We do not use a polynomial---nor even a rational function---, nor do we express our refinement as a function of a quantity bounded by the error. However, we take advantage of Canon's key idea of ``apply[ing] optimization to the iteration, not just the starting guess''; the latter is what we have so far done with $Γ$. The constants $\frac{1}{2}$, $\frac{1}{4}$, and $3$ in Lagny's irrational method may be modified with no effect on performance; altering the first two of these introduces rounding errors, but these need not concern us here. 
We thus write (\ref{IrrationalNoRewrite}) as \[ξ' = κq'+\sqrt{λ{q'}^2+\frac{y-{q'}^3}{μq'}}\] and choose $Γ$, $κ$, $λ$, and $μ$ minimizing relative error in the Чебышёв norm,\[ \tuple{Γ_{\mathrm{L^2C}}, κ_{\mathrm{L^2C}}, λ_{\mathrm{L^2C}},μ_{\mathrm{L^2C}}} \DefineAs\argmin_{Γ,κ,λ,μ}\max_{y}\abs{ε_ξ'}\text.\] Unfortunately, computing $\max_{y} \abs{ε_ξ'}$ is not as easy as for the standard methods; the introduction of $κ$, $λ$, and $μ$ breaks the monotonicity of $ε_ξ'\of{ε_q'}$, so that the local extrema of $ε_ξ'$ are not found in the same place as those of $ε_q'$. Formally looking for zeros of the derivative of $ε_ξ'$ with respect to $f$ is impractical. Instead we find the local maxima by numerical maximization on the four pieces on which $q'$ is a smooth function of $f$. That maximum can be minimized by a straightforward hill-climbing\footnote{It is plausible that some variation of Ремез's algorithm could be used here, much like it can be adapted to rational functions; since the hill-climbing converged satisfactorily, and did so much faster than we were writing this document, we have not investigated this.} starting from $Γ=\frac{1}{10}$, $κ=\frac{1}{2}$, $λ=\frac{1}{4}$, and $μ=3$. We obtain the values \marginfig[Error of $ξ'$ for various methods.\label{figStepTwoComparison}]{\hfill\llap{\includegraphics[scale=0.7]{cbrt_ξ_error.pdf}}} \begin{align*} Γ_{\mathrm{L^2C}} &\approx 0.10007\,61614\,69941\,46538\,73178\,74111\,71965\,58348^-\text,\\ κ_{\mathrm{L^2C}} &\approx 0.49999\,99381\,08574\,04775\,14291\,72928\,30652\,88838^-\text,\\ λ_{\mathrm{L^2C}} &\approx 0.25000\,00000\,00145\,58487\,81104\,01052\,77249\,27607^+\text,\\ μ_{\mathrm{L^2C}} &\approx 3.00074\,62871\,20756\,72280\,51404\,24030\,90919\,8768^-\text,\\ \max_{y}\abs{ε_ξ'} &\approx2.61568\,73856\,96087\,03169\,94140\,65268\,27137\,2496^-\Multiply10^{-6}\text,\\ \intertext{\centering in hexadecimal} Γ_{\mathrm{L^2C}} &\approx \hex{0.199E\,9760\,9F63\,9F90\,626F\,8B97\,2B3A\,6249\,2^+},\\ κ_{\mathrm{L^2C}} &\approx \hex{0.7FFF\,FEF6\,2DA8\,524D\,ECD5\,911F\,4DB1\,F8304^-},\\ λ_{\mathrm{L^2C}} &\approx \hex{0.4000\,0000\,0028\,FA7E\,FCCD\,1CCB\,662C\,3189F^+},\\ μ_{\mathrm{L^2C}} &\approx \hex{3.0030\,E89E\,C6EE\,7608\,E9C4\,EAD9\,4965\,F67A^-},\\ \max_{y}\abs{ε_ξ'} &\approx\hex{1.5F12\,558F\,C4F0\,FD38\,6AF9\,BD60\,9F87\,1BEC^-}\Multiply2^{-19}\text, \end{align*} $\log_2\max_{y}\abs{ε_ξ'} \approx -18.54$: this optimization gains two bits. The resulting $ε_ξ'$ is remarkably equioscillating, as can be seen in figure~\ref{figStepTwoComparison}. Note that a similar optimization could be applied to the rational method; however, it would not unconditionally be free: changing the $2$ in the denominator turns an addition into a multiplication, and inserting additional constants adds more operations. Whether this hinders performance depends on the architecture. In any case, the optimization can scarcely gain more than two bits; such an optimized rational method would still have double the error of the optimized irrational method. \subsection{Rounded approximation} \label{RoundedApproximation} The number $x$ is obtained from $ξ$ by rounding to $\smash{\Floor{\frac{p}{3}}}$ bits. \subsubsection*{Directed rounding toward zero} An easy solution is to zero all but the most significant $\smash{\Floor{\frac{p}{3}}}$ bits of $ξ$. 
The resulting relative error $\abs{\frac{x}{ξ}-1}$is greatest when the zeroed bits are all $1$ and the remaining bits (except for the leading $1$) are all $0$; this is the case when the significand of $ξ$ is $1+2^{-\Floor{\frac{p}{3}}+1}-2^{1-p}$, in which case that of $x$ is $1$, so that\[ \abs{\frac{x}{ξ}-1}\le1-\frac{1}{1+2^{-\Floor{\frac{p}{3}}+1}-2^{1-p}} < 2^{-\Floor{\frac{p}{3}}+1}=2^{-16}\text. \] For the error of $x$ as an approximation of the cube root, $ε_x\DefineAs\frac{x}{\sqrt[3]{y}}-1$, we have the bound $\abs{ε_x} < \pa{1+\abs{ε_ξ}}\pa{1+2^{-16}}-1$. \subsubsection*{Rounding to nearest} An alternative is to use Dekker's method to round to the nearest number with the desired number of bits; see \cite[235,241]{Dekker1971} in \emph{mul12}:\[ ϖ\DefineAs\round{ξ\pa{2^{p-\Floor{\frac{p}{3}}}+1}}\text{, }x\DefineAs\round{\round{ξ-ϖ}+ϖ}\text. \] The rounding error is then bounded by $2^{-\Floor{\frac{p}{3}}}$; we have $\abs{ε_x} < \pa{1+\abs{ε_ξ}}\pa{1+2^{-17}}-1$. The chosen alternative will be determined by performance--accuracy trade-offs. This will be discussed in appendix~\ref{Performance}. \subsection{High order iteration} \label{HighOrder} We compute the faithfully rounded result $r_0$ as the correctly rounded sum of the rounded approximation and a correction term,\begin{equation} r\DefineAs x+Δ\text{, }r_0\DefineAs\round{r}\text,\label{AdditiveCorrection} \end{equation} where the correction term $Δ$ is that of a high-order root finding method. As usual, we call the infinite-precision correction term $Δ'$, and $r'\DefineAs x+Δ'$. The truncation error is \[ε_r'\DefineAs\frac{x+Δ'}{\cuberoot y}-1\text.\] The rounding error on the correction term is $δ\DefineAs\frac{Δ}{Δ'}-1$. The error of $r$ is thus\begin{align*} ε_r\DefineAs{}&\frac{x+Δ}{\cuberoot y}-1\\ ={}&\frac{x+Δ'\pa{1+δ}}{\cuberoot y}-1\\ ={}&\frac{x+Δ'+\pa{x+Δ'-x}δ}{\cuberoot y}-1\\ ={}&ε_r'+\pa{ε_r'-ε_x}δ\text,\numberthis\label{MisroundingError} \end{align*} and that of the faithfully-rounded result $r_0$ is $ε_r\pa{1+υ}$ for some $\abs υ \le u$. It is easy to make $ε_r'$ negligible by increasing the order of the method; the main contribution to misrounding is then $δ\abs{ε_x}<δ\pa{\pa{1+\abs{ε_ξ}}\pa{1+2^{w}}-1}$, where $w$ is $-16$ or $-17$ depending on the choice of rounding: this is why $\abs{ε_ξ}$ needed to be kept low in step~\ref{ThirdPrecision}. We now compute bounds for $ε_r'$ and $δ$ for two different methods. In the interest of brevity, we have not considered the generalized Lagny irrational methods here; both the rational and quadratic irrational methods of order $4$ have overly high truncation error, and the irrational methods do not lend themselves to being written in the form (\ref{AdditiveCorrection}) without ill-conditioned (and inexact) operations in the expression for $Δ$. \subsubsection*{Fifth order rational} We use one iteration of the Lagny--Schröder rational method of order $5$: \[ Δ=\round{ \frac{\round{\pa{y-x^3} \round{\round{\round{\round{10x^3} + 16y}x^3} + \round{y^2}}}} {\round{x^2 \round{\round{\round{\round{15 x^3} + \round{51 y}}x^3} + \round{15 \round{y^2}}}}}} \] where $x^2$ and $x^3$ are exact thanks to the trailing $0$s of $x$, and $y-x^3$ is exact by Sterbenz’s lemma. 
In infinite precision, this method is of such high order that if $\log_2\abs{ε_x} < 14.5$, which is the case even if $ξ$ is computed by the rational method, the relative error $ε_r'$ of the result is less than $2^{-75}$; if $ξ$ is computed by the irrational method with Canon optimization, the error is below $2^{-86}$. We will not seek to bound the truncation error more closely, nor to tweak the constants in the method to optimize it: as we will see, it is dominated by rounding. Thanks to the exact cube and exact difference, the rounding analysis of the correction term is straightforward. All remaining sums being of positive terms, their relative error is readily bounded by the largest of those of their terms. This leads to bounds of $γ_{5}$ on the numerator and $γ_{5}$ on the denominator, overall \[ δ<\frac{1+γ_{5}+γ_{1}}{1-γ_{5}}-1<\frac{1+γ_{6}}{1-γ_{5}}-1<\frac{γ_{11}}{1-γ_{5}}\approx11u\text.\] However, considering that our final bound on $ε_r$, and thus our misrounding estimate, is nearly proportional to this error, a more careful analysis is warranted. Observe that $x^3=y\pa{1+ε_x}^3$, so that a sum $Σ'=α x^{m}y^{p-m} + β x^{n}y^{p-n}$ evaluated with terms that carry the errors $δ_i$ as $Σ=α x^{m}y^{p-m}\pa{1+δ_1} + β x^{n}y^{p-n}\pa{1+δ_2}$ has the error\[ \frac{Σ}{Σ'}-1=\frac{\pa{1+ε_x}^{3m}αδ_1+\pa{1+ε_x}^{3n}βδ_2}{\pa{1+ε_x}^{3m}α+\pa{1+ε_x}^{3n}β}\text, \] which we may bound, assuming without loss of generality that $m\le n$, as\[ \abs{\frac{Σ}{Σ'}-1}\le ι^{3n}\frac{αδ_1+βδ_2}{α+β}\text, \] where $ι\DefineAs\frac{1+\abs{ε_x}}{1-\abs{ε_x}}$. With this property, we can, in particular, take advantage of the exact multiplication by $16$ in the numerator; the rounding error analysis of the numerator is as follows, using the fact that $λu<γ_λ$ for $0<λ<\frac{1}{u}$ and the subadditivity of $γ$ in the same domain. \begin{center} % The phantom/llap dance is there to work around the impossibility of alignment across % matched brackets. \begin{tabular*}{\textwidth}{l @{\extracolsep{\fill}} r} Expression & Bound on the rounding error\\ \hline $\hphantom{\leftRound{\pa{y-x^3}\leftRound{\leftRound{\leftRound{\round{10x^3}+16y}}}}} \mathllap{\round{10x^3} + 16y}$ & $ι^{3}\frac{10u}{26} < γ_{\frac{10}{26}ι^{3}}$ \\ $\hphantom{\leftRound{\pa{y-x^3}\leftRound{\leftRound{\round{\round{10x^3}+16y}}}}} \mathllap{\round{\round{10x^3} + 16y}}$ & $γ_{\frac{10}{26}ι^{3}+1}$ \\ $\hphantom{\leftRound{\pa{y-x^3}\leftRound{\round{\round{\round{10x^3}+16y}x^3}}}} \mathllap{\round{\round{\round{10x^3} + 16y}x^3}}$ & $γ_{\frac{10}{26}ι^{3}+2}$ \\ $\hphantom{\leftRound{\pa{y-x^3}\leftRound{\round{\round{\round{10x^3}+16y}x^3}+\round{y^2}}}} \mathllap{\round{\round{\round{10x^3} + 16y}x^3} + \round{y^2}}$ & % This rounding error runs into the footprint of the 𒋛𒄥𒊏𒋾, so we need a mathllap; % this looks OK because there is plenty of margin between it and the current level. $\mathllap{ι^{6}\frac{1}{27}\pa{26γ_{\frac{10}{26}ι^{3}+2} + u}<\frac{1}{27}γ_{10ι^{9}+\pa{2\Multiply 26+1}ι^{6}}}$\\ $\hphantom{\leftRound{\pa{y-x^3}\round{\round{\round{\round{10x^3}+16y}x^3}+\round{y^2}}}} \mathllap{\round{\round{\round{\round{10x^3} + 16y}x^3} + \round{y^2}}}$ & $\frac{1}{27}γ_{10ι^{9}+\pa{2\Multiply 26+1}ι^{6}+27}$\\ $\round{\pa{x^3 - y} \round{\round{\round{\round{10x^3} + 16y}x^3} + \round{y^2}}}$ & $\frac{1}{27}γ_{10ι^{9}+\pa{2\Multiply 26+1}ι^{6}+2\Multiply27}$ \end{tabular*} \end{center} The resulting bound on the numerator is approximately $4.33u$, an improvement of $\frac{2}{3}u$ over the naïve bound of $5u$. 
We may build a similar {\xsuxfont 𒋛𒄥𒊏𒋫} for the denominator. \begin{center} \begin{tabular*}{\textwidth}{l @{\extracolsep{\fill}} r} Expression & Bound on the rounding error\\ \hline $\hphantom{\leftRound{x^2 \leftRound{\leftRound{\leftRound{\round{15 x^3} + \round{51 y}}}}}} \mathllap{\round{15 x^3} + \round{51 y}}$ & $γ_1$\\ $\hphantom{\leftRound{x^2 \leftRound{\leftRound{\round{\round{15 x^3} + \round{51 y}}}}}} \mathllap{\round{\round{15 x^3} + \round{51 y}}}$ & $γ_2$\\ $\hphantom{\leftRound{x^2 \leftRound{\round{\round{\round{15 x^3} + \round{51 y}}x^3}}}} \mathllap{\round{\round{\round{15 x^3} + \round{51 y}}x^3}}$ & $γ_3$\\ $\hphantom{\leftRound{x^2 \leftRound{\round{\round{\round{15 x^3} + \round{51 y}}x^3}+\round{15 \round{y^2}}}}} \mathllap{\round{\round{\round{15 x^3} + \round{51 y}}x^3} + \round{15 \round{y^2}}}$ & $ι^6\frac{66γ_3+15γ_2}{81}<\frac{1}{81}γ_{228ι^6}$\\ $\hphantom{\leftRound{x^2 \round{\round{\round{\round{15 x^3} + \round{51 y}}x^3}+\round{15 \round{y^2}}}}} \mathllap{\round{\round{\round{\round{15 x^3} + \round{51 y}}x^3} + \round{15 \round{y^2}}}}$ & $\frac{1}{81}γ_{228ι^6+81}$\\ $\round{x^2 \round{\round{\round{\round{15 x^3} + \round{51 y}}x^3} + \round{15 \round{y^2}}}}$ & $\frac{1}{81}γ_{228ι^6+2\Multiply81}$ \end{tabular*} \end{center} This is a bound of about $4.81u$ on the denominator, a more modest improvement of $\frac{5}{27}u$ over our earlier $5u$. Overall, we get the bound\[ δ<\frac{1+\frac{1}{27}γ_{10ι^{9}+\pa{2\Multiply 26+1}ι^{6}+2\Multiply27}+γ_1}{1-\frac{1}{81}γ_{228ι^6+2\Multiply81}}-1 <\frac{1+\frac{1}{27}γ_{10ι^{9}+\pa{2\Multiply 26+1}ι^{6}+3\Multiply27}}{1-\frac{1}{81}γ_{228ι^6+2\Multiply81}}-1\text, \] approximately $10.14u$. The resulting bound, using Lagny's irrational method with Canon optimization in step~{\ref{ThirdPrecision}} and rounding to nearest in step~{\ref{RoundedApproximation}}, is\begin{equation} \abs{ε_r}<0.00010397576244095^+u\text,\label{FaithfulBound} \end{equation} where all figures given are correct if the rounding error in the computation of $ξ$, namely $\abs{\frac{ε_ξ}{ε_ξ'}-1}$, does not exceed $100u$. \subsubsection*{Sixth order rational} An alternative is the Lagny--Schröder rational method of order $6$:\[ Δ=\round{\frac{ \round{\round{x\pa{y-x^3}}\round{\round{\round{\round{5x^3}+\round{17y}}x^3}+\round{5\round{y^2}}}}}{ \round{\round{\round{\round{7x^3} + \round{42 y}}\round{x^6}}+\round{\round{\round{30x^3}+2y}\round{y^2}}}}}\text, \] where $x^3$ is exact thanks to the trailing $0$s of $x$, $\round{x^6}$ is correctly rounded because it is computed as the square of $x^3$, and $y-x^3$ is exact by Sterbenz’s lemma. Here we have $\abs{ε_r'}<2^{-100}$ if $\abs{ε_x}<2^{-14}$, so the truncation error is even more negligible. For rounding error, the maximum bound on the error of the sums gives us a naïve bound of $γ_6$ on the numerator, $γ_5$ on the denominator, overall \[δ<\frac{1+γ_6+γ_1}{1-γ_5}-1<\frac{1+γ_{7}}{1-γ_{5}}-1<\frac{γ_{12}}{1-γ_{5}}\approx12u\text.\] The following {\xsuxfont 𒋛𒄥𒊏𒀀𒌅} tighten the bounds on the sums in the numerator and the denominator in the same way as for the fifth order method. 
\begin{center} \begin{tabular*}{\textwidth}{l @{\extracolsep{\fill}} r} Expression & Bound on the rounding error\\ \hline $\hphantom{\leftRound{\round{x\pa{y-x^3}}\leftRound{\leftRound{\leftRound{\round{5x^3}+\round{17y}}}}}} \mathllap{\round{5x^3}+\round{17y}}$ & $γ_1$\\ $\hphantom{\leftRound{\round{x\pa{y-x^3}}\leftRound{\leftRound{\round{\round{5x^3}+\round{17y}}}}}} \mathllap{\round{\round{5x^3}+\round{17y}}}$ & $γ_2$\\ $\hphantom{\leftRound{\round{x\pa{y-x^3}}\leftRound{\round{\round{\round{5x^3}+\round{17y}}x^3}}}} \mathllap{\round{\round{\round{5x^3}+\round{17y}}x^3}}$ & $γ_3$\\ $\hphantom{\leftRound{\round{x\pa{y-x^3}}\leftRound{\round{\round{\round{5x^3}+\round{17y}}x^3}}}} \mathllap{\round{\round{\round{5x^3}+\round{17y}}x^3}}+\round{5\round{y^2}}$ & $ι^6 \frac{22γ_3+5γ_2}{27}<\frac{1}{27}γ_{76ι^6}$\\ $\hphantom{\leftRound{\round{x\pa{y-x^3}}\round{\round{\round{\round{5x^3}+\round{17y}}x^3}}+\round{5\round{y^2}}}} \mathllap{\round{\round{\round{\round{5x^3}+\round{17y}}x^3}+\round{5\round{y^2}}}}$ & $\frac{1}{27}γ_{76ι^6+27}$\\ $\hphantom{\leftRound{\round{x\pa{y-x^3}}\round{\round{\round{\round{5x^3}+\round{17y}}x^3}}+\round{5\round{y^2}}}} \mathllap{\round{x\pa{y-x^3}}\round{\round{\round{\round{5x^3}+\round{17y}}x^3}+\round{5\round{y^2}}}}$ & $\frac{1}{27}γ_{76ι^6+2\Multiply27}$\\ $\round{\round{x\pa{y-x^3}}\round{\round{\round{\round{5x^3}+\round{17y}}x^3}+\round{5\round{y^2}}}}$ & $\frac{1}{27}γ_{76ι^6+3\Multiply27}$\\ \end{tabular*} \end{center} This bound is approximately $5.81u$ on the numerator. \begin{center} \begin{tabular*}{\textwidth}{l @{\extracolsep{\fill}} r} Expression & Bound on the rounding error\\ \hline $\hphantom{\leftRound{\round{\round{\round{7x^3} + \round{42 y}}\round{x^6}}+\leftRound{\leftRound{\round{30x^3}+2y}}}} \mathllap{\round{30x^3}+2y}$ & $ι^3\frac{30u}{32}<γ_{\frac{10}{32}ι^3}$\\ $\hphantom{\leftRound{\round{\round{\round{7x^3} + \round{42 y}}\round{x^6}}+\leftRound{\round{\round{30x^3}+2y}}}} \mathllap{\round{\round{30x^3}+2y}}$ & $γ_{\frac{10}{32}ι^3+1}$\\ $\hphantom{\leftRound{\round{\round{\round{7x^3} + \round{42 y}}\round{x^6}}+\leftRound{\round{\round{30x^3}+2y}\round{y^2}}}} \mathllap{\round{\round{30x^3}+2y}\round{y^2}}$ & $γ_{\frac{10}{32}ι^3+2}$\\ $\hphantom{\leftRound{\round{\round{\round{7x^3} + \round{42 y}}\round{x^6}}+\round{\round{\round{30x^3}+2y}\round{y^2}}}} \mathllap{\round{\round{\round{30x^3}+2y}\round{y^2}}}$ & $γ_{\frac{10}{32}ι^3+3}$\\ $\hphantom{\leftRound{\round{\round{\round{7x^3} + \round{42 y}}\round{x^6}}+\round{\round{\round{30x^3}+2y}\round{y^2}}}} \mathllap{\round{\round{\round{7x^3} + \round{42 y}}\round{x^6}}+ \round{\round{\round{30x^3}+2y}\round{y^2}}}$ & $\smash{ι^6\frac{49γ_4+32γ_{\frac{10}{32}ι^3+3}}{81}}<\frac{1}{81}γ_{10ι^9+282ι^6}$\\ $\round{\round{\round{\round{7x^3} + \round{42 y}}\round{x^6}}+\round{\round{\round{30x^3}+2y}\round{y^2}}}$ & $\frac{1}{81}γ_{10ι^9+282ι^6+81}$ \end{tabular*} \end{center} This bound is approximately $4.61u$ on the denominator. We thus have, for the method of order $6$,\[ δ<\frac{1+\frac{1}{27}γ_{76ι^6+3\Multiply27}+γ_1}{1-\frac{1}{81}γ_{10ι^9+282ι^6+81}} <\frac{1+\frac{1}{27}γ_{76ι^6+4\Multiply27}}{1-\frac{1}{81}γ_{10ι^9+282ι^6+81}}\, \] approximately $11.42u$. 
The resulting bound, using Lagny's irrational method with Canon optimization in step~{\ref{ThirdPrecision}} and rounding to nearest in step~{\ref{RoundedApproximation}}, is\[ \abs{ε_r}<0.00011700493079505^-u\text, \] where again all figures given are correct if the rounding error $\abs{\frac{ε_ξ}{ε_ξ'}-1}$ in the computation of $ξ$ does not exceed $100u$. \subsubsection*{The unreasonable effectiveness of the sixth order rational method} The bound on the rounding error in the sixth order rational method is a little more than $1u$ worse than the one on its fifth order rational method; this is to be expected, as it involves more calculations and does not benefit from an exact multiplication by $16$. Accordingly, since truncation error is negligible before rounding error for both, one might expect that the fifth order method would be superior. Numerical experiments suggest otherwise: with Lagny's irrational method with Canon optimization in step~\ref{ThirdPrecision} followed by directed rounding toward zero in step~\ref{RoundedApproximation}, using the method of order $6$ leads to $4.337(66)$ misroundings per million, whereas the method of order $5$ leads to $4.564(68)$ per million. The bounds on $δ$ may need to be tightened further; for instance, we have not taken any account of rounding errors systematically compensating each other, the error induced by $\round{y^2}$ is the same in the numerator and the denominator, but we bound it as if it had opposite signs. The explanation may however lie elsewhere; the larger number of operations involved for the overall maximal rounding error may lead to lower average error. Whatever the reason may be, the method of order $6$ appears to be preferable should one wish to implement a faithful cube root---they are about equally fast on modern architectures. However, the poorer bound on the maximal error means that we must instead use the method of order $5$ in our correctly-rounded cube root: its better bound means that we go through the ``potential misrounding'' path less often. \section*{Correct rounding} We have $r=\cuberoot{y}\pa{1+ε_r}$, thus\[ \cuberoot{y}\in\intclos{\frac{r}{1+\bar ε_r}}{\frac{r}{1-\bar ε_r}}\DefinitionOf \mathscr{I}\text, \] where $\bar ε_r$ is a bound for $\abs {ε_r}$. Consider the \emph{ties}, \idest, the number halfway betwen $r_0=\round{r}$ and its binary64 successor and the number halfway between $r_0$ and its predecessor. If $\mathscr{I}$ contains neither of the ties, $r_0=\round{\cuberoot{y}}$: the faithful method returned a correct result. This criterion, slightly weakened, may be determined as follows. The difference between $r_0$ and the unrounded $r=x+Δ$ can be computed as described in \cite[224]{Dekker1971}, $r_1\DefineAs x-r_0+Δ$, evaluated as written (both operations are exact). We may compute the potential other candidate for $\round{\cuberoot{y}}$ as $\tilde r\DefineAs\round{r_0+2r_1}$. This value is equal to $r_0$ only if $r_1$ is below a quarter-unit in the last place—in which case the small size of $\mathscr{I}$ ensures that it does not contain a tie. 
Otherwise, $\tilde r$ is the next representable number above or below $r_0$.\marginfig[Detecting a potential misrounding, in the case $r_1 < 0$.]{\begin{tikzpicture}[scale=1.5] \draw (-1.1,0) -- (1.1,0); \draw (-1,-0.3) node[below] {$\tilde r$} -- (-1,0.2); \draw [dashed] (0,-0.3) node[below] {tie} -- (0,0.75); \draw (0.2,-0.1) node[below] {$r$} -- (0.2,0.6); \draw (1,-0.3) node[below] {$r_0=\round{r}$} -- (1,0.2); \draw [->] (1,0.1) -- (0.2,0.1) node[midway,above] {$r_1$}; \draw (-0.1,0.5) -- (-0.1,0.6) -- (0.5,0.6) node[midway,above]{$\mathscr{I}$} -- (0.5,0.5); \end{tikzpicture}} If $\tilde r\neq r_0$, the tie of interest is $\frac{\tilde r+r_0}{2}$, and we must ascertain whether it lies in $\mathscr I$,\[ \frac{\tilde r+r_0}{2}\in\intclos{\frac{r}{1+\bar ε_r}}{\frac{r}{1-\bar ε_r}}\text,\] subtracting $r_0$ to get rid of the unrepresentable $r$ in the right-hand side,\[ \frac{\tilde r-r_0}{2} \in\intclos{\frac{r_1-\bar ε_rr_0}{1+\bar ε_r}}{\frac{r_1+\bar ε_rr_0}{1-\bar ε_r}}\text,\] subtracting $r_1$ to remove the cancellation,\[ \frac{\tilde r-r_0}{2}-r_1 \in\intclos{-\bar ε_r\frac{r_0+r_1}{1+\bar ε_r}}{\bar ε_r\frac{r_0+r_1}{1-\bar ε_r}}\text.\] The left-hand-side may be computed as written; however the bounds of the interval above are not representable. We must relax them a little,\[ \frac{\tilde r-r_0}{2}-r_1\in \intclos{ -\frac{\bar ε_r}{1+\bar ε_r}\pa{1+u}r_0}{ \frac{\bar ε_r}{1-\bar ε_r}\pa{1+u}r_0}\text.\] On an architecture where multiplications with directed rounding are not more expensive when the surrounding computation uses the roundTiesToEven rounding-direction attribute, these bounds may be used directly, provided that the constants therein have been rounded toward their respective signs, and the multiplication by $r_0$ be similarly rounded. If we restrict ourselves to roundTiesToEven at runtime, we must relax the bounds some more,\[ \frac{\tilde r-r_0}{2}-r_1\in \intclos{ \round{\roundTowardNegative{-\frac{\bar ε_r}{1+\bar ε_r}\pa{1+\frac{2u}{1-u}}}r_0}}{ \round{\roundTowardPositive{\frac{\bar ε_r}{1-\bar ε_r}\pa{1+\frac{2u}{1-u}}}r_0}}\text.\] We may widen this interval slightly into one that is symmetric about $0$, thus requiring only one comparison \[\abs{\frac{\tilde r-r_0}{2}-r_1}\leq \round{τr_0}\text,\] with\begin{align*} τ&\DefineAs\roundTowardPositive{\frac{\bar ε_r}{1-\bar ε_r}\pa{1+\frac{2u}{1-u}}}\\ &=\hex{1.7C73DBBD9FA60}\Multiply 2^{-66}, \end{align*}\marginnote{TODO(egg): Refine these values of $τ$ for the actual bound on the rounding error in $ξ$, to be given in annex~\ref{LagnyStepTwoRounding}.} where $\bar ε_r$ is the bound from (\ref{FaithfulBound}) for Lagny's irrational method with Canon optimization in step~\ref{ThirdPrecision}, rounding toward zero in step~\ref{RoundedApproximation}, and the method of fifth order in step~\ref{HighOrder}, allowing for $100u$ on the computation of $ξ$. If this inequality holds, there may be a misrounding. The correctly-rounded result may then readily be computed using the ordinary cube root algorithm, described, \exempligratia, in Lagny's \cite[286\psqq]{FantetdeLagny1697}, used with binary digits. \subsection*{Extracting a digit} This method extracts a single digit of $\roundTowardZero{\cuberoot{y}}$ at each step. 
The interval $\mathscr{I}_\pm$ is small enough that it cannot contain both a floating-point number and a tie; thus if $\mathscr{I}_\pm$ contains a tie, we know that $\roundTowardZero{\cuberoot{y}}=\roundTowardZero{r}\DefinitionOf a$\text; we thus have the first $53$ bits already, as\[ a=\min\of{r_0,\tilde r}\text.\] Correct rounding may be achieved by extracting a single additional bit: the number is in excess of the tie---and thus must be rounded up---if and only if that bit is $1$, because, as remarked in \cite[15]{LangMuller2000}, there are no halfway cases for the cube root. In the ordinary cube root method, our remainder is $ρ_{53}\DefineAs y-a^3$, and the next bit is $1$ if and only if the next remainder would be positive with that bit,\[ ρ_{54|1}\DefineAs y-\pa{a+b}^3 = ρ_{53}-3a^2b-3ab^2-b^3\geq 0\text,\] where $b$ is the power of two corresponding to this bit (the difference\footnote{The tie is not representable, but this can instead be computed as half the distance $\abs{r_0-\tilde r}$ between $a$ and $\roundTowardPositive r$.} between $a$ and the tie). Using Veltkamp's\footnote{We have not been able to find a copy of \emph{RC-Informatie}, wherein Veltkamp's work was published; we follow Dekker's account thereof.} algorithm from \cite[234]{Dekker1971} to express $a^2$ as $\pa{a^2}_0+\pa{a^2}_1$ with $\pa{a^2}_0=\round{a^2}$, the remainder is\[ ρ_{53}=y-\pa{\pa{a^2}_0+\pa{a^2}_1}a\text; \] with two more applications of Veltkamp's algorithm to the products $\pa{a^2}_0a$ and $\pa{a^2}_1a$,\[ ρ_{53}=y-\pa{a^3}_{00}-\pa{a^3}_{01}-\pa{a^3}_{10}-\pa{a^3}_{11}\text, \] where the first subtraction is representable by Sterbenz's lemma. The remainder cannot have more digits than two plus twice the number of digits computed for the cube root, \idest, $2\Multiply53+2$ significant bits, for otherwise the ``digit'' $2=\bin{10}$ would fit in the place $b$. We can thus express it as \[ ρ_{53}=ρ_{53;0}+ρ_{53;1}+ρ_{53;2}\text, \] where the significands of the $ρ_{53;i}$ do not overlap, with ample room to spare. We may do so using the algorithms of Priest and Nievergelt, specifically, using algorithm~10 (sum of two sum-and-roundoff expansions) followed by algorithm~8 (modification of Priest's renormalization) from \cite[427,425]{Nievergelt2004} to compute the sum of the double-precision numbers $y-\pa{a^3}_{00}-\pa{a^3}_{01}$ and $-\pa{a^3}_{10}-\pa{a^3}_{11}$; the renormalization will return an expansion with at most three parts. The terms $3a^2b$, $3ab^2$, and $b^3$ may then be subtracted exactly while retaining triple precision: they add at most three significant digits. The first two of these terms should be split into representable parts,\begin{align*} 3a^2b&=2\pa{a^2}_0b+\pa{a^2}_0b+2\pa{a^2}_1b+\pa{a^2}_1b\text,\\ 3ab^2&=2ab^2+ab^2\text, \end{align*} and each of these parts should be subtracted using the same algorithms. The sign of $ρ_{54|1}$ may then be checked. \part*{Conclusion} In part~\ref{Abridged}, we have described and generalized a family of root-finding methods of arbitrary order. In part~\ref{CubeRoot}, we have used these methods to construct nearly correct faithfully-rounded implementations for the binary64 cube root, and we have described how their results may be adjusted to yield a correctly-rounded implementation, which to our knowledge had yet to be done. Other applications of these root-finding methods are outlined in appendix~\ref{ExampleGeneralizedLagnyMethods}. 
While we only used pre-existing methods, either Lagny's or Schröder's, in the foregoing implementation of a cube root, the implementation with FMA, described in appendix~\ref{FMA}, makes use of the new generalized irrational methods. The implementation choices that involve performance considerations are decided in appendix~\ref{Performance}; an explanation is given therein for the somewhat surprising speed of the irrational methods. The resulting implementations are compared with pre-existing ones in appendix~\ref{Comparison}. \vfill \pagebreak \appendix \part*{Appendices} \section{Proof of the equivalence of Lagny's rational method and Schröder's $\pa{A^0_{p-1}}$} \label{ProofOfTheProposition} We now prove the proposition from part~\ref{Abridged}, which, substituting the definition of the generalized Lagny rational method, is that \[x+\tfrac{1}{2}a = a + \pa{p-1}\frac{\pa{1/f}^{\pa{p-2}}\of{a}}{\pa{1/f}^{\pa{p-1}}\of{a}}\DefinitionOf ψ\of{a}\] if $x$ is the root of $E_1$. This can be expressed equivalently, and more conveniently for the proof, as \[a+Δ = a+\pa{p-1}\frac{\pa{1/f}^{\pa{p-2}}\of{a}}{\pa{1/f}^{\pa{p-1}}\of{a}}\DefinitionOf ψ\of{a}\] if $Δ$ is the root of $\tilde E_1$. \begin{proof} Let $\tilde E_p = d_0 Δ^p + \dotsb + d_p$, $\tilde E_{p-1} = e_0 Δ^{p-1} + \dotsb + e_{p-1}$, and As shown in \cite[52--54]{Householder1970}, the polynomial remainders $E_k$ are given up to a constant factor by \cite[19]{Householder1970} equation (23), \idest, for some $α$,\[ \frac{\tilde E_n}{α_n} = \bigradient{\tilde E_p}{p-1-n}{\tilde E_{p-1}}{p-n}\text, \] where the expression on the right-hand side is the \emph{bigradient} defined in \cite{Householder1968} (3.4) or \cite[19]{Householder1970} (20),\[ \frac{\tilde E_n}{α_n} = \det\begin{pmatrix} d_0 & d_1 & d_2 & \multicolumn{3}{c}{\cdots} & d_{2\pa{p-n}-3} & Δ^{p-n-2} E_p \\ & d_0 & d_1 & \multicolumn{3}{c}{\cdots} & d_{2\pa{p-n}-4} & Δ^{p-n-3} E_p \\[1ex] & & \ddots & & & & \vdots \\[1ex] & & & d_0 & d_1 & \cdots & d_{p-n-1} & Δ^{0} E_p \\ \zeroBL\huge& & & & e_0 & \cdots & e_{p-n-2} & Δ^{0} E_{p-1} \\ & & & e_0 & e_1 & \cdots & e_{p-n-1} & Δ^{1} E_{p-1} \\[1ex] & & \adots & & & & & \vdots \\[1ex] & e_0 & e_1 & \multicolumn{3}{c}{\cdots} & e_{2\pa{p-n}-4} & Δ^{p-n-2} E_{p-1} \\ e_0 & e_1 & e_2 & \multicolumn{3}{c}{\cdots} & e_{2\pa{p-n}-3} & Δ^{p-n-1} E_{p-1} \\ \end{pmatrix}\DefinitionOf\det\tilde{\matE}_k\text, \] with $p-1-n$ rows of the $d_k$ and $p-n$ of the $e_k$, where $d_k\DefineAs0$ for $k>p$, and $e_k\DefineAs0$ for $k>p-1$. In particular, for $n=1$,\[ \tilde{\matE}_1=\begin{pmatrix} d_0 & d_1 & d_2 & \cdots & d_{p-2} & d_{p-1} & d_p & & &\zeroTR\huge& Δ^{p-3} E_p \\ & d_0 & d_1 & \cdots & d_{p-3} & d_{p-2} & d_{p-1} & d_{p} & & & Δ^{p-4} E_p \\[1ex] & & \ddots & & & & & & & \ddots & \vdots \\[1ex] & & & d_0 & d_1 & d_2 & \multicolumn{3}{c}{\cdots} & d_{p-2} & Δ^{0} E_p \\ \zeroBL\huge& & & & e_0 & e_1 & \multicolumn{3}{c}{\cdots} & e_{p-3} & Δ^{0} E_{p-1} \\ & & & e_0 & e_1 & e_2 & \multicolumn{3}{c}{\cdots} & e_{p-2} & Δ^{1} E_{p-1} \\[1ex] & & \adots & & & & & & & \adots & \vdots \\[1ex] & e_0 & e_1 & \cdots & e_{p-3} & e_{p-2} & e_{p-1} & & & & Δ^{p-3} E_{p-1} \\ e_0 & e_1 & e_2 & \cdots & e_{p-2} & e_{p-1} & & & &\zeroBR\huge& Δ^{p-2} E_{p-1} \\ \end{pmatrix}\text, \] with $p-2$ rows of the $d_k$ and $p-1$ of the $e_k$. Observe that since the value of $Δ$ used in the rational method is the root of $\tilde E_1$, for that value of $Δ$, $\det \tilde{\matE}_1 = 0$, \idest, $\tilde{\matE}_1$ is singular. 
\begin{lemma} The matrix $\tilde{\matE}_1$ is singular if and only if $\matC\of{a+Δ}$ is singular, where\[ \matC\of{Ψ}\DefineAs\begin{pmatrix} Ψ - a & f_0 &&&\zeroTR\huge\\ -1 & f_1 & f_0 \\ 0 & f_2 & & \ddots \\ \vdots & \vdots & \ddots & & f_0 \\ 0 & f_{p-1} & \cdots & f_2 & f_1 \end{pmatrix} \] and $f_k\DefineAs \frac{f^{\pa{k}}\of{a}}{\Factorial k}$. \begin{proof} Observe that by the definition of $\tilde E_p$ and $\tilde E_{p-1}$, $d_k=f_{p-k}$ and $e_k=f_{p-1-k}$, so that\[ \tilde{\matE}_1=\begin{pmatrix} f_p & f_{p-1} & f_{p-2} & \cdots & f_2 & f_1 & f_0 & & &\zeroTR\huge& Δ^{p-3} E_p \\ & f_p & f_{p-1} & \cdots & f_3 & f_2 & f_1 & f_0 & & & Δ^{p-4} E_p \\[1.75ex] & & \ddots & & & & & & & \ddots & \vdots \\[1.75ex] & & & f_p & f_{p-1} & f_{p-2} & \multicolumn{3}{c}{\cdots} & f_2 & Δ^{0} E_p \\ \zeroBL\huge& & & & f_{p-1} & f_{p-2} & \multicolumn{3}{c}{\cdots} & f_2 & Δ^{0} E_{p-1} \\ & & & f_{p-1}& f_{p-2} & f_{p-3} & \multicolumn{3}{c}{\cdots} & f_1 & Δ^{1} E_{p-1} \\[1ex] & & \adots & & & & & & & \adots & \vdots \\[1ex] & f_{p-1} & f_{p-2} & \cdots & f_2 & f_1 & f_0 & & & & Δ^{p-3} E_{p-1} \\ f_{p-1} & f_{p-2} & f_{p-1} & \cdots & f_1 & f_0 & & & &\zeroBR\huge& Δ^{p-2} E_{p-1} \\ \end{pmatrix}\text. \] Note that $\tilde E_p-\tilde E_{p-1}=f_pΔ^p$. Subtracting the penultimate row from the first, the antepenultimate from the second, etc., the determinant is that of\[ \begin{pmatrix} f_p & & & & & & & & &\zeroTR\huge& Δ^{2p-3} f_p \\ & f_p & & & & & & & & & Δ^{2p-4} f_p \\[1.75ex] & & \ddots & & & & & & & & \vdots \\[1.75ex] & & & f_p & & & && & & Δ^{p} f_p \\ \zeroBL\huge& & & & f_{p-1} & f_{p-2} & \multicolumn{3}{c}{\cdots} & f_2 & Δ^{0} E_{p-1} \\ & & & f_{p-1}& f_{p-2} & f_{p-3} & \multicolumn{3}{c}{\cdots} & f_1 & Δ^{1} E_{p-1} \\[1ex] & & \adots & & & & & & & \adots & \vdots \\[1ex] & f_{p-1} & f_{p-2} & \cdots & f_2 & f_1 & f_0 & & & & Δ^{p-3} E_{p-1} \\ f_{p-1} & f_{p-2} & f_{p-1} & \cdots & f_1 & f_0 & & & &\zeroBR\huge& Δ^{p-2} E_{p-1} \\ \end{pmatrix}\text. \] Since $\det \tilde{\matE}_1$ is a polynomial of degree $1$, all terms divisible by $Δ^2$ must cancel out in the Laplace expansion\footnote{ \cite[294--304]{Laplace1772}, reprinted in \volcite{8}[395\psqq]{Laplace1878}.} of the determinant of the above matrix in the last column, so that \[\det \tilde{\matE}_1 = \pm \pa{f_0 + f_1Δ}\det \matA \mp f_0Δ \det \matB\text,\] where\[ \matA\DefineAs\pa{\begin{array}{c|c} f_p \Identity_{p-2} & \nullmat \\ \hline \matX & \begin{matrix} f_{p-2} & \multicolumn{2}{c}{\cdots} & f_1\\ f_{p-3} & \multicolumn{2}{c}{\cdots} & f_0\\ \vdots & & \adots \\ f_1 & f_0 & & \zeroBR\LARGE \end{matrix} \end{array}} \text{ and } \matB\DefineAs\pa{\begin{array}{c|c} f_p \Identity_{p-2} & \nullmat \\ \hline \matY & \begin{matrix} f_{p-1} & \multicolumn{2}{c}{\cdots} & f_2\\ f_{p-3} & \multicolumn{2}{c}{\cdots} & f_1\\ \vdots & & \adots \\ f_1 & f_0 & & \zeroBR\LARGE \end{matrix} \end{array}}\text, \] where the matrices $\matX$ and $\matY$ are of no consequence for the determinant: \[\frac{\det \tilde{\matE}_1}{f_p^{p-2}} = \pm \pa{f_0 + f_1Δ}\det \matA' \mp f_0Δ \det \matB'\text,\] where the matrices $\matA'$ and $\matB'$ are the bottom right blocks $\matA$ and $\matB$ respectively. 
A few Laplace expansions of\[ \det\matC\of{a+Δ}=\det\begin{pmatrix} Δ & f_0 &&&\zeroTR\huge\\ -1 & f_1 & f_0 \\ 0 & f_2 & & \ddots \\ \vdots & \vdots & \ddots & & f_0 \\ 0 & f_{p-1} & \cdots & f_2 & f_1 \end{pmatrix}\] finish the proof; along the first column,\[ \det\matC\of{a+Δ} = Δ\det\begin{pmatrix} f_1 & f_0 && \zeroTR\LARGE\\ f_2 & & \ddots \\ \vdots & \ddots & & f_0 \\ f_{p-1} & \cdots & f_2 & f_1 \end{pmatrix} +\det\begin{pmatrix} f_0 &&&&\zeroTR\LARGE\\ f_2 & f_1 & f_0 \\ f_3 & f_2 & & \ddots \\ \vdots & & & & f_0 \\ f_{p-1} & f_{p-2} & & f_2 & f_1 \end{pmatrix}\text, \] along the first row of both matrices,\[ \det\matC\of{a+Δ} = Δ\pa{f_1\det\downrightcurvedarrow\matA'-f_0\det\downrightcurvedarrow\matB'}+ f_0\det\downrightcurvedarrow{\matA}'\text, \] where $\downrightcurvedarrow{\matZ}'$ is obtained by reversing the order of the columns of the transpose of $\matZ$, so that\[ \det\matC\of{a+Δ}=\pm\frac{\det \tilde{\matE}_1}{f_p^{p-2}}\text. \] \end{proof} \end{lemma} The proposition follows from the lemma and theorem 4.4.2 from \cite[169]{Householder1970}: $ψ\of{a}$ is Householder’s (14) with $g\Identically 1$; for that value of $g$, theorem 4.4.2 states that (14) is the solution of (12) from the same page, which is $\det\matC\of{ψ\of{a}}=0$. By the lemma, for the value of $Δ$ in the rational method, $a+Δ$ solves that equation. \end{proof} \section{Some applications of the generalized Lagny methods} As previously mentioned, the rational methods are well known; we list them solely so that they may be compared with the irrational ones. \label{ExampleGeneralizedLagnyMethods} \subsection{Formulæ for the cube root} Let $f\of z\DefineAs y-z^3$, and $a$ be the starting estimate for the root of $f$, \idest, for $\cuberoot y$. Let $b\DefineAs f\of{a}=y-a^3$. Let $ε\DefineAs \frac{a}{\sqrt[3] y}-1$. We have the following formulæ. \subsubsection{Rational methods} The rational methods of orders $2$ through $5$ are given below. The first two of these are Newton's method and Lagny's rational method. \begin{center} \begin{tabular}{lr} Iteration & Asymptotic relative error\\ \hline $a+\frac{b}{3a^2}$ & $ε^2+\BigO\of{ε^3}$ \\ $a+\frac{ab}{3a^3+b}$ & $\tfrac{2}{3}ε^3+\BigO\of{ε^4}$\\ $a+\frac{3ab\pa{3a^3+b}}{27a^6+18a^3b+b^2}$ & $\tfrac{1}{3}ε^4+\BigO\of{ε^5}$\\ $a+\frac{b\pa{27a^6+18a^3b+b^2}}{81a^8+81a^5b+15a^2b^2}$ & $\tfrac{1}{9}ε^5+\BigO\of{ε^6}$\\ \end{tabular} \end{center} \subsubsection{Quadratic irrational methods} The quadratic irrational methods of orders $3$ through $5$ are given below. The first of these is Lagny's irrational method, for which we give the form from \cite{FantetdeLagny1691a}. \begin{center} \begin{tabular}{lr} Iteration & Asymptotic relative error\\ \hline $\frac{3a^2+\sqrt{9a^4+12ab}}{6a}=\tfrac{1}{2}a+\sqrt{\tfrac{1}{4}a^2+\frac{b}{3a}}$ & $-\tfrac{1}{3}ε^3+\BigO\of{ε^4}$ \\ $\frac{3a^3-b+\sqrt{81a^6 + 90a^3b+b^2}}{12a^2}$ & $-\tfrac{1}{9}ε^4+\BigO\of{ε^5}$ \\ $a\frac{-5b+\sqrt{3\pa{108a^6+108a^3b-5b^2}}}{18a^3-2b}$ & $-\tfrac{1}{18}ε^5+\BigO\of{ε^6}$ \\ \end{tabular} \end{center} \subsection{Formulæ for the fifth root} Let $f\of z\DefineAs y-z^5$, and $a$ be the starting estimate for the root of $f$, \idest, for $\sqrt[5] y$. Let $b\DefineAs f\of{a}=y-a^3$. Let $ε\DefineAs \frac{a}{\sqrt[5] y}-1$. We have the following formulæ. \subsubsection{Rational methods} The rational methods of orders $2$ through $5$ are given below. The first of these is Newton's method, the second is Halley's rational method, the last is Lagny's rational method. 
\begin{center} \begin{tabular}{lr} Iteration & Asymptotic relative error\\ \hline $a+\frac{b}{5a^4}$ & $2ε^2+\BigO\of{ε^3}$ \\ $a+\frac{ab}{5a^5+2b}$ & $2ε^3+\BigO\of{ε^4}$\\ $a+\frac{ab\pa{5a^5+2b}}{25a^{10}+20a^5b+2b^2}$ & $ε^4+\BigO\of{ε^5}$\\ $a+\frac{ab\pa{25a^{10}+20a^5b+2b^2}}{125a^{15}+150a^{10}b+40a^5b^2+b^3}$ & $-\tfrac{1}{5}ε^5+\BigO\of{ε^6}$\\ \end{tabular} \end{center} \subsubsection{Quadratic irrational methods} The quadratic irrational methods of orders $2$ through $5$ are given below. The first of these is Halley's irrational method, for which we give the form from \cite[141]{Halley1694}, the last is Lagny's quadratic irrational method. \begin{center} \begin{tabular}{lr} Iteration & Asymptotic relative error\\ \hline $\frac{15a^4+\sqrt{25a^8+40a^3b}}{20a^3}=\tfrac{3}{4}a+\sqrt{\tfrac{1}{16}a^2+\frac{b}{10a^3}}$ & $-\tfrac{1}{3}ε^3+\BigO\of{ε^4}$ \\ $\frac{5a^7-a^2b+a^2\sqrt{25a^{10}+30a^5b+b^2}}{10a^6}$ & $-ε^4+\BigO\of{ε^5}$ \\ $a\frac{-7b+\sqrt{100a^{10}+100a^5b-7b^2}}{10a^5-4b}$ & $-\tfrac{7}{10}ε^5+\BigO\of{ε^6}$ \\ \end{tabular} \end{center} \subsubsection{Quartic irrational method} Lagny's quartic irrational method is given below, in the form from \cite{FantetdeLagny1692} (with the misprint corrected). The quartic methods involve the solution of a quartic equation, so they are often impractical; in this case however, the equation $E_4=0$ is biquadratic. \begin{center} \begin{tabular}{lr} Iteration & Asymptotic relative error\\ \hline $\tfrac{1}{2}a+\sqrt{\sqrt{\tfrac{1}{4}a^4+\tfrac{b}{5a}}-\tfrac{1}{4}a^2}$&$-\tfrac{1}{5}ε^5+\BigO\of{ε^6}$ \end{tabular} \end{center} \subsection{Formulæ for the resolution of Kepler's equation} Let $f\of E\DefineAs E - e \sin E - M$; $f\of E=0$ is Kepler's equation\footnote{From \cite[295--300]{Kepler1609}. Kepler writes of this equation: \begin{quote}\marginnote{% It is enough for me to believe that it cannot be solved \emph{a priori}, because of the heterogeneity of the arc and the sine. If I am wrong, whoever will have shown me the way will be for me the great Ἀπολλώνιος.} Mihi ſufficit credere, ſolvi a priori non poſſe, propter arcus \& ſinus ἑτερογένειαν. Erranti mihi, quicunque viam monſtraverit, is erit mihi magnus Apollonius. \end{quote} It appears that this honour belongs either to Lagrange, with a power series in $e$ whose coefficients involve derivatives of powers the sine, given in \cite[209]{Lagrange1771} and reprinted in \volcite{3}[117]{Lagrange1867}, or to Bessel, with a sine series in $M$ whose coefficients involve his eponymous functions, given in an 1818 letter to Olbers \cite{Bessel1852}; see \cite{Colwell1992}.}, where $E$ is the eccentric anomaly, $M$ the mean anomaly, and $e$ the eccentricity of an elliptic orbit. We give the iterations for a starting estimate $α$ of the root $E$. Since $E$ is an angle and $M$ a fictitious angle, both in $\intclos{0}{2\Pi}$, we consider absolute errors rather than relative ones. With this two-parameter transcendental function, the two-parameter asymptotics are not particularly useful. Instead we give the approximate maximal error over $M\in\intclos{0}{2\Pi}$ for several values of $e$, and for the specific choice of starting estimate $α=M$, which greatly simplifies the higher-order formulæ. The errors of the starting estimate $α=M$ are given in the table below. 
\begin{center} \begin{tabular}{lrrrr} Approximation of $E$ & Max.~error for $e=0.2$ & $e=0.5$ & $e=0.9$ & $e=0.999$ \\ \hline $M$ & $10°37'$ & $28°34'$ & $51°34'$ & $57°14'$ \end{tabular} \end{center} For brevity, we write $c\DefineAs \cos α$, $s\DefineAs \sin α$. \subsubsection{Rational methods} The rational methods of orders $2$ through $4$ are given below; the first two of these are Newton's method and Halley's rational method. \begin{center} \begin{tabular}{l} Iteration\\ \hline $α+\frac{M-α+es}{1-ec}$ \\ $α+\frac{2\pa{1-ec}\pa{M-α+es}}{2-4ec+e^2c^2-eαs+eMs+e^2}$ \\ $α+\frac{3\pa{M-a+es}\pa{2-4ec+e^2c^2-eαs+eMs+e^2}}{6\pa{1+e^2+2e^2c^2-c^3e^3-eαs+eMs}+e\pa{\pa{α-M}^2-18+4eαs-4eMs-5e^2s^2}c}$ \end{tabular} \end{center} For $α=M$, we have the following simplified formulæ. \begin{center} \begin{tabular}{lrrrr} Approximation of $E$ & Max.~error for $e=0.2$ & $e=0.5$ & $e=0.9$ & $e=0.999$ \\ \hline $M+\frac{es}{1-ec}$ & $14'11^\dprime\hphantom{00}$ & $4°27'\hphantom{00\dprime}$ & $68°32'$ & $1246°\hphantom{00'}$\\ \rlap{$M+\frac{2\pa{1-ec}es}{2-4ec+e^2c^2+e^2}$} & $21\dprime\llap.43$ & $22'35\dprime$ & $13°07'$& $38°44'$ \\ \rlap{$M+\frac{3es\pa{2-4ec+e^2c^2+e^2}}{6\pa{1+2e^2c^2-e^3c^3+e^2}-e\pa{18+5e^2s^2}c}$} & $3\dprime\llap.03$ & $7'56\dprime$ & $10°30'$& $27°20'$\\ \end{tabular} \end{center} \subsubsection{Quadratic irrational methods} The quadratic irrational methods of orders $3$ and $4$ are given below. For the method of order $4$, the sign $\pm$ should be positive when $M<\Pi$, and negative when $M>\Pi$. \begin{center} \begin{tabular}{l} Iteration\\ \hline $α+\frac{ec-1+\sqrt{\pa{1-ec}^2+2es\pa{M-α+es}}}{es}$ \\ $α+\frac{\pa{α-M+2es}c-3s\pm\sqrt{\pa{α-M}^2c^2-2\pa{4ec-3}\pa{α-M}sc+\pa{3-4ec}^2s^2+18e\pa{M-α}s^3+18e^2s^4}}{2c\pa{ec-1}+3es^2}$ \end{tabular} \end{center} For $α=M$, we have the following simplified formulæ. \begin{center} \begin{tabular}{lrrrr} Approximation of $E$ & Max.~error for $e=0.2$ & $e=0.5$ & $e=0.9$ & $e=0.999$ \\ \hline \rlap{$M+\frac{ec-1+\sqrt{\pa{1-ec}^2+2e^2s^2}}{es}$} & $24\dprime\llap.37$ & $24'38\dprime$ & $10°54'$& $53°25'$\\ \rlap{$M+\frac{2esc-3s\pm\sqrt{\pa{3-4ec}^2s^2+18e^2s^4}}{2c\pa{ec-1}+3es^2}$} & $3\dprime\llap.06$ & $8'31\dprime$ & $14°14'$& $27°23'$ \end{tabular} \end{center} \subsubsection{Cubic irrational method} For the extraction of the roots of polynomials, it makes little sense to consider the cubic irrational methods, whose computation involves the extraction of a cube root. However, when it comes to functions whose derivatives involve trigonometric lines, a cube root is no longer obviously prohibitive. The approxmiation obtained from the method of degree $3$ and order $4$ with the starting estimate $α=M$ is given below; its error is remarkably low even at high eccentricities (an order of magnitude better than either the rational or the quadratic method of the same order for $e\geq 0.9$). \begin{center} \begin{tabular}{lrrrr} Approximation of $E$ & Max.~error for $e=0.2$ & $e=0.5$ & $e=0.9$ & $e=0.999$ \\ \hline $M+\frac{\frac{2c\pa{ec-1}+es^2}{R}+\frac{R}{e}-s}{c}$ & $2\dprime\llap.71$ & $4'04\dprime$ & $58'33\dprime$& $1°29'$ \end{tabular} \end{center} In the above formula,\[ R\DefineAs\sqrt[3]{3e^2sc-e^3s^3+\sqrt{e^3c^2\pa{8\pa{1-ec}^3-3es^2\pa{1+4ec\pa{ec-2}}-6e^3s^4}}}\text. \] This formula is applicable only if the expression under the square root is nonnegative. This is the case from $M=0$ to approximately $M=85°$, $M=79°$, $M=70°$, and $M=68°$ respectively for the eccentricities considered. 
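For concreteness, the approximation above can be transcribed directly into a short routine; the following plain-Python sketch (an illustration only, not one of the implementations benchmarked in appendix~\ref{Comparison}) evaluates it for given $M$ and $e$ within the applicability region just described:
\begin{verbatim}
from math import cos, sin, sqrt

def kepler_degree3_order4(M, e):
    # Starting estimate alpha = M, so c = cos M and s = sin M.
    c, s = cos(M), sin(M)
    radicand = e**3 * c**2 * (8.0 * (1.0 - e * c)**3
                              - 3.0 * e * s**2 * (1.0 + 4.0 * e * c * (e * c - 2.0))
                              - 6.0 * e**3 * s**4)
    # Applicable only while the radicand is nonnegative (roughly up to
    # M = 85, 79, 70, 68 degrees for e = 0.2, 0.5, 0.9, 0.999); within
    # that region the argument of the cube root is positive, so a real
    # cube root is taken.
    R = (3.0 * e**2 * s * c - e**3 * s**3 + sqrt(radicand)) ** (1.0 / 3.0)
    return M + ((2.0 * c * (e * c - 1.0) + e * s**2) / R + R / e - s) / c
\end{verbatim}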
Note that with the argument reduction\[ E\of{M}=\Pi-E\of{\Pi-M}\text,\] the same formula may be used for the values of $M$ near the upper end of the interval $\intclos{0}{2\Pi}$. In between, the method involves \emph{casus irreducibiles}\footnote{See \cite[469]{FantetdeLagny1697} for a definition of the \emph{cas irreductible} of a cubic; see \cite[125\psqq]{Wantzel1843} for a proof of its irreducibility to real roots.} rather than real cube roots. This is however not a fundamental impediment: just like a cube root may be efficiently computed with generalized Lagny methods, so can a \emph{casus irreducibilis}; indeed Lagny gives the formulæ of order $3$ and degrees $1$ and $2$ for those cases in \cite[41]{FantetdeLagny1692}. It may be convenient to implement the function $\cos\frac{1}{3}\arccos$, with which the roots of any such case may be expressed. \section{FMA} \label{FMA} The overall strategy is different with FMA, since we need only round $x$ to half the precision ($26$~bits), rather than a third; this is because the expression $y-x^3$ may be computed\footnote{To the best of our knowledge, the first implementation of a cube root to make use of that possibility is a 2014 Stack Overflow answer by njuffa; see appendix~\ref{Comparison}.} as $\round{y-\pa{x^2}x}$, requiring only an exact square. This means that $ε_ξ'$ should ideally be somewhat less than $2^{-25}$ or $2^{-26}$ depending on the manner in which $x$ is rounded. Even with Canon optimization, Lagny's irrational method cannot achieve that from $q$. The error of the Lagny--Schröder rational method of order $4$ reaches below $2^{-21}$, and that of the generalized Lagny quadratic irrational method of order $4$ below $2^{-23}$. It seems that Canon optimization on the latter can readily bring down its error below $2^{-24}$, but not much further. Both the rational and quadratic irrational methods of order $5$ have errors below $2^{-28}$; little stands to be gained from optimization there. Conversely the computation of $r_0$ from $x$ may use a lower-order method, since it starts from $26$ bits rather than $17$, but still cannot add more than $53$; a fourth-order method suffices to make the truncation error negligible; again we use the rational method for the last step. Of course, besides allowing for a correctly-rounded $y-x^3$ from an $x$ with an exact square, FMA improves performance and rounding errors in all steps; even the slow path benefits from it, as the exact products may be computed with a multiplication and an FMA, $a^2_0=\round{a^2}$, $a^2_1=\round{a^2-a_0}$, instead of Veltkamp's algorithm. One notable difference comes from the evaluation of the final correction step (\ref{AdditiveCorrection}) $x+Δ$; it is clear that one should fuse the sum with a product in $Δ$, thus evaluating $r_0\DefineAs\round{x+Δ_1Δ_2}$. However, $x+Δ_1Δ_2$ is then no longer representable as the sum of two floating-point numbers. Instead, we define $r_1\DefineAs\round{x-r_0+Δ_1Δ_2}$, and $r\DefineAs r_0+r_1$. This $r$ errs from $x+Δ_1Δ_2$ by at most $\abs{r_1u}$ (the rounding error on $r_1$), which is at most $\round{x+Δ_1Δ_2}u^2$. 
This must be factored into the error $ε_r$ of $r$ from (\ref{MisroundingError}), which becomes \[ε_r\DefineAs \pa{1+ε_r'+\pa{ε_r'-ε_x}δ}\pa{1+u^2}-1\text.\] With\begin{align*} Δ_1 &= \round{6x \pa{x^2} + \round{3y}}\text{ and}\\ Δ_2 &= \round{\frac{\round{x\round{y-\pa{x^2}x}}} {\round{\round{x^3}\round{10x\pa{x^2}+16y}+\round{y^2}}}}\text, \end{align*} where $x^2$, $6x$, and $10x$ are exact and $x^3$ correctly rounded thanks to the trailing $0$s of $x$, $Δ_1$ and $Δ_2$ incur respective rounding errors of less than $γ_2$ and $\smash{\frac{1+γ_2+γ_1}{1-γ_3}-1<\frac{γ_6}{1-γ_3}}$, thus $δ<\frac{γ_8}{1-γ_3}$. The threshold for misrounding detection then becomes \[τ=\hex{1.E45E16EF5480F}\Multiply 2^{-76}\] for the quadratic irrational method of degree $5$ in step~\ref{ThirdPrecision}, rounding toward zero in step~\ref{RoundedApproximation}, and the method of fifth order in step~\ref{HighOrder}, allowing for $100u$ on the computation of $ξ$. \section{Performance considerations} \label{Performance} While the irrational methods, being the solutions equations of the second degree, are of the form \[\frac{\mathfrak{a}\pm\sqrt{\mathfrak{b}}}{\mathfrak{c}}\text,\]where the letters stand for polynomial expressions, the degree of $\mathfrak{c}$ is much lower than that of the denominator of the rational methods; a division may thus be scheduled early, and its result multiplied at the end, for a latency similar to that of the corresponding rational methods. This is not unlike the reason for Halley's preference for his irrational method over his rational one in \cite[140]{Halley1694}: \marginnote{\footnotesize Translation from \cite{Halley1809}.\\ And this formula is deservedly preferred before the rational one, which, on account of its large divisor, cannot be used without much trouble, in comparison of the irrational one, as manifold experience has informed me.}\begin{quote} Hæc vero formula merito præferenda eſt rationali, ob ingentem di\-vi\-ſo\-rem, non fine magno labore tractandum; cum Lateris quadrati extractio multo facilius procedat, ut experientia multiplex me docuit. \end{quote} While in Halley's case the issue is not the delay in scheduling the division (hand computation by a single human computer is not pipelined) but the latency of a division by a large number, both ultimately boil down to the denominator being of smaller degree---and thus, for Halley, having fewer digits---in the irrational method. \subsection*{Without FMA} In the second step $\FunctionBody{q}{ξ}$, where rounding errors are not a concern, Lagny's rational method is best evaluated as\[ ξ = \round{\frac{\round{\round{q^2}\round{q^2}}+\round{2yq}} {\round{2\round{\round{q^2}q}+y}}}\text. \] On Ivy Bridge\footnote{We thank Peter Barfuss for running some benchmarks for us on a machine with an Ivy Bridge processor.}, the irrational method evaluated as\[ ξ = \round{\round{\round{\round{\sqrt{3}}\round{q^2}}+ \round{\sqrt{\round{\round{4yq}-\round{\round{q^2}\round{q^2}}}}}}\round{\frac{\round{1/\sqrt{12}}}{q}}} \] has about the same latency as the rational method. With Canon optimization, the constants $\round{\sqrt{3}}$, $4$, and $\round{1/\sqrt{12}}$ should be replaced with\begin{align*} \round{\frac{κ}{\sqrt{\tfrac{1}{μ}-λ}}}&=\hex{1.BBA02BAFEA9B7}\text,\\ \round{\frac{1}{1-λμ}}&=\hex{1.0030F1F8A11DA}\Multiply2^2\text{, and}\\ \round{\sqrt{\tfrac{1}{μ}-λ}}&=\hex{1.2774CDF81A35}\Multiply2^{-2} \end{align*} respectively. 
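To make the evaluation order concrete, the following plain-Python sketch (an illustration only; it ignores the rounding annotations and the Canon optimization just described) performs one step $q \mapsto ξ$ with each of the two methods:
\begin{verbatim}
from math import sqrt

def xi_rational(q, y):
    # Lagny's rational method of order 3, evaluated as
    # (q^4 + 2 y q) / (2 q^3 + y).
    q2 = q * q
    return (q2 * q2 + 2.0 * y * q) / (2.0 * q2 * q + y)

def xi_irrational(q, y):
    # Lagny's irrational method of order 3, in the form
    # (sqrt(3) q^2 + sqrt(4 y q - q^4)) * (1 / sqrt(12)) / q.
    q2 = q * q
    return (sqrt(3.0) * q2 + sqrt(4.0 * y * q - q2 * q2)) * ((1.0 / sqrt(12.0)) / q)

# Example: one step from a crude estimate of the cube root of 2.
print(xi_rational(1.25, 2.0), xi_irrational(1.25, 2.0))
\end{verbatim}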
Using Lagny's irrational method with Canon optimization in step~\ref{ThirdPrecision}, directed rounding toward zero in step~\ref{RoundedApproximation}, and the method of fifth order in step~\ref{HighOrder}, the rate of passage in the slow ``potential misrounding'' path is $2.6480(52) \Multiply 10^{-4}$; the latency of that slow path is less than ten times that of the fast path, so that any improvement to the rate of passage in the slow path that comes at a cost of even a cycle to the latency of the fast path worsens average-case performance. As a result, we do not use rounding to nearest in step~\ref{RoundedApproximation}, as it replaces, on the critical path, a bitwise and by a sequential multiplication, subtraction, and addition. \subsection*{With FMA} The rational method of order $5$ may be evaluated with FMA as\[ ξ = \round{ \frac{ \round{\round{\round{\round{q^2}q}\round{\round{q^2}q}} \round{\round{\round{5q}\round{q^2}+\round{45 y}}}+ \round{\round{\round{30q}\round{q^2}+y}\round{y^2}}}} {\round{\round{\round{\round{q^2}q}\round{q^2}} \round{\round{15q}\round{q^2}+\round{51y}} + \round{\round{15 \round{y^2}} \round{q^2}}}}}\text. \] The quadratic irrational method of the same order may be evaluated with similar performance on Skylake and Kaby Lake if it is rewritten as\[ \frac{q}{\tfrac{20}{\sqrt{15}}q^3-\tfrac{2}{\sqrt{15}}y}\pa{\sqrt{-q^6+\tfrac{118}{5}q^3y-y^2}+\tfrac{5}{\sqrt{15}}\pa{q^3-y}} \]and evaluated as\[ ξ = \round{d \round{\sqrt{\round{\round{\round{q^2}q} \round{-\round{q^2}q+\round{\round{\tfrac{118}{5}}y}}+ \round{y^2}}}}+ \round{d\round{\round{\tfrac{5}{\sqrt{15}}}\round{\round{q^2}q-y}}}}\text, \] where\[ d\DefineAs\round{\frac{q}{\round{\round{q^2}\round{\round{\tfrac{20}{\sqrt{15}}}q}-\round{\round{\tfrac{2}{\sqrt{15}}}y}}}}\text. \] Using the quadratic irrational method in step~\ref{ThirdPrecision}, directed rounding toward zero in step~\ref{RoundedApproximation}, and the rational method of fourth order in step~\ref{HighOrder}, the rate of passage in the slow ``potential misrounding'' path is $3.05(18)\Multiply10^{-7}$. Again this means that rounding to nearest in step~\ref{HighOrder} would be detrimental to the average-case performance of the correctly-rounded cube root. \section{Rounding error analysis for the second step} \label{LagnyStepTwoRounding} TODO(egg): analysis. \section{Comparison with existing implementations} \label{Comparison} To our knowledge, no correctly-rounded implementations of the binary64 cube root have been published so far. We compare the performance and misrounding rates of some faithful methods described in this document with those of a number of pre-existing implementations of the cube root that we were able to find. \begin{figure}[t!] \begin{adjustwidth}{-4cm}{}\hsize=\linewidth \includegraphics[width=\linewidth]{cbrt_latency_misrounding.pdf} \caption{Latency of several implementations of the binary64 cube root, plotted against their misrounding rates, for various architectures. The error bars are expanded uncertainties determined from the combined standard uncertainties and a coverage factor of $3$. 
The letters $\mathrm{FMA}$ mark methods that use FMA; an obelisk marks unfaithful methods.}
\end{adjustwidth}
\end{figure}
We consider the following methods among our faithful ones, where the notation $p^qr^s$ means the generalized Lagny method of degree $q$ and order $p$ is used in step~\ref{ThirdPrecision} and the generalized Lagny method of degree $r$ and order $s$ is used in step~\ref{HighOrder}, a subscript $\mathrm{C}$ indicates Canon optimization, the letters $\mathrm{N}$ and $\mathrm{Z}$ respectively indicate the use of rounding to nearest and directed rounding toward zero in step~\ref{RoundedApproximation}, and the letters $\mathrm{FMA}$ indicate the use of FMA as described in appendix~\ref{FMA}:
\begin{itemize}[nosep]
\item method $3^1\mathrm{Z}6^1$, used by Principia\footnote{The implementation in \#1802 uses a suboptimal evaluation strategy in step~\ref{ThirdPrecision}; the latencies shown are for the evaluation strategy described in appendix~\ref{Performance} instead.} from pull request \href{https://github.com/mockingbirdnest/Principia/pull/1802}{\#1802}, in 2018, until it was replaced by a correctly-rounded method;
\item method $3^2_{\mathrm{C}}\mathrm{Z}5^1$, which backs the correctly-rounded implementation without FMA;
\item method $3^2_{\mathrm{C}}\mathrm{Z}6^1$ for comparison with $3^2_{\mathrm{C}}\mathrm{Z}5^1$ (see the discussion of step~\ref{HighOrder});
\item method $3^2_{\mathrm{C}}\mathrm{N}6^1$, the most accurate faithful method without FMA;
\item method $5^2\mathrm{Z}4^1\mathrm{FMA}$, which backs the correctly-rounded implementation with FMA;
\item method $5^2\mathrm{N}4^1\mathrm{FMA}$, the most accurate faithful method with FMA.
\end{itemize}
We compare these with the following pre-existing implementations of the cube root:
\begin{itemize}[nosep]
\item W.~Fullerton's 1977 function \texttt{DCBRT} from FNLIB\footnote{We thank Peter Barfuss for translating this function from FORTRAN to C++ for the purposes of this comparison.};
\item S.~L.~Moshier's 1984 function \texttt{cbrt} from the Cephes Mathematical Library;
\item W.~Kahan's 1991 methods described in \cite[3]{KahanBindel2001} ``to serve the IEEE Double precision and VAX G formats'', implemented as written; Kahan recommends that the high-order step be of fourth order in that case, and gives two fourth order methods; further, he does not specify how the rounding to a third of the precision should be performed, so we end up with four possible implementations, identified by the coefficient of the asymptotic error, $\mathrm Γ$ in Kahan's notation, and by the rounding direction of step~\ref{RoundedApproximation} ($\mathrm{N}$ for nearest, $\mathrm{Z}$ for directed toward zero);
\item P.~J.~Plauger's 1992 function \texttt{cbrt} from Dinkumware's C library, notably used by Microsoft's UCRT;
\item Sun Microsystems, Inc.'s 1993 function \texttt{cbrt} from FDLIBM;
\item U.~Drepper's 1997 function \texttt{cbrt} from the GNU C Library;
\item B.~Evans's 2005 modification of Sun's function \texttt{cbrt}, from the FreeBSD C library, also used by musl;
\item J.~Maddock's 2006 function \texttt{boost::math::cbrt<double>} from the Boost Library;
\item S.~Canon and J.~Kidder's 2008 function \texttt{cbrt} from Apple's Libm;
\item Advanced Micro Devices, Inc.'s 2014 function \texttt{cbrt} from libclc;
\item njuffa's 2014 function \texttt{cbrt}, given in a Stack Overflow answer to a question by P.~Cuoq about a correctly-rounded cube root;
\item N.~Brunie's 2018 metafunction \texttt{ml\_cbrt} from metalibm, for binary64 and AVX2.
Note that this function is listed as unstable, rather than officially supported, in the metalibm user guide; indeed it fails to be faithful in binary64. \end{itemize} \begin{table}[b!] \setmainfont[Mapping=tex-text, Numbers={OldStyle, Monospaced}, Ligatures={TeX, Common, Discretionary}, SmallCapsFeatures={Letters=SmallCaps}, Contextuals=WordFinal,]{Linux Libertine O} \begin{center} \begin{tabular}{rlrr} Method && Misrounding rate & Unfaithful rnd.~rate \\ \hline Fullerton, 1977& $\dagger$ & $2.60500(14)\Multiply10^{-1}$ & $3.07788(55)\Multiply10^{-2}$ \\ Moshier, 1984& & $8.55740(89)\Multiply10^{-2}$ \\ Kahan, 1991 & $\mathrm{Γ}=-1$ $\mathrm{Z}$ & $2.0044(15)\Multiply10^{-3}$ \\ \emph{ibid.}& $\mathrm{Γ}=-1$ $\mathrm{N}$ & $9.6466(99)\Multiply10^{-4}$ \\ \emph{ibid.}& $\mathrm{Γ}=-35/3$ $\mathrm{Z}$ & $1.7236(42)\Multiply10^{-4}$ \\ \emph{ibid.}& $\mathrm{Γ}=-35/3$ $\mathrm{N}$ & $8.367(29)\Multiply10^{-5}$ \\ Plauger, 1992& $\dagger$ & $3.02720(15)\Multiply10^{-1}$ & $2.66855(51)\Multiply10^{-2}$\\ Sun, 1993& & $8.31642(88)\Multiply10^{-2}$ \\ Drepper, 1997& $\dagger$ & $4.96271(16)\Multiply10^{-1}$ & $1.76785(13)\Multiply10^{-1}$ \\ Sun--Evans, 2005& & $8.33311(88)\Multiply10^{-2}$ \\ Maddock, 2006& $\dagger$ & $4.56285(16)\Multiply10^{-1}$ & $1.41547(12)\Multiply10^{-1}$\\ Canon--Kidder, 2008& & $8.02588(86)\Multiply10^{-2}$ \\ AMD, 2014& $\mathrm{FMA}$ & $1.6235(41)\Multiply10^{-4}$ \\ njuffa, 2014& $\mathrm{FMA}$ & $1.529(40)\Multiply10^{-6}$ \\ Brunie, 2018& $\mathrm{FMA}$ $\dagger$ & $1.43881(12)\Multiply10^{-1}$ & $5.918(25)\Multiply10^{-5}$\\ Leroy, 2018 & $3^1\mathrm{Z}6^1$ & $5.155(72)\Multiply10^{-6}$ \\ Leroy, 2021 & $3^2_{\mathrm{C}}\mathrm{Z}5^1$ & $4.564(68)\Multiply10^{-6}$ \\ \emph{ibid.}& $3^2_{\mathrm{C}}\mathrm{Z}6^1$ & $4.337(66)\Multiply10^{-6}$ \\ \emph{ibid.}& $3^2_{\mathrm{C}}\mathrm{N}6^1$ & $2.415(50)\Multiply10^{-6}$ \\ \emph{ibid.}& $5^2\mathrm{Z}4^1\mathrm{FMA}$ & $6.10(25)\Multiply10^{-9}$ \\ \emph{ibid.}& $5^2\mathrm{N}4^1\mathrm{FMA}$ & $2.99(18)\Multiply10^{-9}$ \end{tabular} \end{center} \caption{Misrounding and unfaithful rounding rates of various methods for the computation of a binary64 cube root. The letters $\mathrm{FMA}$ mark methods that use FMA; an obelisk marks unfaithful methods; the $0$s are omitted for faithful methods in the unfaithful rounding rate column.\label{TableMisroundingRates}} \end{table} The estimates of the latencies and their uncertainties were taken by supposing that the observed cycle counts follow a three-parameter logarithmic normal distribution, with the latency of the benchmark loop being its terminus, the other two parameters defining the distribution of random slowdowns. The maximum likelihood estimate of the terminus and the variance of that estimate were computed as described in \cite{Cohen1951}. The measured latency of a benchmark loop with a no-op function passed instead of the cube root was subtracted from these measurements, with the standard uncertainties combined under the assumption that they were uncorrelated. The misrounding and unfaithful rounding rates were estimated by sampling a billion values uniformly at random from the set $\intclop{1}{8}\Intersection\text{binary64}$ for methods with a misrounding rate greater than one per million, and a hundred billion values for the others. These rates are given in table~\ref{TableMisroundingRates}. \emergencystretch 1em \end{document}
{ "alphanum_fraction": 0.7043732463, "avg_line_length": 63.7264797508, "ext": "tex", "hexsha": "ba87b4f4c4a470274a2763661fd515af3644841f", "lang": "TeX", "max_forks_count": 92, "max_forks_repo_forks_event_max_datetime": "2022-03-21T03:35:37.000Z", "max_forks_repo_forks_event_min_datetime": "2015-02-11T23:08:58.000Z", "max_forks_repo_head_hexsha": "64c4c6c124f4744381b6489e39e6b53e2a440ce9", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "pleroy/Principia", "max_forks_repo_path": "documentation/cbrt.tex", "max_issues_count": 1019, "max_issues_repo_head_hexsha": "9292ea1fc2e4b4f0ce7a717e2f507168519f5f8a", "max_issues_repo_issues_event_max_datetime": "2022-03-29T21:02:15.000Z", "max_issues_repo_issues_event_min_datetime": "2015-01-03T11:42:27.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "net-lisias-ksp/Principia", "max_issues_repo_path": "documentation/cbrt.tex", "max_line_length": 508, "max_stars_count": 565, "max_stars_repo_head_hexsha": "9292ea1fc2e4b4f0ce7a717e2f507168519f5f8a", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "net-lisias-ksp/Principia", "max_stars_repo_path": "documentation/cbrt.tex", "max_stars_repo_stars_event_max_datetime": "2022-03-22T12:04:58.000Z", "max_stars_repo_stars_event_min_datetime": "2015-01-04T21:47:18.000Z", "num_tokens": 35336, "size": 102281 }
\documentclass[11pt]{article}
\usepackage{amsmath, amssymb}
\usepackage{geometry}
\usepackage{graphicx}
\begin{document}
\section{Introduction}
We consider as our image the following function, defined on $[-1, 1] \times [-1, 1]$:
\begin{equation}\label{eq:function}
\begin{split}
f(x, y) &= 0.5 - 0.2x - 0.3y - 0.05x^2 + 0.03xy + 0.04y^2 \\
&+ 0.03y^3 + 0.001x^3 - 0.0015x^2y + 0.002xy^2 - 0.0005y^3.
\end{split}
\end{equation}
This function was chosen to have no critical points, and to be predominantly linear, so that the higher order derivatives were not too large. This was mainly so that the higher order terms in the signatures would not overwhelm the smaller terms. The function is depicted as a greyscale image in Figure~\ref{fig:function}.
\begin{figure}
\centering
\includegraphics[width=12cm]{figures/function}
\caption{The test function used in this document, from Eq.~\eqref{eq:function}.}\label{fig:function}
\end{figure}
We consider the signatures of this function under various transformation groups, as detailed throughout the main body of the paper, and illustrate the signatures in three dimensions. A grid is overlaid on the image of the function itself, as shown in Fig.~\ref{fig:function_scanlines}. This grid is mapped to the signature surface to help understand what the signature looks like locally.
\begin{figure}
\centering
\includegraphics[width=12cm]{figures/function_scanlines}
\caption{The test function used in this document, from Eq.~\eqref{eq:function} with a grid of lines overlaid.}\label{fig:function_scanlines}
\end{figure}
\section{The transformation groups}
\subsection{Sim(2)}
For $\text{Sim}(2)$, we use the signature $(I_1, I_2, I_3)$, defined by
\begin{equation}\label{eq:sim2sig}
I_i = \frac{f}{\sqrt{J_1^2 + J_2^2 + J_3^2}} J_i, \qquad i \in \{1, 2, 3\}
\end{equation}
where
\begin{equation*}
\begin{split}
J_1 &= f_x^2 + f_y^2, \\
J_2 &= (f_{xx} + f_{xy})^2, \\
J_3 &= f_{xx}^2 + 2f_{xy}^2 + f_{yy}^2.
\end{split} \end{equation*} The signature is visualised in Fig.~\ref{fig:sim2signature} \begin{figure} \centering \includegraphics[width=12cm]{figures/Sim2_signature} \caption{$\text{Sim}(2)$ signature} \label{fig:sim2signature} \end{figure} \subsection{SE(2)} For $SE(2)$, we use the signature \begin{equation}\label{eq:se2sig} \begin{split} I_1 &= f \\ I_2 &= f_x^2 + f_y^2 \\ I_3 &= f_x^2 f_{xy} + f_x f_y f_{yy} - f_x f_y f_{xx} - f_y^2 f_{xy} \end{split} \end{equation} The signature is visualised below in Fig.~\ref{fig:se2signature} \begin{figure} \centering \includegraphics[width=12cm]{figures/SE2_signature} \caption{$SE(2)$ signature} \label{fig:se2signature} \end{figure} \subsection{E(2)} For $E(2)$, we use the signature \begin{equation}\label{eq:e2sig} \begin{split} I_1 &= f \\ I_2 &= f_x^2 + f_y^2 \\ I_3 &= f_{xx} + f_{yy} \end{split} \end{equation} The signature is visualised in Fig.~\ref{fig:e2signature} \begin{figure} \centering \includegraphics[width=12cm]{figures/E2_signature} \caption{$E(2)$ signature} \label{fig:e2signature} \end{figure} \subsection{SA(2)} For $SA(2)$, we use the signature \begin{equation}\label{eq:sa2sig} \begin{split} I_1 &= f_{xx}f_{yy} - f_{xy}^2 \\ I_2 &= f_y^2f_{xx} - 2f_x f_y f_{xy} + f_x^2f_{yy} \\ I_3 &= f_{xxx}f_y^3 - 3*f_{xxy}f_x f_y^2 + 3f_{xyy}f_x^2f_y - f_{yyy}f_x^3 \end{split} \end{equation} The signature is visualised in Fig.~\ref{fig:sa2signature} \begin{figure} \centering \includegraphics[width=12cm]{figures/SA2_signature} \caption{$SA(2)$ signature} \label{fig:sa2signature} \end{figure} \subsection{A(2)} For $A(2)$, we use the signature \begin{equation}\label{eq:a2sig} \begin{split} I_1 &= \frac{fC^3}{\sqrt{C^6 + D^6 + E^4}} \\ I_2 &= \frac{fD^3}{\sqrt{C^6 + D^6 + E^4}} \\ I_3 &= \frac{fE^2}{\sqrt{C^6 + D^6 + E^4}} \\ \end{split} \end{equation} where \begin{equation*} \begin{split} C &= f_{xx}f_{yy} - f_{xy}^2 \\ D &= f_y^2f_{xx} - 2f_xf_y f_{xy} + f_x^2f_{yy} \\ E &= f_{xxx}f_y^3 - 3f_{xxy}f_x f_y^2 + 3f_{xyy}f_x^2 f_y - f_{yyy}f_x^3 \end{split} \end{equation*} The signature is visualised in Fig.~\ref{fig:a2signature} \begin{figure} \centering \includegraphics[width=12cm]{figures/A2_signature} \caption{$A(2)$ signature} \label{fig:a2signature} \end{figure} \subsection{M{\"o}bius} For $\text{M{\"o}bius}$, we use the signature \begin{equation}\label{eq:mobiussig} \begin{split} I_1 &= \frac{fJ_1^4}{\sqrt{J_1^8 + K_1^2 + K_4^2}} \\ I_2 &= \frac{fK_1}{\sqrt{J_1^8 + K_1^2 + K_4^2}} \\ I_3 &= \frac{fK_4}{\sqrt{J_1^8 + K_1^2 + K_4^2}} \end{split} \end{equation} where \begin{equation*} \begin{split} J_1 &= f_xf_x + f_yf_y \\ J_2 &= f_{xx} + f_{yy} \\ J_3 &= J_1(f_x(f_{xxx} + f_{xyy}) + f_y(f_{xxy} + f_{yyy})) + 2J_2(f_x^2f_{xx} + 2f_xf_yf_{xy} + f_y^2f_{yy}) \\ J_4 &= J_1(f_x(f_{xxy} + f_{yyy}) - f_y(f_{xxx} + f_{xyy})) + 2J_2(f_x^2f_{xy} + f_xf_yf_{yy} - f_xf_yf_{xx} - f_y^2f_{xy}) \\ K_1 &= f_y^5f_{yyy} + 9/2f_y^2f_{yy}^2f_x^2 + f_y^3f_{yyy}f_x^2 + 3/2f_{yy}^2f_x^4 -9f_y^3f_{yy}f_xf_{xy} + 3f_yf_{yy}f_x^3f_{xy} + \\ & 3/2f_y^4f_{xy}^2 - 9f_y^2f_x^2f_{xy}^2 + 3/2f_x^4f_{xy}^2 + 3f_y^4f_xf_{xyy} + 3f_y^2f_x^3f_{xyy} + \\ & 3f_y^4f_{yy}f_{xx} + 3f_{yy}f_x^4f_{xx} + 3f_y^3f_xf_{xy}f_{xx} - 9f_yf_x^3f_{xy}f_{xx} + 3/2f_y^4f_{xx}^2 + 9/2f_y^2f_x^2f_{xx}^2 + \\ & 3f_y^3f_x^2f_{xxy} + 3f_yf_x^4f_{xxy} + f_y^2f_x^3f_{xxx} + f_x^5f_{xxx} \\ K_4 &= -3f_yf_{yy}^2f_x^3 + f_y^2f_{yyy}f_x^3 + f_{yyy}f_x^5 + 9f_y^2f_{yy}f_x^2f_{xy} - 3f_{yy}f_x^4f_{xy} - 6f_y^3f_xf_{xy}^2 + \\ & 6f_yf_x^3f_{xy}^2 - 3f_y^3f_x^2f_{xyy} - 3f_yf_x^4f_{xyy} - 3f_y^3f_{yy}f_xf_{xx} + 
3f_yf_{yy}f_x^3f_{xx} + 3f_y^4f_{xy}f_{xx} - \\ & 9f_y^2f_x^2f_{xy}f_{xx} + 3f_y^3f_xf_{xx}^2 + 3f_y^4f_xf_{xxy} + 3f_y^2f_x^3f_{xxy} - f_y^5f_{xxx} - f_y^3f_x^2f_{xxx} \\ \end{split} \end{equation*} The signature is visualised in Fig.~\ref{fig:mobiussignature} \begin{figure} \centering \includegraphics[width=12cm]{figures/Mobius_signature} \caption{$\text{M{\"o}bius}$ signature} \label{fig:mobiussignature} \end{figure} \subsection{$PSL(3, \mathbb{R})$} For $PSL(3, \mathbb{R})$, we use the signature \begin{equation}\label{eq:psl3rsig} \begin{split} I_1 &= \frac{f}{\gamma}D^6 \\ I_2 &= \frac{f}{\gamma}P_4^3\\ I_3 &= \frac{f}{\gamma}(E^2- 12DP_5)^2 \end{split} \end{equation} where \begin{equation*} \begin{split} C &= f_{xx}f_{yy} - f_{xy}^2 \\ D &= f_y^2f_{xx} - 2f_xf_yf_{xy} + f_x^2f_{yy} \\ E &= f_{xxx}f_y^3 - 3f_{xxy}f_xf_y^2 + 3f_{xyy}f_x^2f_y - f_{yyy}f_x^3 \\ \gamma &= \sqrt{D^{12} + P_4^6 + (E^2 - 12DP_5)^4} \\ Q_4 &= 4f_xf_{xy}^2f_{xyy} - f_x^2f_{xyy}^2 - 2f_{yyy}f_xf_{xy}f_{xx} + \\ & 2f_{yy}f_xf_{xyy}f_{xx} - 6f_yf_{xy}f_{xyy}f_{xx} + \\ & 2f_yf_{yyy}f_{xx}^2 + f_{yyy}f_x^2f_{xxy} - 6f_{yy}f_xf_{xy}f_{xxy} + \\ & 4f_yf_{xy}^2f_{xxy} + f_yf_xf_{xyy}f_{xxy} + 2f_yf_{yy}f_{xx}f_{xxy} - \\ & f_y^2f_{xxy}^2 + 2f_{yy}^2f_xf_{xxx} - f_yf_{yyy}f_xf_{xxx} \\ & -2f_yf_{yy}f_{xy}f_{xxx} + f_y^2f_{xyy}f_{xxx} \\ P_5 &= f_{yyy}f_x^3f_{xy} - f_{yy}f_x^2f_{xy}^2 + 2f_yf_xf_{xy}^3 - \\ & f_{yy}f_x^3f_{xyy} - f_yf_x^2f_{xy}f_{xyy} + f_{yy}^2f_x^2f_{xx} - \\ & f_yf_{yyy}f_x^2f_{xx} - 2f_yf_{yy}f_xf_{xy}f_{xx} - \\ & f_y^2f_{xy}^2f_{xx} + 2f_y^2f_xf_{xyy}f_{xx} + f_y^2f_{yy}f_{xx}^2 + \\ & 2f_yf_{yy}f_x^2f_{xxy} - f_y^2f_xf_{xy}f_{xxy} - f_y^3f_{xx}f_{xxy} - \\ & f_y^2f_{yy}f_xf_{xxx} + f_y^3f_{xy}f_{xxx} \\ P_4 &= -4C^2 + Q_4 \end{split} \end{equation*} The signature is visualised in Fig.~\ref{fig:psl3rsignature} \begin{figure} \centering \includegraphics[width=12cm]{figures/PSL3R_signature} \caption{$PSL(3, \mathbb{R})$ signature} \label{fig:psl3rsignature} \end{figure} \end{document}
{ "alphanum_fraction": 0.6379310345, "avg_line_length": 34.3982683983, "ext": "tex", "hexsha": "20d7ea865d706b9c6c44d4eb3cda924e231bd381", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "ed70550c21f51a5e1a53c6ea423d1db6f0f0d037", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "rgbrown/invariants", "max_forks_repo_path": "paper/figpaper.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "ed70550c21f51a5e1a53c6ea423d1db6f0f0d037", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "rgbrown/invariants", "max_issues_repo_path": "paper/figpaper.tex", "max_line_length": 79, "max_stars_count": null, "max_stars_repo_head_hexsha": "ed70550c21f51a5e1a53c6ea423d1db6f0f0d037", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "rgbrown/invariants", "max_stars_repo_path": "paper/figpaper.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 3645, "size": 7946 }
\section{Tutorial}
The aim of the tutorial mode is to show the user the variety of PIPS abilities. At the beginning, the chosen example is loaded, as shown in Figure~\ref{fig:tutorial_screen} (example ``Acca-2011'').
\begin{figure}[h!]
\centering
\includegraphics[width=0.8\textwidth]{reportCh4/tutorial_screen}
\caption{``Acca-2011'' tutorial screen.}
\label{fig:tutorial_screen}
\end{figure}
This mode does not require significant interaction with the user: the source code ({\bf 1.}) and the script with operations and comments ({\bf 2.}) are provided. The user only needs to change the current step using the slider ({\bf 3.}). Stepping is very flexible and does not have to be sequential: the user can go back or skip some steps. The current step and the total number of steps are displayed over the slider ({\bf 4.}).
{ "alphanum_fraction": 0.7494061758, "avg_line_length": 40.0952380952, "ext": "tex", "hexsha": "4037787ac8a7051232c9087aa25ae9db06f31cfd", "lang": "TeX", "max_forks_count": 12, "max_forks_repo_forks_event_max_datetime": "2022-02-18T02:01:51.000Z", "max_forks_repo_forks_event_min_datetime": "2015-03-26T08:05:38.000Z", "max_forks_repo_head_hexsha": "86b33ca9da736e832b568c5637a2381f360f1996", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "DVSR1966/par4all", "max_forks_repo_path": "packages/PIPS/pips/src/Documentation/paws/reportCh4/tutorial.tex", "max_issues_count": 7, "max_issues_repo_head_hexsha": "86b33ca9da736e832b568c5637a2381f360f1996", "max_issues_repo_issues_event_max_datetime": "2019-03-11T16:01:39.000Z", "max_issues_repo_issues_event_min_datetime": "2017-05-29T09:29:00.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "DVSR1966/par4all", "max_issues_repo_path": "packages/PIPS/pips/src/Documentation/paws/reportCh4/tutorial.tex", "max_line_length": 70, "max_stars_count": 51, "max_stars_repo_head_hexsha": "86b33ca9da736e832b568c5637a2381f360f1996", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "DVSR1966/par4all", "max_stars_repo_path": "packages/PIPS/pips/src/Documentation/paws/reportCh4/tutorial.tex", "max_stars_repo_stars_event_max_datetime": "2022-02-18T02:01:50.000Z", "max_stars_repo_stars_event_min_datetime": "2015-01-31T01:51:39.000Z", "num_tokens": 231, "size": 842 }
%=================================================================
\ifx%
\wholebook%
\relax%
\else
% --------------------------------------------
% Lulu:
\documentclass[a4paper,10pt,twoside]{book}
\usepackage[
papersize={6.13in,9.21in},
hmargin={.75in,.75in},
vmargin={.75in,1in},
ignoreheadfoot
]{geometry}
\input{../common.tex}
\setboolean{lulu}{true}
% --------------------------------------------
% A4:
% \documentclass[a4paper,11pt,twoside]{book}
% \input{../common.tex}
% \usepackage{a4wide}
% --------------------------------------------
\graphicspath{{figures/} {../figures/}}
\begin{document}
\fi
%=================================================================
%\renewcommand{\nnbb}[2]{} % Disable editorial comments
\sloppy
%=================================================================
\chapter{\pharos Cheat sheet}
\chalabel{cheatSheet}
Welcome to PhaROS!
\newline
Remember always to check for updates in \url{http://car.mines-douai.fr/squeaksource/PhaROS.html}.
\newline
\paragraph{Running the generated package \newline}
The generated package implements some scripts that show you the basic usage of PhaROS. In PhaROS a package has scripts. Each script represents a Node. Each of these nodes shares the same class and code. So, take that into account when you implement a new script.
\paragraph{Basic ROS commands for your generated package \newline}
For browsing your package through the command line \newline
roscd name-of-your-package \newline
For editing the image \newline
rosrun name-of-your-package edit \newline
For running a script with a graphical interface \newline
rosrun name-of-your-package pharos script-name \newline
For running a script without a graphical interface \newline
rosrun name-of-your-package headless script-name \newline
For listing your scripts \newline
rosls name-of-your-package/image/scripts \newline
\section{Inside a package object}
\paragraph{Creating a new Script \newline}
For creating a new script you need to add to your package class a method named script{NameOfYourScript}. Inside this method you will have available a controller, which is an object that gives construction facilities and access to an already built node: self controller node. Inside this method you configure the given node and trigger all the logic of your node. (So, from the ROS point of view, each script is a node.) In order to make this script available for execution you have two possibilities:
\begin{itemize}
\item write a text file that executes this method ( PackageName new script{NameOfYourScript}. ) and save it into packageFolder/image/scripts
\item commit all your code and use the pharos tool to install it back; this will generate all the needed files (we are working on making this step easier)
\end{itemize}
For examples of what a script is, browse the generated example package and match names with the names available in the script folder (roscd {package-name}/image/scripts).
\paragraph{Publish topic \newline}
\begin{code}
| publisher |
publisher := self controller node topicPublisher: '/example/string' typedAs: 'std-msgs/String'.
publisher send: [ : string | string data: 'this is an example' ].
\end{code}
\paragraph{Subscribe topic \newline}
\begin{code}
(self controller node buildConnectionFor: '/example/string' )
	typedAs: 'std-msgs/String';
	for: [ : string | Transcript show: string data ];
	connect .
\end{code}
\paragraph{Call service \newline}
\begin{code}
| service |
service := self controller node service: '/rosout/get-loggers'.
service call.
\end{code}
\paragraph{Define service \newline}
\begin{code}
self controller node serve: [ :req :rsp |
	Transcript show: 'Service has been called.'; cr. ]
	at: '/pharos/service'
	typedAs: 'roscpp/Empty'.
\end{code}
\paragraph{Inject/install a nodelet \newline}
\begin{code}
self controller nodelets use: YourNodeletClass as: #nameToBeInvoked.
\end{code}
\paragraph{Specifying controller configuration \newline}
In the package object, implement the message \#buildController. This method has the responsibility to build the controller and return it. For building your own controller:
\begin{code}
buildController
	^ MyController build

buildController
	^ self myControllerConfigurationMethod: super buildController.

myControllerConfigurationMethod: aController
	<< Make here your configurations >>
	^ aController
\end{code}
\paragraph{Define a new type \newline}
Define, as a class method, a method with a cool name, such as myCoolTypeDefinition:
\begin{code}
myCoolTypeDefinition
	^ PhaROSCompositeType named: 'anStandarROS/TypeName' definedBy: {
		#header -> (PhaROSTypeBrowser instance definition: 'std-msgs/Header').
		#auint8 -> (PhaROSUInt8Type new).
		#auint16 -> (PhaROSUInt16Type new).
		#aint32 -> (PhaROSInt32Type new).
		#afloat32 -> (PhaROSFloat32Type new).
		#afloat64 -> (PhaROSFloat64Type new).
		#astring -> (PhaROSStringType new).
		#atime -> (PhaROSTimeType new).
	} withConstants: { #CONSTANT -> ASimpleObjectValue }.
\end{code}
As shown in the definition, you give an array of associations of the form (\#nameOfTheField -> Type new). For checking all the available types, just browse any of these classes to go to the package, or check the reference. Constant values cannot be complex: just numbers, strings, or booleans.
\paragraph{Register a type \newline}
Define on the class side of your package the method \#types:
\begin{code}
types
	^ super types, { #YourTypeName -> self myCoolTypeDefinition }.
\end{code}
In order to deploy the type into ROS you will need to commit all your work and install it through the pharos command (as shown in the shell commands section). We are working to enhance this step.
\section{Shell commands}
\paragraph{Install PhaROS based Package\newline}
pharos install PACKAGE [OPTIONS]\newline
Example \newline
\-\-pharos install esug --location=/home/user/ros/workspace --version=2.0\newline
Help
\-\-pharos install --help\newline
\paragraph{Create PhaROS based Package\newline}
pharos create PACKAGE [OPTIONS]\newline
Example\newline
\-\-pharos create --location=/home/user/ros/workspace --version=2.0 --author=YourName --author-email=YourEmail \newline
\-\-Tip: Be sure the email is a correct one. If it is not correctly spelled you will notice during the last step.
\newline
Help \newline
\-\-pharos create --help\newline
\paragraph{Register Repository of packages \newline}
pharos register-repository --url=anUrl --package=aPackage [ OPTIONS ]\newline
Example \newline
\-\-pharos register-repository --url=http://smalltalkhub.com/mc/user/YourProject/main --package=YourProjectDirectory --directory=YourProjectDirectory \newline
\-\-Tip: If your repository requires user/password for reading add --user=User --password=Password to the example. \newline
\-\-Disclaimer: User/Password will be stored in a text file without any security.
\newline Help \-\-pharos register-repository --help\newline \paragraph{Listing registered repositories\newline} pharos list-repositories \paragraph{Creating a directory for your own project repository\newline} pharos create-repository PACKAGENAME [ OPTIONS ]\newline Example \newline \-\-pharos create-repository example --user=UserName > directory.st\newline \-\-pharos create-repository example --user=UserName --output= directory.st\newline Help \newline \-\-pharos create-repository --help \newline %\chapterauthor{\authorjankurs{} \\ \authorguillaume{} \\ \authorlukas{}} %============================================================= \ifx\wholebook\relax\else \end{document} \fi %=============================================================
{ "alphanum_fraction": 0.6876884422, "avg_line_length": 30.6153846154, "ext": "tex", "hexsha": "f82e65a206bc571213692169fe1a56e0424c7931", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "4f25eaf2b76663741fb6fc8df75792c028057c4d", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "CARMinesDouai/PhaROS-Book", "max_forks_repo_path": "0ld-PhaROS-book/cheatSheet/cheatSheet.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "4f25eaf2b76663741fb6fc8df75792c028057c4d", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "CARMinesDouai/PhaROS-Book", "max_issues_repo_path": "0ld-PhaROS-book/cheatSheet/cheatSheet.tex", "max_line_length": 263, "max_stars_count": 2, "max_stars_repo_head_hexsha": "4f25eaf2b76663741fb6fc8df75792c028057c4d", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "CARMinesDouai/PhaROS-Book", "max_stars_repo_path": "0ld-PhaROS-book/cheatSheet/cheatSheet.tex", "max_stars_repo_stars_event_max_datetime": "2018-06-19T14:11:58.000Z", "max_stars_repo_stars_event_min_datetime": "2015-01-09T15:17:58.000Z", "num_tokens": 2069, "size": 7960 }
\section{Future work}\label{sec:future-work}
Future work for this dataset would include the tagging of all named entities in each assembly operation (instead of having a single list for the entire procedure). Moreover, the assembly graph containing both the assembly order and the spatial disposition of the product components would be useful for validating more complex information extraction systems which intend to recover the full assembly knowledge from the text and image representation alone (without operator assistance).
For a system which aims to extract only the named entities in the assembly operations for speeding up object recognition (by restricting the database of object models to search for and recognize), it would be useful to start with the raw text from the \glspl{pdf} and perform an initial text preprocessing stage. This stage could include word tokenization, sentence splitting, \gls{pos} tagging and morphological analysis. Later on, a gazetteer could be used in conjunction with machine learning algorithms (such as \glspl{hmm} or \glspl{crf}) to detect the named entities in the textual assembly operations. After having the named entities, an orthographic matcher could be used to perform named entity coreference (finding different mentions of the same entity) and also type disambiguation, using word context to make sure that the semantic analysis was correct.
The evaluation of a system with these goals can be done by comparing the list of named entities identified as assembly objects with the dataset validation list of product object components. Moreover, if the dataset has named entity tags for each word in the testing dataset, then a more complete evaluation can be done, making it possible to assess the reliability of the entity disambiguation and coreference algorithms. Either way, this evaluation would result in the computation of the precision, recall, accuracy and F1 scores for the recognized entities, given the list of entities that the \gls{ner} system was supposed to detect, using a k-fold cross validation approach to split the dataset into training and test text.
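As a sketch of this evaluation, and assuming hypothetical entity lists purely for illustration, the comparison between the recognized entities and the expected ones could be computed as follows:
\begin{verbatim}
def ner_scores(predicted, expected):
    predicted, expected = set(predicted), set(expected)
    tp = len(predicted & expected)   # correctly recognized entities
    fp = len(predicted - expected)   # spurious recognitions
    fn = len(expected - predicted)   # missed entities
    precision = tp / (tp + fp) if tp + fp else 0.0
    recall = tp / (tp + fn) if tp + fn else 0.0
    f1 = (2 * precision * recall / (precision + recall)
          if precision + recall else 0.0)
    return precision, recall, f1

# Hypothetical example: entities found by the NER system versus the
# validation list of product object components.
print(ner_scores(['screw', 'washer', 'panel'], ['screw', 'panel', 'bolt']))
\end{verbatim}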
{ "alphanum_fraction": 0.820415879, "avg_line_length": 352.6666666667, "ext": "tex", "hexsha": "887af586d97b87cd9d99d442f614f6c12209914d", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "9dfc09f15ede9cfe9f819c7ef6b652a52f298100", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "carlosmccosta/Assembly-Named-Entity-Recognition-Dataset", "max_forks_repo_path": "article/tex/sections/future-work.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "9dfc09f15ede9cfe9f819c7ef6b652a52f298100", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "carlosmccosta/Assembly-Named-Entity-Recognition-Dataset", "max_issues_repo_path": "article/tex/sections/future-work.tex", "max_line_length": 1590, "max_stars_count": null, "max_stars_repo_head_hexsha": "9dfc09f15ede9cfe9f819c7ef6b652a52f298100", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "carlosmccosta/Assembly-Named-Entity-Recognition-Dataset", "max_stars_repo_path": "article/tex/sections/future-work.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 405, "size": 2116 }
% -*- LaTeX -*- % -*- coding: utf-8 -*- % % michael a.g. aïvázis % orthologue % (c) 1998-2021 all rights reserved % %----------------------------------- \section{applications} % -------------------------------------- % the application harness \begin{frame}[fragile] % \frametitle{Creating an application} % \vskip -3ex \begin{itemize} % \item applications are the top level component managers % \python{firstnumber=10,linerange={10-34}}{listings/quad.py} % \end{itemize} % \end{frame} % -------------------------------------- % the application harness \begin{frame}[fragile] % \frametitle{Auto-launching} % \begin{itemize} % \item instantiating and launching the application % \python{firstnumber=54,linerange={54-60}}{listings/quad.py} % \item a sample configuration file % \cfg{firstnumber=8, linerange={8-20}}{listings/quad.cfg} % \end{itemize} % \end{frame} % -------------------------------------- % the application component \begin{frame}[fragile] % \frametitle{The application component} % \begin{itemize} % \item the shell hierarchy in pyre % \begin{figure} \includegraphics[scale=1.0]{figures/shells.pdf} \end{figure} % \item our \identifier{Quad} derives from \identifier{Application}, so it has a \identifier{shell} % \end{itemize} % \end{frame} % -------------------------------------- % the parallel version \begin{frame}[fragile] % \frametitle{Parallel integration} % \begin{itemize} % \item the \identifier{mpi} entry point % \python{firstnumber=36,linerange={36-52}}{listings/quad.py} % \item the \package{mpi} package is part of the pyre distribution \begin{itemize} \item handles initialization and finalization of \package{MPI} \item simplifies most of the ``overhead'' activities \item provides an OO veneer \end{itemize} % \end{itemize} % \end{frame} % -------------------------------------- % running the mpi program \begin{frame}[fragile] % \frametitle{Running in parallel} % \begin{itemize} % \item minor modifications to the configuration file... % \cfg{firstnumber=8, linerange={8-28}}{listings/quad.cfg} % \end{itemize} % \end{frame} %----------------------------------- % end of file
{ "alphanum_fraction": 0.6067567568, "avg_line_length": 19.8214285714, "ext": "tex", "hexsha": "b6704335549f60277b2de7893fc33a7305f8ba7e", "lang": "TeX", "max_forks_count": 12, "max_forks_repo_forks_event_max_datetime": "2022-02-20T17:27:23.000Z", "max_forks_repo_forks_event_min_datetime": "2018-04-23T22:50:40.000Z", "max_forks_repo_head_hexsha": "7e1f0287eb7eba1c6d1ef385e5160079283ac363", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "avalentino/pyre", "max_forks_repo_path": "doc/overview/sections/applications.tex", "max_issues_count": 53, "max_issues_repo_head_hexsha": "7e1f0287eb7eba1c6d1ef385e5160079283ac363", "max_issues_repo_issues_event_max_datetime": "2021-10-07T21:41:32.000Z", "max_issues_repo_issues_event_min_datetime": "2018-05-31T04:55:00.000Z", "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "avalentino/pyre", "max_issues_repo_path": "doc/overview/sections/applications.tex", "max_line_length": 80, "max_stars_count": 25, "max_stars_repo_head_hexsha": "7e1f0287eb7eba1c6d1ef385e5160079283ac363", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "avalentino/pyre", "max_stars_repo_path": "doc/overview/sections/applications.tex", "max_stars_repo_stars_event_max_datetime": "2021-12-10T06:01:23.000Z", "max_stars_repo_stars_event_min_datetime": "2018-04-23T01:45:39.000Z", "num_tokens": 626, "size": 2220 }
%
% Chapter 5.5
%
\section*{5.5 Average Value of a Function}
The average value of \(f\) on \([a, b]\) is
\[ f_{avg} = \frac{1}{b-a} \int_a^b f(x)dx \]
\subsection*{Mean Value Theorem for Integrals}
If \(f\) is continuous on \([a, b]\), then there exists a number \(c\) in \([a, b]\) such that
\[ f(c) = f_{avg} = \frac{1}{b-a} \int_a^b f(x)dx \]
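For example, for \(f(x) = x^2\) on \([0, 3]\):
\[ f_{avg} = \frac{1}{3-0} \int_0^3 x^2 dx = \frac{1}{3} \cdot 9 = 3 \]
and the Mean Value Theorem gives \(f(c) = c^2 = 3\), so \(c = \sqrt{3}\), which lies in \([0, 3]\).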
{ "alphanum_fraction": 0.58, "avg_line_length": 25, "ext": "tex", "hexsha": "35e708dc818dcb3c2a423d3c85ebf07bf8ea856c", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "b6b0a43ef551d1735ba4af55f3917ed1ed39e926", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "davidcorbin/calc-1-study-guide", "max_forks_repo_path": "tex/5-5.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "b6b0a43ef551d1735ba4af55f3917ed1ed39e926", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "davidcorbin/calc-1-study-guide", "max_issues_repo_path": "tex/5-5.tex", "max_line_length": 94, "max_stars_count": null, "max_stars_repo_head_hexsha": "b6b0a43ef551d1735ba4af55f3917ed1ed39e926", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "davidcorbin/calc-1-study-guide", "max_stars_repo_path": "tex/5-5.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 138, "size": 350 }
\section{\module{parser} --- Access Python parse trees} % Copyright 1995 Virginia Polytechnic Institute and State University % and Fred L. Drake, Jr. This copyright notice must be distributed on % all copies, but this document otherwise may be distributed as part % of the Python distribution. No fee may be charged for this document % in any representation, either on paper or electronically. This % restriction does not affect other elements in a distributed package % in any way. \declaremodule{builtin}{parser} \modulesynopsis{Access parse trees for Python source code.} \moduleauthor{Fred L. Drake, Jr.}{[email protected]} \sectionauthor{Fred L. Drake, Jr.}{[email protected]} \index{parsing!Python source code} The \module{parser} module provides an interface to Python's internal parser and byte-code compiler. The primary purpose for this interface is to allow Python code to edit the parse tree of a Python expression and create executable code from this. This is better than trying to parse and modify an arbitrary Python code fragment as a string because parsing is performed in a manner identical to the code forming the application. It is also faster. There are a few things to note about this module which are important to making use of the data structures created. This is not a tutorial on editing the parse trees for Python code, but some examples of using the \module{parser} module are presented. Most importantly, a good understanding of the Python grammar processed by the internal parser is required. For full information on the language syntax, refer to the \citetitle[../ref/ref.html]{Python Language Reference}. The parser itself is created from a grammar specification defined in the file \file{Grammar/Grammar} in the standard Python distribution. The parse trees stored in the AST objects created by this module are the actual output from the internal parser when created by the \function{expr()} or \function{suite()} functions, described below. The AST objects created by \function{sequence2ast()} faithfully simulate those structures. Be aware that the values of the sequences which are considered ``correct'' will vary from one version of Python to another as the formal grammar for the language is revised. However, transporting code from one Python version to another as source text will always allow correct parse trees to be created in the target version, with the only restriction being that migrating to an older version of the interpreter will not support more recent language constructs. The parse trees are not typically compatible from one version to another, whereas source code has always been forward-compatible. Each element of the sequences returned by \function{ast2list()} or \function{ast2tuple()} has a simple form. Sequences representing non-terminal elements in the grammar always have a length greater than one. The first element is an integer which identifies a production in the grammar. These integers are given symbolic names in the C header file \file{Include/graminit.h} and the Python module \refmodule{symbol}. Each additional element of the sequence represents a component of the production as recognized in the input string: these are always sequences which have the same form as the parent. An important aspect of this structure which should be noted is that keywords used to identify the parent node type, such as the keyword \keyword{if} in an \constant{if_stmt}, are included in the node tree without any special treatment. 
For example, the \keyword{if} keyword is represented by the tuple \code{(1, 'if')}, where \code{1} is the numeric value associated with all \constant{NAME} tokens, including variable and function names defined by the user. In an alternate form returned when line number information is requested, the same token might be represented as \code{(1, 'if', 12)}, where the \code{12} represents the line number at which the terminal symbol was found.

Terminal elements are represented in much the same way, but with no child elements and with the addition of the source text which was identified. The example of the \keyword{if} keyword above is representative. The various types of terminal symbols are defined in the C header file \file{Include/token.h} and the Python module \refmodule{token}.

The AST objects are not required to support the functionality of this module, but are provided for three purposes: to allow an application to amortize the cost of processing complex parse trees, to provide a parse tree representation which conserves memory space when compared to the Python list or tuple representation, and to ease the creation of additional modules in C which manipulate parse trees. A simple ``wrapper'' class may be created in Python to hide the use of AST objects.

The \module{parser} module defines functions for a few distinct purposes. The most important purposes are to create AST objects and to convert AST objects to other representations such as parse trees and compiled code objects, but there are also functions which serve to query the type of parse tree represented by an AST object.

\begin{seealso}
  \seemodule{symbol}{Useful constants representing internal nodes of the parse tree.}
  \seemodule{token}{Useful constants representing leaf nodes of the parse tree and functions for testing node values.}
\end{seealso}

\subsection{Creating AST Objects \label{Creating ASTs}}

AST objects may be created from source code or from a parse tree. When creating an AST object from source, different functions are used to create the \code{'eval'} and \code{'exec'} forms.

\begin{funcdesc}{expr}{source}
The \function{expr()} function parses the parameter \var{source} as if it were an input to \samp{compile(\var{source}, 'file.py', 'eval')}. If the parse succeeds, an AST object is created to hold the internal parse tree representation; otherwise, an appropriate exception is thrown.
\end{funcdesc}

\begin{funcdesc}{suite}{source}
The \function{suite()} function parses the parameter \var{source} as if it were an input to \samp{compile(\var{source}, 'file.py', 'exec')}. If the parse succeeds, an AST object is created to hold the internal parse tree representation; otherwise, an appropriate exception is thrown.
\end{funcdesc}

\begin{funcdesc}{sequence2ast}{sequence}
This function accepts a parse tree represented as a sequence and builds an internal representation if possible. If it can validate that the tree conforms to the Python grammar and all nodes are valid node types in the host version of Python, an AST object is created from the internal representation and returned to the caller. If there is a problem creating the internal representation, or if the tree cannot be validated, a \exception{ParserError} exception is thrown. An AST object created this way should not be assumed to compile correctly; normal exceptions thrown by compilation may still be initiated when the AST object is passed to \function{compileast()}.
This may indicate problems not related to syntax (such as a \exception{MemoryError} exception), but may also be due to constructs such as the result of parsing \code{del f(0)}, which escapes the Python parser but is checked by the bytecode compiler. Sequences representing terminal tokens may be represented as either two-element lists of the form \code{(1, 'name')} or as three-element lists of the form \code{(1, 'name', 56)}. If the third element is present, it is assumed to be a valid line number. The line number may be specified for any subset of the terminal symbols in the input tree. \end{funcdesc} \begin{funcdesc}{tuple2ast}{sequence} This is the same function as \function{sequence2ast()}. This entry point is maintained for backward compatibility. \end{funcdesc} \subsection{Converting AST Objects \label{Converting ASTs}} AST objects, regardless of the input used to create them, may be converted to parse trees represented as list- or tuple- trees, or may be compiled into executable code objects. Parse trees may be extracted with or without line numbering information. \begin{funcdesc}{ast2list}{ast\optional{, line_info}} This function accepts an AST object from the caller in \var{ast} and returns a Python list representing the equivalent parse tree. The resulting list representation can be used for inspection or the creation of a new parse tree in list form. This function does not fail so long as memory is available to build the list representation. If the parse tree will only be used for inspection, \function{ast2tuple()} should be used instead to reduce memory consumption and fragmentation. When the list representation is required, this function is significantly faster than retrieving a tuple representation and converting that to nested lists. If \var{line_info} is true, line number information will be included for all terminal tokens as a third element of the list representing the token. Note that the line number provided specifies the line on which the token \emph{ends}. This information is omitted if the flag is false or omitted. \end{funcdesc} \begin{funcdesc}{ast2tuple}{ast\optional{, line_info}} This function accepts an AST object from the caller in \var{ast} and returns a Python tuple representing the equivalent parse tree. Other than returning a tuple instead of a list, this function is identical to \function{ast2list()}. If \var{line_info} is true, line number information will be included for all terminal tokens as a third element of the list representing the token. This information is omitted if the flag is false or omitted. \end{funcdesc} \begin{funcdesc}{compileast}{ast\optional{, filename\code{ = '<ast>'}}} The Python byte compiler can be invoked on an AST object to produce code objects which can be used as part of an \keyword{exec} statement or a call to the built-in \function{eval()}\bifuncindex{eval} function. This function provides the interface to the compiler, passing the internal parse tree from \var{ast} to the parser, using the source file name specified by the \var{filename} parameter. The default value supplied for \var{filename} indicates that the source was an AST object. Compiling an AST object may result in exceptions related to compilation; an example would be a \exception{SyntaxError} caused by the parse tree for \code{del f(0)}: this statement is considered legal within the formal grammar for Python but is not a legal language construct. 
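A minimal interactive sketch of this situation (illustrative only; it uses the \code{del f(0)} example mentioned above, and the exact exception text may differ between Python versions):

\begin{verbatim}
>>> import parser
>>> ast = parser.suite('del f(0)')   # accepted by the parser
>>> code = parser.compileast(ast)    # rejected: raises SyntaxError
\end{verbatim}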
The \exception{SyntaxError} raised for this condition is the one normally generated by the Python byte-compiler, which is why it can be raised at this point by the \module{parser} module. Most causes of compilation failure can be diagnosed programmatically by inspection of the parse tree.
\end{funcdesc}

\subsection{Queries on AST Objects \label{Querying ASTs}}

Two functions are provided which allow an application to determine if an AST was created as an expression or a suite. Neither of these functions can be used to determine if an AST was created from source code via \function{expr()} or \function{suite()} or from a parse tree via \function{sequence2ast()}.

\begin{funcdesc}{isexpr}{ast}
When \var{ast} represents an \code{'eval'} form, this function returns true; otherwise it returns false. This is useful, since code objects normally cannot be queried for this information using existing built-in functions. Note that the code objects created by \function{compileast()} cannot be queried like this either, and are identical to those created by the built-in \function{compile()}\bifuncindex{compile} function.
\end{funcdesc}

\begin{funcdesc}{issuite}{ast}
This function mirrors \function{isexpr()} in that it reports whether an AST object represents an \code{'exec'} form, commonly known as a ``suite.'' It is not safe to assume that this function is equivalent to \samp{not isexpr(\var{ast})}, as additional syntactic fragments may be supported in the future.
\end{funcdesc}

\subsection{Exceptions and Error Handling \label{AST Errors}}

The parser module defines a single exception, but may also pass other built-in exceptions from other portions of the Python runtime environment. See each function for information about the exceptions it can raise.

\begin{excdesc}{ParserError}
Exception raised when a failure occurs within the parser module. This is generally produced for validation failures rather than the built-in \exception{SyntaxError} thrown during normal parsing. The exception argument is either a string describing the reason for the failure or a tuple containing a sequence causing the failure from a parse tree passed to \function{sequence2ast()} and an explanatory string. Calls to \function{sequence2ast()} need to be able to handle either type of exception, while calls to other functions in the module will only need to be aware of the simple string values.
\end{excdesc}

Note that the functions \function{compileast()}, \function{expr()}, and \function{suite()} may throw exceptions which are normally thrown by the parsing and compilation process. These include the built-in exceptions \exception{MemoryError}, \exception{OverflowError}, \exception{SyntaxError}, and \exception{SystemError}. In these cases, these exceptions carry all the meaning normally associated with them. Refer to the descriptions of each function for detailed information.

\subsection{AST Objects \label{AST Objects}}

Ordered and equality comparisons are supported between AST objects. Pickling of AST objects (using the \refmodule{pickle} module) is also supported.

\begin{datadesc}{ASTType}
The type of the objects returned by \function{expr()}, \function{suite()} and \function{sequence2ast()}.
\end{datadesc}

AST objects have the following methods:

\begin{methoddesc}[AST]{compile}{\optional{filename}}
Same as \code{compileast(\var{ast}, \var{filename})}.
\end{methoddesc}

\begin{methoddesc}[AST]{isexpr}{}
Same as \code{isexpr(\var{ast})}.
\end{methoddesc}

\begin{methoddesc}[AST]{issuite}{}
Same as \code{issuite(\var{ast})}.
\end{methoddesc}

\begin{methoddesc}[AST]{tolist}{\optional{line_info}}
Same as \code{ast2list(\var{ast}, \var{line_info})}.
\end{methoddesc}

\begin{methoddesc}[AST]{totuple}{\optional{line_info}}
Same as \code{ast2tuple(\var{ast}, \var{line_info})}.
\end{methoddesc}

\subsection{Examples \label{AST Examples}}

The parser module allows operations to be performed on the parse tree of Python source code before the bytecode is generated, and provides for inspection of the parse tree for information-gathering purposes. Two examples are presented. The simple example demonstrates emulation of the \function{compile()}\bifuncindex{compile} built-in function and the complex example shows the use of a parse tree for information discovery.

\subsubsection{Emulation of \function{compile()}}

While many useful operations may take place between parsing and bytecode generation, the simplest operation is to do nothing. For this purpose, using the \module{parser} module to produce an intermediate data structure is equivalent to the code

\begin{verbatim}
>>> code = compile('a + 5', 'file.py', 'eval')
>>> a = 5
>>> eval(code)
10
\end{verbatim}

The equivalent operation using the \module{parser} module is somewhat longer, and allows the intermediate internal parse tree to be retained as an AST object:

\begin{verbatim}
>>> import parser
>>> ast = parser.expr('a + 5')
>>> code = ast.compile('file.py')
>>> a = 5
>>> eval(code)
10
\end{verbatim}

An application which needs both AST and code objects can package this code into readily available functions:

\begin{verbatim}
import parser

def load_suite(source_string):
    ast = parser.suite(source_string)
    return ast, ast.compile()

def load_expression(source_string):
    ast = parser.expr(source_string)
    return ast, ast.compile()
\end{verbatim}

\subsubsection{Information Discovery}

Some applications benefit from direct access to the parse tree. The remainder of this section demonstrates how the parse tree provides access to module documentation defined in docstrings\index{string!documentation}\index{docstrings} without requiring that the code being examined be loaded into a running interpreter via \keyword{import}. This can be very useful for performing analyses of untrusted code.

Generally, the example will demonstrate how the parse tree may be traversed to distill interesting information. Two functions and a set of classes are developed which provide programmatic access to high level function and class definitions provided by a module. The classes extract information from the parse tree and provide access to the information at a useful semantic level, one function provides a simple low-level pattern matching capability, and the other function defines a high-level interface to the classes by handling file operations on behalf of the caller. All source files mentioned here which are not part of the Python installation are located in the \file{Demo/parser/} directory of the distribution.

The dynamic nature of Python allows the programmer a great deal of flexibility, but most modules need only a limited measure of this when defining classes, functions, and methods. In this example, the only definitions that will be considered are those which are defined in the top level of their context, e.g., a function defined by a \keyword{def} statement at column zero of a module, but not a function defined within a branch of an \keyword{if} ... \keyword{else} construct, though there are some good reasons for doing so in some situations.
Nesting of definitions will be handled by the code developed in the example. To construct the upper-level extraction methods, we need to know what the parse tree structure looks like and how much of it we actually need to be concerned about. Python uses a moderately deep parse tree so there are a large number of intermediate nodes. It is important to read and understand the formal grammar used by Python. This is specified in the file \file{Grammar/Grammar} in the distribution. Consider the simplest case of interest when searching for docstrings: a module consisting of a docstring and nothing else. (See file \file{docstring.py}.) \begin{verbatim} """Some documentation. """ \end{verbatim} Using the interpreter to take a look at the parse tree, we find a bewildering mass of numbers and parentheses, with the documentation buried deep in nested tuples. \begin{verbatim} >>> import parser >>> import pprint >>> ast = parser.suite(open('docstring.py').read()) >>> tup = ast.totuple() >>> pprint.pprint(tup) (257, (264, (265, (266, (267, (307, (287, (288, (289, (290, (292, (293, (294, (295, (296, (297, (298, (299, (300, (3, '"""Some documentation.\n"""'))))))))))))))))), (4, ''))), (4, ''), (0, '')) \end{verbatim} The numbers at the first element of each node in the tree are the node types; they map directly to terminal and non-terminal symbols in the grammar. Unfortunately, they are represented as integers in the internal representation, and the Python structures generated do not change that. However, the \refmodule{symbol} and \refmodule{token} modules provide symbolic names for the node types and dictionaries which map from the integers to the symbolic names for the node types. In the output presented above, the outermost tuple contains four elements: the integer \code{257} and three additional tuples. Node type \code{257} has the symbolic name \constant{file_input}. Each of these inner tuples contains an integer as the first element; these integers, \code{264}, \code{4}, and \code{0}, represent the node types \constant{stmt}, \constant{NEWLINE}, and \constant{ENDMARKER}, respectively. Note that these values may change depending on the version of Python you are using; consult \file{symbol.py} and \file{token.py} for details of the mapping. It should be fairly clear that the outermost node is related primarily to the input source rather than the contents of the file, and may be disregarded for the moment. The \constant{stmt} node is much more interesting. In particular, all docstrings are found in subtrees which are formed exactly as this node is formed, with the only difference being the string itself. The association between the docstring in a similar tree and the defined entity (class, function, or module) which it describes is given by the position of the docstring subtree within the tree defining the described structure. By replacing the actual docstring with something to signify a variable component of the tree, we allow a simple pattern matching approach to check any given subtree for equivalence to the general pattern for docstrings. Since the example demonstrates information extraction, we can safely require that the tree be in tuple form rather than list form, allowing a simple variable representation to be \code{['variable_name']}. A simple recursive function can implement the pattern matching, returning a Boolean and a dictionary of variable name to value mappings. (See file \file{example.py}.) 
\begin{verbatim} from types import ListType, TupleType def match(pattern, data, vars=None): if vars is None: vars = {} if type(pattern) is ListType: vars[pattern[0]] = data return 1, vars if type(pattern) is not TupleType: return (pattern == data), vars if len(data) != len(pattern): return 0, vars for pattern, data in map(None, pattern, data): same, vars = match(pattern, data, vars) if not same: break return same, vars \end{verbatim} Using this simple representation for syntactic variables and the symbolic node types, the pattern for the candidate docstring subtrees becomes fairly readable. (See file \file{example.py}.) \begin{verbatim} import symbol import token DOCSTRING_STMT_PATTERN = ( symbol.stmt, (symbol.simple_stmt, (symbol.small_stmt, (symbol.expr_stmt, (symbol.testlist, (symbol.test, (symbol.and_test, (symbol.not_test, (symbol.comparison, (symbol.expr, (symbol.xor_expr, (symbol.and_expr, (symbol.shift_expr, (symbol.arith_expr, (symbol.term, (symbol.factor, (symbol.power, (symbol.atom, (token.STRING, ['docstring']) )))))))))))))))), (token.NEWLINE, '') )) \end{verbatim} Using the \function{match()} function with this pattern, extracting the module docstring from the parse tree created previously is easy: \begin{verbatim} >>> found, vars = match(DOCSTRING_STMT_PATTERN, tup[1]) >>> found 1 >>> vars {'docstring': '"""Some documentation.\n"""'} \end{verbatim} Once specific data can be extracted from a location where it is expected, the question of where information can be expected needs to be answered. When dealing with docstrings, the answer is fairly simple: the docstring is the first \constant{stmt} node in a code block (\constant{file_input} or \constant{suite} node types). A module consists of a single \constant{file_input} node, and class and function definitions each contain exactly one \constant{suite} node. Classes and functions are readily identified as subtrees of code block nodes which start with \code{(stmt, (compound_stmt, (classdef, ...} or \code{(stmt, (compound_stmt, (funcdef, ...}. Note that these subtrees cannot be matched by \function{match()} since it does not support multiple sibling nodes to match without regard to number. A more elaborate matching function could be used to overcome this limitation, but this is sufficient for the example. Given the ability to determine whether a statement might be a docstring and extract the actual string from the statement, some work needs to be performed to walk the parse tree for an entire module and extract information about the names defined in each context of the module and associate any docstrings with the names. The code to perform this work is not complicated, but bears some explanation. The public interface to the classes is straightforward and should probably be somewhat more flexible. Each ``major'' block of the module is described by an object providing several methods for inquiry and a constructor which accepts at least the subtree of the complete parse tree which it represents. The \class{ModuleInfo} constructor accepts an optional \var{name} parameter since it cannot otherwise determine the name of the module. The public classes include \class{ClassInfo}, \class{FunctionInfo}, and \class{ModuleInfo}. All objects provide the methods \method{get_name()}, \method{get_docstring()}, \method{get_class_names()}, and \method{get_class_info()}. 
The \class{ClassInfo} objects support \method{get_method_names()} and \method{get_method_info()} while the other classes provide \method{get_function_names()} and \method{get_function_info()}. Within each of the forms of code block that the public classes represent, most of the required information is in the same form and is accessed in the same way, with classes having the distinction that functions defined at the top level are referred to as ``methods.'' Since the difference in nomenclature reflects a real semantic distinction from functions defined outside of a class, the implementation needs to maintain the distinction. Hence, most of the functionality of the public classes can be implemented in a common base class, \class{SuiteInfoBase}, with the accessors for function and method information provided elsewhere. Note that there is only one class which represents function and method information; this parallels the use of the \keyword{def} statement to define both types of elements. Most of the accessor functions are declared in \class{SuiteInfoBase} and do not need to be overridden by subclasses. More importantly, the extraction of most information from a parse tree is handled through a method called by the \class{SuiteInfoBase} constructor. The example code for most of the classes is clear when read alongside the formal grammar, but the method which recursively creates new information objects requires further examination. Here is the relevant part of the \class{SuiteInfoBase} definition from \file{example.py}: \begin{verbatim} class SuiteInfoBase: _docstring = '' _name = '' def __init__(self, tree = None): self._class_info = {} self._function_info = {} if tree: self._extract_info(tree) def _extract_info(self, tree): # extract docstring if len(tree) == 2: found, vars = match(DOCSTRING_STMT_PATTERN[1], tree[1]) else: found, vars = match(DOCSTRING_STMT_PATTERN, tree[3]) if found: self._docstring = eval(vars['docstring']) # discover inner definitions for node in tree[1:]: found, vars = match(COMPOUND_STMT_PATTERN, node) if found: cstmt = vars['compound'] if cstmt[0] == symbol.funcdef: name = cstmt[2][1] self._function_info[name] = FunctionInfo(cstmt) elif cstmt[0] == symbol.classdef: name = cstmt[2][1] self._class_info[name] = ClassInfo(cstmt) \end{verbatim} After initializing some internal state, the constructor calls the \method{_extract_info()} method. This method performs the bulk of the information extraction which takes place in the entire example. The extraction has two distinct phases: the location of the docstring for the parse tree passed in, and the discovery of additional definitions within the code block represented by the parse tree. The initial \keyword{if} test determines whether the nested suite is of the ``short form'' or the ``long form.'' The short form is used when the code block is on the same line as the definition of the code block, as in \begin{verbatim} def square(x): "Square an argument."; return x ** 2 \end{verbatim} while the long form uses an indented block and allows nested definitions: \begin{verbatim} def make_power(exp): "Make a function that raises an argument to the exponent `exp'." def raiser(x, y=exp): return x ** y return raiser \end{verbatim} When the short form is used, the code block may contain a docstring as the first, and possibly only, \constant{small_stmt} element. The extraction of such a docstring is slightly different and requires only a portion of the complete pattern used in the more common case. 
As implemented, the docstring will only be found if there is only one \constant{small_stmt} node in the \constant{simple_stmt} node. Since most functions and methods which use the short form do not provide a docstring, this may be considered sufficient. The extraction of the docstring proceeds using the \function{match()} function as described above, and the value of the docstring is stored as an attribute of the \class{SuiteInfoBase} object. After docstring extraction, a simple definition discovery algorithm operates on the \constant{stmt} nodes of the \constant{suite} node. The special case of the short form is not tested; since there are no \constant{stmt} nodes in the short form, the algorithm will silently skip the single \constant{simple_stmt} node and correctly not discover any nested definitions. Each statement in the code block is categorized as a class definition, function or method definition, or something else. For the definition statements, the name of the element defined is extracted and a representation object appropriate to the definition is created with the defining subtree passed as an argument to the constructor. The representation objects are stored in instance variables and may be retrieved by name using the appropriate accessor methods. The public classes provide any accessors required which are more specific than those provided by the \class{SuiteInfoBase} class, but the real extraction algorithm remains common to all forms of code blocks. A high-level function can be used to extract the complete set of information from a source file. (See file \file{example.py}.) \begin{verbatim} def get_docs(fileName): import os import parser source = open(fileName).read() basename = os.path.basename(os.path.splitext(fileName)[0]) ast = parser.suite(source) return ModuleInfo(ast.totuple(), basename) \end{verbatim} This provides an easy-to-use interface to the documentation of a module. If information is required which is not extracted by the code of this example, the code may be extended at clearly defined points to provide additional capabilities.
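As a closing illustration (not part of the distributed example; the session below assumes \file{example.py} and \file{docstring.py} are importable from the current directory), the high-level interface might be exercised along these lines:

\begin{verbatim}
>>> from example import get_docs
>>> docs = get_docs('docstring.py')
>>> docs.get_docstring()
'Some documentation.\n'
>>> docs.get_function_names()
[]
\end{verbatim}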
{ "alphanum_fraction": 0.7618217617, "avg_line_length": 43.5126404494, "ext": "tex", "hexsha": "15b46ae57ddf29d6c0d4647a2271f482fc9979a2", "lang": "TeX", "max_forks_count": 2, "max_forks_repo_forks_event_max_datetime": "2019-07-18T21:33:17.000Z", "max_forks_repo_forks_event_min_datetime": "2017-01-30T21:52:13.000Z", "max_forks_repo_head_hexsha": "93e24b88564de120b1296165b5c55975fdcb8a3c", "max_forks_repo_licenses": [ "PSF-2.0" ], "max_forks_repo_name": "jasonadu/Python-2.5", "max_forks_repo_path": "Doc/lib/libparser.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "93e24b88564de120b1296165b5c55975fdcb8a3c", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "PSF-2.0" ], "max_issues_repo_name": "jasonadu/Python-2.5", "max_issues_repo_path": "Doc/lib/libparser.tex", "max_line_length": 77, "max_stars_count": 1, "max_stars_repo_head_hexsha": "93e24b88564de120b1296165b5c55975fdcb8a3c", "max_stars_repo_licenses": [ "PSF-2.0" ], "max_stars_repo_name": "jasonadu/Python-2.5", "max_stars_repo_path": "Doc/lib/libparser.tex", "max_stars_repo_stars_event_max_datetime": "2018-08-21T09:19:46.000Z", "max_stars_repo_stars_event_min_datetime": "2018-08-21T09:19:46.000Z", "num_tokens": 7023, "size": 30981 }
\section{Introduction}
Although on-line learning algorithms date back to 1960, when Professor Bernard Widrow and his graduate student Ted Hoff first developed them under the name ADALINE \cite{adaline}, essentially an early form of stochastic gradient descent (SGD), for a long time they saw comparatively little use. With the increasing size of analyzed data, more and more attention has been drawn to on-line learning, where the computational complexity of an algorithm becomes the limiting factor. In this paper we present an overview of various gradient descent methods and argue in favor of SGD in the large-scale problem setting and in the context of asymptotic analysis.

Furthermore, in the last decade the phenomena known as Big Data and the Internet of Things have drawn even more attention to the rapidly increasing rate at which data is collected. With industry-scale data sets growing from gigabytes to petabytes of storage, these phenomena put increasing pressure on data processing, and sequential algorithms are no longer sufficient. Under these constraints we present an overview of possible approaches to parallelizing SGD and guide the reader through the pressing problems of parallelization and their potential remedies. We also take a closer look at the work of practitioners within the industry and present some of the latest findings on parallelizing SGD to ensure its scalability across numerous processing units.
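To make the object of study concrete, the following minimal sketch (illustrative only; \texttt{grad} is assumed to compute the gradient of the loss on a single example and is not specified here) shows the plain sequential SGD update that the parallelization schemes discussed later must preserve or approximate:

\begin{verbatim}
import random

def sgd(w, data, grad, lr=0.01, epochs=10):
    """Plain sequential SGD: update the weights after every example."""
    for _ in range(epochs):
        random.shuffle(data)            # visit examples in random order
        for x, y in data:
            g = grad(w, x, y)           # single-example gradient
            w = [wi - lr * gi for wi, gi in zip(w, g)]
    return w
\end{verbatim}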
{ "alphanum_fraction": 0.8334480385, "avg_line_length": 363.25, "ext": "tex", "hexsha": "1041f7c0537c0ed0321feaa16c3f5a5aceda73b5", "lang": "TeX", "max_forks_count": 15, "max_forks_repo_forks_event_max_datetime": "2022-01-20T03:55:35.000Z", "max_forks_repo_forks_event_min_datetime": "2016-05-02T11:00:28.000Z", "max_forks_repo_head_hexsha": "dfe47a40fe57ec5e0ccfd672a8dcaf246386de99", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "nyxcalamity/classwork", "max_forks_repo_path": "tum/computational-aspects-of-machine-learning/paper/introduction.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "dfe47a40fe57ec5e0ccfd672a8dcaf246386de99", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "nyxcalamity/classwork", "max_issues_repo_path": "tum/computational-aspects-of-machine-learning/paper/introduction.tex", "max_line_length": 763, "max_stars_count": 8, "max_stars_repo_head_hexsha": "dfe47a40fe57ec5e0ccfd672a8dcaf246386de99", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "nyxcalamity/classwork", "max_stars_repo_path": "tum/computational-aspects-of-machine-learning/paper/introduction.tex", "max_stars_repo_stars_event_max_datetime": "2020-09-03T00:08:40.000Z", "max_stars_repo_stars_event_min_datetime": "2017-02-10T19:12:21.000Z", "num_tokens": 255, "size": 1453 }
% !TeX root = ../../main.tex
\documentclass[../../main.tex]{subfiles}
\begin{document}

\section{This is a section}
This is the section explanation. Sections are made up of subsections and subsections are made up of subsubsections. ((Sub)sub)sections can be compiled by themselves, which is helpful for collaboration, so please divide your files appropriately into subfiles.

\subfile{sub01/sub01.tex}
\subfile{sub02/sub02.tex}
\subfile{sub03/sub03.tex}

\end{document}
{ "alphanum_fraction": 0.7655913978, "avg_line_length": 35.7692307692, "ext": "tex", "hexsha": "3bfb03f822fa6d0aff1609640cb695e5baa71d89", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "cd0950fdc56563ea12c9685348ecf97c587f36da", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "GangLi-0814/latex-lecture-note-template", "max_forks_repo_path": "tex/section01/section01.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "cd0950fdc56563ea12c9685348ecf97c587f36da", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "GangLi-0814/latex-lecture-note-template", "max_issues_repo_path": "tex/section01/section01.tex", "max_line_length": 140, "max_stars_count": 1, "max_stars_repo_head_hexsha": "cd0950fdc56563ea12c9685348ecf97c587f36da", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "GangLi-0814/latex-lecture-note-template", "max_stars_repo_path": "tex/section01/section01.tex", "max_stars_repo_stars_event_max_datetime": "2021-08-08T14:49:55.000Z", "max_stars_repo_stars_event_min_datetime": "2021-08-08T14:49:55.000Z", "num_tokens": 123, "size": 465 }
\section*{Competing interests}
The authors declare that they have no competing interests.

\section*{Authors' contributions}
RCC, MPM, SJC, FXC and BL designed the experiment; RCC and SLC developed the real-time DMN tracking system; SLC and JL provided the custom real-time fMRI sequence; ARM, NTVD, JM, and RCC wrote the manuscript; ARM, JM, CF, SG and RCC performed data analyses; JP, ARM, BP, and CF organized data for release; RCC, CCCB, ARM, NTVD, and MB designed and implemented the behavioral tasks and assessments; RT, AMB, RCC, MPM and SJC managed data collection; AA, MMB, ARM, SC, TPC, CG, AG, JG, SH, MK, AL, AMB, LP, HR, CS, ES, RS, MS, ET, KT, BV, LW, and ABW were involved in participant recruitment and performed data collection; VDC, MK, DL, BM, RW, and DW managed data sharing through the COINs pipeline.

\section*{Acknowledgements}
We would like to thank Cathy Hu and Raj Sangoi for MRI operation. Data collection and salary support were provided by NIMH BRAINS R01MH101555.
{ "alphanum_fraction": 0.7602808425, "avg_line_length": 110.7777777778, "ext": "tex", "hexsha": "fa590a9fa06e16767317ece690110ff35360a286", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "ca510239cc3cf89956e697becc39a0c5ad951b73", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "ccraddock/rt_network_decoding", "max_forks_repo_path": "craddock_rt_network_endmatter.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "ca510239cc3cf89956e697becc39a0c5ad951b73", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "ccraddock/rt_network_decoding", "max_issues_repo_path": "craddock_rt_network_endmatter.tex", "max_line_length": 697, "max_stars_count": 1, "max_stars_repo_head_hexsha": "ca510239cc3cf89956e697becc39a0c5ad951b73", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "ccraddock/rt_network_decoding", "max_stars_repo_path": "craddock_rt_network_endmatter.tex", "max_stars_repo_stars_event_max_datetime": "2018-03-18T21:53:19.000Z", "max_stars_repo_stars_event_min_datetime": "2018-03-18T21:53:19.000Z", "num_tokens": 272, "size": 997 }
\begin{comment} \authorinfo{Jacques Carette} {McMaster University} {[email protected]} \authorinfo{Oleg Kiselyov} {FNMOC} {[email protected]} \authorinfo{Chung-chieh Shan} {Rutgers University} {[email protected]} \end{comment} \begin{abstract} \ifdim\parindent=0pt \parindent=1em \fi We have built the first \emph{family} of tagless interpretations for a higher-order typed object language in a typed metalanguage (Haskell or ML) that require no dependent types, generalized algebraic data types, or postprocessing to eliminate tags. The statically type-preserving interpretations include an evaluator, a compiler (or staged evaluator), a partial evaluator, and call-by-name and call-by-value CPS transformers. Our principal technique is to encode \ifshort HOAS \else de Bruijn or higher-order abstract syntax \fi using combinator functions rather than data constructors. In other words, we represent object terms not in an initial algebra but using the coalgebraic structure of the $\lambda$-calculus. Our representation also simulates inductive maps from types to types, which are required for typed partial evaluation and CPS transformations. Our encoding of an object term abstracts uniformly over the family of ways to interpret it, yet statically assures that the interpreters never get stuck. %% To achieve self\hyp interpretation and show Jones\hyp %% optimality, we relate this exemplar of higher-rank and higher-kind %% polymorphism \ifshort\else (provided by ML functors and Haskell~98 %% constructor classes) \fi to plugging a term into a context of %% let\hyp polymorphic bindings. This family of interpreters thus demonstrates again that it is useful to abstract over higher-kinded types. \end{abstract} \begin{quote} \small It should also be possible to define languages with a highly refined syntactic type structure. Ideally, such a treatment should be metacircular, in the sense that the type structure \linebreak[1] used in the defined language should be adequate for the defining language. \rm \ifshort \hfill John Reynolds~\else\\\fi\citep{reynolds-definitional} \end{quote} %\category{CR-number}{subcategory}{third-level} %\terms term1, term2 %\keywords keyword1, keyword2 \section{Introduction}\label{intro} A popular way to define and implement a language is to embed it in another \citep{landin-next}. Embedding means to represent terms and values of the \emph{object language} as terms and values in the \emph{metalanguage}, so as to interpret the former in the latter \citep{reynolds-definitional}. Embedding is especially appropriate for domain\hyp specific languages (DSLs) because it supports rapid prototyping and integration with the host environment \citep{hudak-building}. Most interpreters suffer from various kinds of overhead, making it less efficient to run object programs via the metalanguage than to implement the object language directly on the machine running the metalanguage \citep{jones-partial}. Two major sources of overhead are dispatching on the syntax of object terms and tagging the types of object values. If the metalanguage supports code generation \citep{nielson-two-level,nielson-automatic,gomard-partial,bawden-quasiquotation,taha-sound}, then the embedding can avoid the dispatching overhead by compiling object programs, that is, by specializing an interpreter to object programs \citep{futamura-partial}. Specializing an interpreter is thus a promising way to build a DSL\@. 
However, the tagging overhead remains, especially if the object language and the metalanguage both have a sound type system. The quest to remove all interpretive overhead, in particular by specializing the interpreter using a \emph{Jones-optimal} partial evaluator \citep{jones-partial}, has motivated much work on typed specialization \citep{danvy-simple,Danvy-tagging-encoding,hughes-type,Birkedal-PE-ML,taha-tag,Makholm-TagElim} and type systems (see~\S\ref{s:fancier}). \begin{figure} \begin{floatrule} \begin{proofrules} \[ \[ [x:t_1] \proofoverdots e:t_2 \] \justifies \fun{x}e:t_1\to t_2 \] \[ \[ [f:t_1\to t_2] \proofoverdots e:t_1\to t_2 \] \justifies \fix{f}e:t_1\to t_2 \] \[ e_0:t_1\to t \quad e_1:t_1 \justifies e_0 e_1: t \] \[ \text{$n$ is an integer} \justifies n:\ZZ \] \[ \text{$b$ is a boolean} \justifies b:\BB \] \[ e:\BB \quad e_1:t \quad e_2:t \justifies \cond{e}{e_1}{e_2}:t \] \[ e_1:\ZZ \quad e_2:\ZZ \justifies e_1+e_2:\ZZ \] \[ e_1:\ZZ \quad e_2:\ZZ \justifies e_1 \times e_2:\ZZ \] \[ e_1:\ZZ \quad e_2:\ZZ \justifies e_1 \le e_2:\BB \] \end{proofrules} \end{floatrule} \caption{Our typed object language} \label{fig:object} \end{figure} This paper shows how to eliminate tagging overhead, whether in the context of code generation and whether in the presence of dispatching overhead. We use metalanguage types, without such fancy features as generalized algebraic data types (GADTs) or dependent types, to rule out ill-typed object terms statically, thus speeding up interpretation and assuring that our interpreters do not get stuck. We illustrate the problem of tagging overhead in this section using a simple evaluator as example. We apply our solution first to evaluation, then to code\hyp generation tasks such as partial evaluation. \ifshort We leave aside the solved problem of writing a parser\slash type\hyp checker, for embedding object language objects into the metalanguage (whether using dependent types \citep{WalidICFP02} or not \citep{baars-typing}), and just enter them by hand. \fi \subsection{The tag problem}\label{tagproblem} \begin{SaveVerbatim}{2a} type var = VZ | VS of var type exp = V of var | B of bool | L of exp | A of exp * exp \end{SaveVerbatim} \begin{SaveVerbatim}[commandchars=\@\{\}]{2b} let rec lookup (x::env) = function VZ -> x | VS v -> lookup env v let rec eval0 env = function | V v -> lookup env v | B b -> b | L e -> fun x -> eval0 (x::env) e | A (e1,e2) -> (eval0 env e1) (eval0 env e2) \end{SaveVerbatim} \begin{SaveVerbatim}{2c} type u = UB of bool | UA of (u -> u) \end{SaveVerbatim} \begin{SaveVerbatim}{2d} let rec eval env = function | V v -> lookup env v | B b -> UB b | L e -> UA (fun x -> eval (x::env) e) | A (e1,e2) -> match eval env e1 with UA f -> f (eval env e2) \end{SaveVerbatim} \begin{SaveVerbatim}{test1} let test1 = A (L (V VZ), B true) \end{SaveVerbatim} % see tagless\_interp1.ml, module Tagfull for the complete code. % If you change the code in here, please adjust the .ml file % accordingly. Let the paper and the accompanying code be in sync. To be concrete, we use the typed object language in Figure~\ref{fig:object} throughout this paper. It is straightforward to create an algebraic data type, say in OCaml% \ifshort, Figure~\ref{fig:tag-problem}(a)\fi, to represent object terms such as those in Figure~\ref{fig:object}. For brevity, we elide treating integers, conditionals, and fixpoint in this section. 
\ifshort\else\UseVerbatim{2a}\fi We represent each variable using a unary de Bruijn index.% \footnote{We use de Bruijn indices to simplify the comparison with \citearound{'s work}\citet{WalidICFP02}.} For example, we represent the object term $(\fun{x}x)\True$ as \ifshort \BUseVerbatim{test1}.\else \UseVerbatim{test1}\fi \ifshort \begin{figure} % (a) \BUseVerbatim{2a} \smallskip (b) \BUseVerbatim{2b} \smallskip (c) \BUseVerbatim{2c} \smallskip (d) \BUseVerbatim{2d} \medskip \caption{OCaml code illustrating the tag problem} \label{fig:tag-problem} \end{figure} \fi Let us try to implement an interpreter function |eval0|\ifshort, Figure~\ref{fig:tag-problem}(b)\fi. It takes an object term such as |test1| above and gives us its value. The first argument to |eval0| is the environment, initially empty, which is the list of values bound to free variables in the interpreted code. \ifshort\else\UseVerbatim{2b}\fi If our OCaml-like metalanguage were untyped, the code above would be acceptable. The |L e| line exhibits interpretive overhead: |eval0| traverses the function body~|e| every time (the result of evaluating) |L e| is applied. Code generation can be used to remove this interpretive overhead \citep{jones-partial,futamura-partial,WalidICFP02}. However, the function |eval0| is ill-typed if we use OCaml or some other typed language as the metalanguage. The line |B b| says that |eval0| returns a boolean, whereas the next line |L e| says the result is a function, but all branches of a pattern-match form must yield values of the same type. A related problem is the type of the environment |env|: a regular OCaml list cannot hold both boolean and function values. The usual solution is to introduce a universal type containing booleans and functions\ifshort, Figure~\ref{fig:tag-problem}(c)\fi. \ifshort\else\UseVerbatim{2c}\fi We can then write a typed interpreter\ifshort, Figure~\ref{fig:tag-problem}(d), \else\UseVerbatim{2d}\fi whose inferred type is |u list -> exp -> u|. Now we can evaluate \ifshort \texttt{eval [] test1} obtaining |UB true|. \else \begin{code} let test1r = eval [] test1 val test1r : u = UB true \end{code} \fi The unfortunate tag |UB| in the result reflects that |eval| is a partial function. First, the pattern match |with UA f| in the line |A (e1,e2)| is not exhaustive, so |eval| can fail if we apply a boolean, as in the ill-typed term |A (B true, B false)|. \ifshort\else \begin{code} let test2 = A (B true, B false) let test2r = eval [] test2 Exception: Match_failure in eval \end{code} \fi Second, the |lookup| function assumes a nonempty environment, so |eval| can fail if we evaluate an open term \ifshort |A (L (V (VS VZ)), B true)|. \else \begin{code} let test3 = A (L (V (VS VZ)), B true) let test3r = eval [] test3 Exception: Match_failure in lookup \end{code} \fi After all, the type |exp| represents object terms both well-typed and ill-typed, both open and closed. Although |eval| never fails on well-typed closed terms, this soundness is not obvious to the metalanguage, whose type system we must still appease with the nonexhaustive pattern matching in |lookup| and |eval| and the tags |UB| and |UA|. In other words, the algebraic data types above fail to express in the metalanguage that the object program is well-typed. This failure necessitates tagging and nonexhaustive pattern\hyp matching operations that incur a performance penalty in interpretation and impair optimality in partial evaluation \citep{jones-partial,taha-tag}. 
In short, the universal\hyp type solution is unsatisfactory because it does not preserve the type of the encoded term. \ifshort\else\subsection{Solutions using fancier types}\label{s:fancier}\fi It is commonly thought that the type-preserving interpretation of a typed object language in a typed metalanguage is difficult and requires GADTs or dependent types \citep{taha-tag}. In fact, this problem motivated much work on GADTs \citep{xi-guarded,peyton-jones-simple} and on dependent types \citep{WalidICFP02,fogarty-concoqtion}\ifshort\else, in order for the metalanguage's type system to allow the well-typed object term |test1| but disallow the ill-typed object term |test2|\fi. Yet other fancy type systems have been proposed to distinguish closed terms like |test1| from open terms \ifshort\citep{WalidPOPL03,NanevskiJFP05,DaviesJACM01}\else like |test3| \citep{WalidPOPL03,NanevskiICFP02,NanevskiJFP05,DaviesJACM01,nanevski-contextual}\fi, so that |lookup| never receives an empty environment. \subsection{Our final proposal}\label{ourapproach} Following an old idea of \citet{reynolds-user-defined}, we represent object programs using ordinary functions rather than data constructors. These functions comprise the entire interpreter: \ifshort \begin{code3} let varZ env = fst env let b (bv:bool) env = bv let varS vp env = vp (snd env) let lam e env = fun x -> e (x,env) let app e1 e2 env = (e1 env) (e2 env) \end{code3} \else \begin{code} let varZ env = fst env let varS vp env = vp (snd env) let b (bv:bool) env = bv let lam e env = fun x -> e (x,env) let app e1 e2 env = (e1 env) (e2 env) \end{code} \fi We now represent our sample term $(\fun{x}x)\True$ as \ifshort \texttt{let testf1 = app (lam varZ) (b true)}. \else \begin{code} let testf1 = app (lam varZ) (b true) \end{code} \fi This representation is almost the same as in \S\ref{tagproblem}, only written with lowercase identifiers. To evaluate an object term is to apply its representation to the empty environment\ifshort , |testf1 ()|, obtaining |true|\fi. \ifshort\else \begin{code} let testf1r = testf1 () val testf1r : bool = true \end{code} \fi The result has no tags: the interpreter patently uses no tags and no pattern matching. The term |b true| evaluates to a boolean and the term |lam varZ| evaluates to a function, both untagged. The |app| function applies |lam varZ| without pattern matching. What is more, evaluating an open term such as \ifshort \texttt{app (lam (varS varZ)) (b true)} \else |testf3| below \fi gives a type error rather than a run-time error. \ifshort\else \begin{code}[commandchars=\\\{\}] let testf3 = app (lam (varS varZ)) (b true) let testf3r = testf3 \underline{()} This expression has type unit but is here used with type 'a * 'b \end{code} \fi The type error correctly complains that the initial environment should be a tuple rather than~|()|. In other words, the term is open. In sum, using ordinary functions rather than data constructors to represent well-typed terms, we achieve a tagless evaluator for a typed object language in a metalanguage with a simple \ifshort Hindley-Milner \fi type system\ifshort\else\ \citep{hindley-principal,milner-theory}\fi. We call this approach \emph{final} (in contrast to \emph{initial}), because we represent each object term not by its abstract syntax but by its denotation in a semantic algebra. This representation makes it trivial to implement a primitive recursive function over object terms, such as an evaluator. 
Or, as a referee puts it aptly, our proposal is ``a way to write a typed fold function over a typed term.'' We emphasize ``typed'' and ``fold'' in the previous sentence. We use a typed version of \citets{Mogensen-SelfApplicable} encoding of the recursive type of terms \citep{bohm-automatic}, which makes it much easier to write folds over terms than term functions that are not primitive recursive (or, compositional). In contrast, \citearound{'s earlier encoding of the sum type of terms}\citet{JFP-Mogensen} does not privilege folds. In exchange, we statically express object types in the metalanguage and prevent both kinds of run-time errors in \S\ref{tagproblem}, due to evaluating ill-typed or open terms. Because the new interpreter uses no universal type or pattern matching, it never gives a run-time error, and is in fact total. Because this safety is obvious not just to us but also to the metalanguage implementation, we avoid the serious performance penalty \citep{WalidICFP02} that arises from error checking at run time. Our solution does \emph{not} involve Church-encoding the universal type. The Church encoding of the type~|u| in \S\ref{tagproblem} requires two continuations; the function |app| in the interpreter above would have to provide both to the encoding of~|e1|. The continuation corresponding to the |UB| case of~|u| must either raise an error or loop. For a well-typed object term, that error continuation is never invoked, yet it must be supplied. In contrast, our interpreter has no error continuation at all. The evaluator above is wired directly into functions such as |b|, |lam|, and |app|, whose names appear free in |testf1| above. \ifshort We \else In the rest of this paper, we \fi explain how to abstract over these functions' definitions and apply different folds to the same object language, so as to process the same term using many other interpreters: we can \begin{itemize} \item evaluate the term to a value in the metalanguage; \item measure the length of the term; \item compile the term, with staging support such as in MetaOCaml; \item partially evaluate the term, online; and \item transform the term to continuation\hyp passing style (CPS), even call-by-name (CBN) CPS in a call-by-value (CBV) metalanguage, so as to isolate the evaluation order of the object language from that of the metalanguage.\ifshort \footnote{Due to serious lack of space, we refer the reader to the accompanying code for this.}\fi \end{itemize} We have programmed all our interpreters and examples in OCaml (and, for staging, \citet{metaocaml}) and standard Haskell. The complete code is available at \url{http://okmij.org/ftp/tagless-final/} to supplement the paper. \ifshort For simplicity, main examples in the paper will be in MetaOCaml; all examples have also been implemented in Haskell. \else Except for the basic definitions in \S\ref{encoding}, we show our examples in (Meta)OCaml even though some of our claims are more obvious in Haskell, for consistency and because MetaOCaml provides convenient, typed staging facilities. \fi \subsection{Contributions}\label{contributions} We attack the problem of tagless (staged) type-preserving interpretation exactly as it was posed by \citet{WalidICFP02} and \citet{xi-guarded}. We use their running examples and achieve the result they call desirable. Our contributions are as follows. 
\begin{enumerate} \item We build the first \emph{family} of interpreters, each instantiating the \emph{same} signature, that evaluate (\S\ref{language}), compile (\S\ref{S:compiler}), and partially evaluate (\S\ref{PE}) a typed higher-order object language in a typed metalanguage, in direct and continuation\hyp passing styles\ifshort\else\ (\S\ref{variations})\fi. \item These interpreters use no type tags and need no advanced type-system features such as GADTs, dependent types, or intensional type analysis. Yet the type system of the metalanguage assures statically that each object program is well-typed and closed, and that each interpreter preserves types and never gets stuck. In particular, our (online) partial evaluator and CPS transformers avoid GADTs in their implementation and stay portable across Haskell 98 and ML, by expressing in their interface an inductive map from input types to output types. \item Our clean, comparable implementations using OCaml modules and Haskell type classes show how to parametrize our final representation of object terms over multiple ways to assign them meanings. \item We point a clear way to extend the object language with more features such as state\ifshort\else~(\S\ref{state})\fi.\ifshort\footnote{Again, please see our code.}\fi \ Our term encoding is contravariant in the object language, so extending the language does not invalidate terms already encoded. %% \item We describe an approach to self\hyp interpretation compatible with the %% above\ifshort\else~(\S\ref{selfinterp})\fi. Self\hyp interpretation turned %% out to be harder than expected.\ifshort\footnotemark[\value{footnote}]\fi \item We show how to use higher-kinded abstraction to build embedded DSLs. \end{enumerate} Our code is surprisingly simple and obvious in hindsight, but it has been cited as a difficult problem (\cite{sumii-hybrid} and \cite{Thiemann-combinators} notwithstanding) to interpret a typed object language in a typed metalanguage without tagging or type\hyp system extensions. For example, \citet{taha-tag} say that ``expressing such an interpreter in a statically typed programming language is a rather subtle matter. In fact, it is only recently that some work on programming type-indexed values in ML \citep{yang-encoding} has given a hint of how such a function can be expressed.'' We discuss related work in~\S\ref{related}. To reiterate, we do \emph{not} propose any new language feature or \ifshort new \else even any new programming \fi technique. \ifshort We \else Rather, we \fi solve a problem that was stated in the published record as open and likely unsolvable in ML or Haskell 98 without extensions, by a novel combination of simple types and techniques already described in the literature that use features present in mainstream functional languages. In particular, we follow \citets{yang-encoding} encoding of type-indexed values, \citets{Sperber-SelfApplicable} and \citets{asai-binding-time} construction of dynamic terms alongside static terms, and \citets{Thiemann-combinators} deforestation of syntax constructors. These techniques require just a Hindley-Milner type system with either module functors or constructor classes, as realized in all variants of ML and Haskell. The simplicity of our solution and its use of only mainstream features \ifshort\else are virtues that \fi make it more practical to build typed, embedded DSLs. 
\ifshort\else However we represent an object term, the representation can be created either by hand (for example, by entering object terms at a metalanguage interpreter's prompt) or by parsing and type-checking text. It is known how to write such a type checker for a higher-order object language such as ours, whether using fancy types \citep{Guillemette-Monier-PLPV,WalidICFP02} or not \citep{baars-typing}. We have ourselves implemented a type checker for our object language (in the accompanying source file |IncopeTypecheck.hs|), which maps an ordinary syntax tree to (either a type error or) a finally encoded object term that can then be interpreted in multiple ways without repeated type-checking. We leave this problem aside in the rest of this paper. \fi \section{The object language and its tagless interpreters}\label{language} Figure~\ref{fig:object} shows our object language, a simply-typed $\lambda$-calculus with fixpoint, integers, booleans, and comparison. The language is similar to \citearound{'s PCF}\citet{plotkin-lcf}. It is also close to \citets{xi-guarded}, without their polymorphic lift but with more constants so as to more conveniently express examples. In contrast to \S\ref{intro}, in the rest of the paper we use higher-order abstract syntax (HOAS) \citep{miller-manipulating,pfenning-higher-order} rather than de Bruijn indices to encode binding and ensure that our object programs are closed. We find HOAS to be more convenient, but we have also implemented our approach using de Bruijn indices (in \S\ref{S:de-Bruijn} and the accompanying source file |incope-dB.ml|). \subsection{How to make encoding flexible: abstract the interpreter} \label{encoding} \ifshort \begin{SaveVerbatim}{3a} class Symantics repr where int :: Int -> repr Int; bool :: Bool -> repr Bool lam :: (repr a -> repr b) -> repr (a -> b) app :: repr (a -> b) -> repr a -> repr b fix :: (repr a -> repr a) -> repr a add :: repr Int -> repr Int -> repr Int mul :: repr Int -> repr Int -> repr Int leq :: repr Int -> repr Int -> repr Bool if_ :: repr Bool -> repr a -> repr a -> repr a \end{SaveVerbatim} \else \begin{SaveVerbatim}{3a} class Symantics repr where int :: Int -> repr Int bool :: Bool -> repr Bool lam :: (repr a -> repr b) -> repr (a -> b) app :: repr (a -> b) -> repr a -> repr b fix :: (repr a -> repr a) -> repr a add :: repr Int -> repr Int -> repr Int mul :: repr Int -> repr Int -> repr Int leq :: repr Int -> repr Int -> repr Bool if_ :: repr Bool -> repr a -> repr a -> repr a \end{SaveVerbatim} \fi \begin{SaveVerbatim}{3b} testpowfix () = lam (\x -> fix (\self -> lam (\n -> if_ (leq n (int 0)) (int 1) (mul x (app self (add n (int (-1)))))))) \end{SaveVerbatim} \begin{SaveVerbatim}{3c} testpowfix7 () = lam (\x -> app (app (testpowfix ()) x) (int 7)) \end{SaveVerbatim} We embed our language in (Meta)OCaml and Haskell. In Haskell, the functions that construct object terms are methods in a type class |Symantics| (with a parameter |repr| of kind |* -> *|)\ifshort, Figure~\ref{fig:symantics-haskell}(a)\fi. The class is so named because its interface gives the syntax of the object language and its instances give the semantics. \ifshort\else\UseVerbatim{3a}\fi For example, we encode the term |test1|, or $(\fun{x}x)\True$, from \S\ref{tagproblem} above as \texttt{app (lam (\textbackslash x -> x)) (bool True)}, whose inferred type is \texttt{Symantics repr => repr Bool}. 
For another example, the classical $\mathit{power}$ function is \ifshort in Figure~\ref{fig:symantics-haskell}(b) \else\UseVerbatim{3b}\fi and the partial application $\fun{x} \mathit{power}\;x\;7$ is \ifshort in Figure~\ref{fig:symantics-haskell}(c). \else\UseVerbatim{3c}\fi The dummy argument |()| above is to avoid the monomorphism restriction, to keep the type of |testpowfix| and |testpowfix7| polymorphic in |repr|. \ifshort\else Instead of supplying this dummy argument, we could have given the terms explicit polymorphic signatures. We however prefer for Haskell to infer the object types for us. We could also avoid the dummy argument by switching off the monomorphism restriction with a compiler flag. \fi The methods |add|, |mul|, and |leq| are quite similar, and so are |int| and |bool|. Therefore, we often elide all but one method of each group. The accompanying code has the complete implementations. \ifshort \begin{figure} (a) \BUseVerbatim{3a} \smallskip (b) \BUseVerbatim{3b} \smallskip (c) \BUseVerbatim{3c} \medskip \caption{Symantics in Haskell} \label{fig:symantics-haskell} \end{figure} \fi \begin{SaveVerbatim}{ocaml-simple} module type Symantics = sig type ('c, 'dv) repr val int : int -> ('c, int) repr val bool: bool -> ('c, bool) repr val lam : (('c, 'da) repr -> ('c, 'db) repr) -> ('c, 'da -> 'db) repr val app : ('c, 'da -> 'db) repr -> ('c, 'da) repr -> ('c, 'db) repr val fix : ('x -> 'x) -> (('c, 'da -> 'db) repr as 'x) val add : ('c, int) repr -> ('c, int) repr -> ('c, int) repr val mul : ('c, int) repr -> ('c, int) repr -> ('c, int) repr val leq : ('c, int) repr -> ('c, int) repr -> ('c, bool) repr val if_ : ('c, bool) repr -> (unit -> 'x) -> (unit -> 'x) -> (('c, 'da) repr as 'x) end \end{SaveVerbatim} \begin{SaveVerbatim}{ocaml-examples} module EX(S: Symantics) = struct open S let test1 () = app (lam (fun x -> x)) (bool true) let testpowfix () = lam (fun x -> fix (fun self -> lam (fun n -> if_ (leq n (int 0)) (fun () -> int 1) (fun () -> mul x (app self (add n (int (-1)))))))) let testpowfix7 = lam (fun x -> app (app (testpowfix ()) x) (int 7)) end \end{SaveVerbatim} \ifshort \begin{figure}[t] \begin{tabular}{@{}l@{}} \ifx\relax\normalbaselineskip\else\baselineskip\normalbaselineskip\fi \BUseVerbatim[baseline=b]{ocaml-simple}\\[\smallskipamount] \ifx\relax\normalbaselineskip\else\baselineskip\normalbaselineskip\fi \BUseVerbatim[baseline=t]{ocaml-examples} \end{tabular} \medskip \caption{A simple (Meta)OCaml embedding of our object language, and examples} \label{fig:ocaml-simple} \label{fig:ocaml-examples} \end{figure} \else \begin{figure}[t] \begin{floatrule} \BUseVerbatim{ocaml-simple} \end{floatrule} \caption{A simple (Meta)OCaml embedding of our object language} \label{fig:ocaml-simple} \end{figure} \begin{figure}[t] \begin{floatrule} \BUseVerbatim{ocaml-examples} \end{floatrule} \caption{Examples using the embedding in Figure~\ref{fig:ocaml-simple} of our object language} \label{fig:ocaml-examples} \end{figure} \fi To embed the same object language in (Meta)OCaml, we replace the \ifshort type class \fi |Symantics| \ifshort\else type class \fi and its instances by a module signature |Symantics| and its implementations. Figure~\ref{fig:ocaml-simple} shows a simple signature that suffices until~\S\ref{PE}. 
The two differences are: the additional type parameter |'c|, an \emph{environment classifier} \citep{WalidPOPL03} required by MetaOCaml for code generation in~\S\ref{S:compiler}; and the $\eta$-expanded type for |fix| and thunk types in |if_| since OCaml is a call-by-value language. We shorten some of the types using OCaml's |as| syntax. The functor |EX| in Figure~\ref{fig:ocaml-examples} encodes our running examples |test1| and the $\mathit{power}$ function (|testpowfix|). The dummy argument to |test1| and |testpowfix| is an artifact of MetaOCaml: in order for us to run a piece of generated code, it must be polymorphic in its environment classifier (the type variable |'c| in Figure~\ref{fig:ocaml-simple}), so we must define our object terms as syntactic values to satisfy the value restriction. (Alternatively, we could have used OCaml's rank-2 record types to maintain the necessary polymorphism.) \ifshort\else\par\fi Thus, we represent an object expression in OCaml as a functor from |Symantics| to a semantic domain. This is essentially the same as the constraint \texttt{Symantics repr =>} in the Haskell embedding. Comparing |Symantics| with Figure~\ref{fig:object} shows how to represent \emph{every} well-typed object term in the metalanguage. We formalize this representation by defining $H$ and~$M$, two inductive maps from terms and types in our object language to terms and types in Haskell and OCaml:\pagebreak \begin{align} H(\ZZ) &= \texttt{Int} & M(\ZZ) &= \texttt{int} \notag\\ H(\BB) &= \texttt{Bool} & M(\BB) &= \texttt{bool} \notag\\ H(t_1 \to t_2) &= H(t_1) \mathbin{\texttt{->}} H(t_2) & M(t_1 \to t_2) &= M(t_1) \mathbin{\texttt{->}} M(t_2) \displaybreak[0] \\ H(x) &= x & M(x) &= x \notag\\ H(\fun{x}e) &= \texttt{lam (\textbackslash $x$ -> $H(e)$)} & M(\fun{x}e) &= \texttt{lam (fun $x$ -> $M(e)$)} \notag\\ H(\fix{f}e) &= \texttt{fix (\textbackslash $f$ -> $H(e)$)} & M(\fix{f}e) &= \texttt{fix (fun $f$ -> $M(e)$)} \notag\\ H(e_1 e_2) &= \texttt{app $H(e_1)$ $H(e_2)$} & M(e_1 e_2) &= \texttt{app $M(e_1)$ $M(e_2)$} \notag\\ H(n) &= \texttt{int $n$} & M(n) &= \texttt{int $n$} \notag\\ H(\True) &= \texttt{bool True} & M(\True) &= \texttt{bool true} \notag\\ H(\False) &= \texttt{bool False} & M(\False) &= \texttt{bool false} \notag\\ \!H(\cond{e}{e_1}{e_2}) &= \rlap{\texttt{if\_ $H(e)$ $H(e_1)$ $H(e_2)$}} \notag\\ \!M(\cond{e}{e_1}{e_2}) &= \rlap{\texttt{if\_ $M(e)$ (fun () -> $M(e_1)$) (fun () -> $M(e_2)$)}} \notag\\ H(e_1 + e_2) &= \texttt{add $H(e_1)$ $H(e_2)$} & M(e_1 + e_2) &= \texttt{add $M(e_1)$ $M(e_2)$} \notag\\ H(e_1 \times e_2) &= \texttt{mul $H(e_1)$ $H(e_2)$} & M(e_1 \times e_2) &= \texttt{mul $M(e_1)$ $M(e_2)$} \notag\\ H(e_1 \le e_2) &= \texttt{leq $H(e_1)$ $H(e_2)$} & M(e_1 \le e_2) &= \texttt{leq $M(e_1)$ $M(e_2)$} \end{align} These definitions assume that our object language, Haskell, and OCaml use the same variable names~$x$ and integer literals~$n$. If $\Gamma$ is a typing context $x_1:t_1,\dotsc,x_n:t_n$ in the object language, then we define the metalanguage contexts \begin{align} \texttt{repr\;$H(\Gamma)$} &= x_1:\texttt{repr}\;H(t_1),\dotsc,x_n:\texttt{repr}\;H(t_n) \text, \\ \texttt{('c,\:$M(\Gamma)$)\;repr} &= x_1:\texttt{('c,\:$M(t_1)$)\;repr},\dotsc,x_n:\texttt{('c,\:$M(t_n)$)\;repr} \text. \end{align} The following proposition states the trivial but fundamental fact that this representation preserves types. 
\begin{proposition}\label{prop:typing} If an object term~$e$ has the type~$t$ in the context~$\Gamma$, then the Haskell term~$H(e)$ has the type \texttt{repr\;$H(t)$} in the context \[ \texttt{repr}:\star\to\star,\quad \texttt{Symantics repr},\quad \texttt{repr}\;H(\Gamma) \text, \] and the OCaml term~$M(e)$ has the type \texttt{('c,\:$M(t)$)\;repr} in the context \[ \texttt{S:Symantics},\quad \texttt{open S},\quad \texttt{'c}:\star,\quad \texttt{('c,\:$M(\Gamma)$)\;repr} \text. \] \end{proposition} \begin{proof} By structural induction on the derivation in the object language that $e$ has type~$t$ in~$\Gamma$. \end{proof} \begin{corollary}\label{cor:typing} If a closed object term~$e$ has the type~$t$, then the Haskell term~$H(e)$ has the type \[ \texttt{forall repr. Symantics repr => repr $H(t)$} \] and the OCaml functor \[ \texttt{functor (S:Symantics) -> struct open S let term () = $M(e)$ end} \] has the signature \[ \texttt{functor (S:Symantics) -> sig val term: unit -> ('c,\:$M(t)$)\;S.repr end} \text{.} \] \end{corollary} Conversely, the type system of the metalanguage checks that the represented object term is well-typed and closed. If we err, say replace |int 7| with |bool True| in |testpowfix7|, the type checker will complain there that the expected type |Int| does not match the inferred |Bool|. Similarly, the object term $\fun{x}xx$ and its encoding |lam (\x -> app x x)| both fail occurs-checks in type checking. Both Haskell's and MetaOCaml's type checkers also flag syntactically invalid object terms, such as if we forget |app| somewhere above. Because our encodings of terms and types are so straightforward and metacircular, these error messages from the metalanguage implementation are just about as readable as those for ``native'' type errors such as |fun x -> x x|.
\subsection{Two tagless interpreters} \label{S:interpreter-RL} Now that our term representation is independent of any particular interpreter, we are ready to present a series of interpreters. Each interpreter is an instance of the |Symantics| class in Haskell and a module implementing the |Symantics| signature in OCaml. The first interpreter evaluates an object term to its value in the metalanguage. The module |R| below is metacircular in that it \emph{runs} each object\hyp language operation by executing the corresponding metalanguage operation.
\ifshort
\begin{code3}
module R = struct
  type ('c,'dv) repr = 'dv    (* no wrappers *)
  let int (x:int) = x
  let bool (b:bool) = b
  let lam f = f
  let app e1 e2 = e1 e2
  let fix f = let rec self n = f self n in self
  let add e1 e2 = e1 + e2
  let mul e1 e2 = e1 * e2
  let leq e1 e2 = e1 <= e2
  let if_ eb et ee = if eb then et () else ee ()
end
\end{code3}
\else
\begin{code}
module R = struct
  type ('c,'dv) repr = 'dv    (* no wrappers *)
  let int (x:int) = x
  let bool (b:bool) = b
  let lam f = f
  let app e1 e2 = e1 e2
  let fix f = let rec self n = f self n in self
  let add e1 e2 = e1 + e2
  let mul e1 e2 = e1 * e2
  let leq e1 e2 = e1 <= e2
  let if_ eb et ee = if eb then et () else ee ()
end
\end{code}
\fi
%
As in~\S\ref{ourapproach}, this interpreter is patently tagless, using neither a universal type nor any pattern matching: the operation |add| is really OCaml's addition, and |app| is OCaml's application. To run our examples, we instantiate the |EX| functor from~\S\ref{encoding} with~|R|\ifshort: \texttt{module EXR = EX(R)}\fi. \ifshort\else
\begin{code}
module EXR = EX(R)
\end{code}
\fi Thus, |EXR.test1 ()| evaluates to the untagged boolean value |true|.
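
Likewise, because |R.repr| is the identity on value types, the interpreted $\mathit{power}$ function is an ordinary curried OCaml function. The following small usage sketch is ours rather than part of the accompanying code; the bindings |p| and |r| are only for illustration.
\begin{code}
let p = EXR.testpowfix ()   (* an ordinary function of type int -> int -> int *)
let r = p 2 7               (* the untagged integer 128 *)
\end{code}
No unwrapping or pattern matching stands between the object program and its value.
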
\begin{comment} %% commenting out this Haskell in JFP version as well In Haskell, we define \begin{code} newtype R a = R {unR::a} instance Symantics R where ... \end{code} Although |R| looks like a tag, it is only a |newtype|. The types |a| and |R a| are represented differently only at compile time, not at run time. Pattern matching against~|R| cannot ever fail and is assuredly compiled away. \end{comment} It is obvious to the compiler that pattern matching cannot fail, because there is no pattern matching. Evaluation can only fail to yield a value due to interpreting |fix|. The soundness of the object language's type system with respect to the dynamic semantics specified by a definitional interpreter follows from the soundness of the metalanguage's type system. \ifshort (The source code shows a total interpreter |L| that measures the length of each object term.) \fi \ifshort We can also generalize from~|R| to all interpreters; these propositions follow immediately from the soundness of the metalanguage's type system. \fi \begin{proposition} If a closed object term~$e$ has type~$t$, and the OCaml module |I| implements the signature |Symantics|, then under the OCaml module definition \[ \texttt{\begin{tabular}{@{}l@{}} module RESULT = \\ \quad (functor (S:Symantics) -> struct open S let term () = $M(e)$ end) \\ \quad (I) \end{tabular}} \] evaluating the expression \texttt{RESULT.term ()} never gets stuck: it either does not terminate or evaluates to a value of type \texttt{('c,\:$M(t)$)\;I.repr} (polymorphic over~|'c|). \end{proposition} \begin{proof} By Corollary~\ref{cor:typing} and the type soundness of (this fragment of) OCaml. \end{proof} \begin{corollary} If a closed object term~$e$ has type~$t$, then under the OCaml module definition \[ \texttt{\begin{tabular}{@{}l@{}} module RESULT = \\ \quad (functor (S:Symantics) -> struct open S let term () = $M(e)$ end) \\ \quad (R) \end{tabular}} \] evaluating the expression \texttt{RESULT.term ()} never gets stuck: it either does not terminate or evaluates to a value of type~$M(t)$. \end{corollary} \ifshort\else For variety, we show another interpreter~|L|, which measures the \emph{length} of each object term, defined as the number of term constructors. \begin{code} module L = struct type ('c,'dv) repr = int let int (x:int) = 1 let bool (b:bool) = 1 let lam f = f 0 + 1 let app e1 e2 = e1 + e2 + 1 let fix f = f 0 + 1 let add e1 e2 = e1 + e2 + 1 let mul e1 e2 = e1 + e2 + 1 let leq e1 e2 = e1 + e2 + 1 let if_ eb et ee = eb + et () + ee () + 1 end \end{code} Now the OCaml expression |let module E = EX(L) in E.test1 ()| evaluates to |3|. This interpreter is not only tagless but also total. It ``evaluates'' even seemingly divergent terms; for instance, |app (fix (fun self -> self)) (int 1)| evaluates to $3$. 
\fi \begin{comment} \begin{code} module EX1(S: Symantics) = struct open S let tfix () = app (fix (fun self -> self)) (int 1) end;; let module E =EX1(R) in E.tfix ();; let module E =EX1(L) in E.tfix ();; \end{code} \end{comment} \subsection{Higher-order abstract syntax versus de Bruijn indices} \label{S:de-Bruijn} \begin{figure} \begin{floatrule} \begin{BVerbatim} module type Symantics = sig type ('c,'h,'dv) repr type ('c,'dv) vr (* variable representation *) val vz : ('c, ('c,'d) vr * 'h, 'd) repr val vs : ('c, 'h, 'd) repr -> ('c, _ * 'h, 'd) repr val int : int -> ('c,'h,int) repr val bool: bool -> ('c,'h,bool) repr val lam : ('c, ('c,'da) vr * 'h, 'db) repr -> ('c,'h,'da->'db) repr val app : ('c,'h,'da->'db) repr -> ('c,'h,'da) repr -> ('c,'h,'db) repr val fix : ('c, ('c,'da->'db) vr * 'h, 'da->'db) repr -> ('c, 'h, 'da->'db) repr val add : ('c,'h,int) repr -> ('c,'h,int) repr -> ('c,'h,int) repr val mul : ('c,'h,int) repr -> ('c,'h,int) repr -> ('c,'h,int) repr val leq : ('c,'h,int) repr -> ('c,'h,int) repr -> ('c,'h,bool) repr val if_ : ('c,'h,bool) repr -> (unit -> 'x) -> (unit -> 'x) -> (('c,'h,'da) repr as 'x) end module R = struct type ('c,'h,'dv) repr = 'h -> 'dv type ('c,'d) vr = 'd let vz (x,_) = x let vs v (_,h) = v h let int (x:int) h = x let bool (b:bool) h = b let lam f h = fun x -> f (x,h) let app e1 e2 h = (e1 h) (e2 h) let fix f h = let rec self n = f (self,h) n in self let add e1 e2 h = e1 h + e2 h let mul e1 e2 h = e1 h * e2 h let leq e1 e2 h = e1 h <= e2 h let if_ eb et ee h = if eb h then et () h else ee () h end \end{BVerbatim} \end{floatrule} \caption{Embedding and evaluating our object language using de Bruijn indices} \label{fig:de-Bruijn} \end{figure} Because Haskell and ML allow case analysis on $\lambda$-bound variables, one might worry that our HOAS representation of the object language allows \emph{exotic terms} and is thus inadequate. To the contrary, because the representation of an object term is \emph{parametrically} polymorphic over the type constructor |repr| of the interpreter, $\lambda$-bound object variables cannot be case-analyzed. We thus follow \citet{washburn-boxes-jfp} in ``enforcing term parametricity with type parametricity'' to represent and fold over abstract syntax. Although the rest of this paper continues to represent binding using HOAS\@, our approach is compatible with de Bruijn indices. The accompanying source file |incope-dB.ml| implements this alternative, starting with the |Symantics| signature and the |R| evaluator in Figure~\ref{fig:de-Bruijn}. In this encoding of the object language, |vz| represents the innermost variable, |vs vz| represents the second-to-innermost variable, and so on. The new type argument |'h| to |repr| tracks the type of the environment as a nested tuple, each of whose components is a value of type |('c,'dv) vr| representing a variable of type~|'dv|. The evaluator~|R| interprets each object term as a function from its environment to its value. \section{A tagless compiler (or, a staged interpreter)}\label{S:compiler} Besides immediate evaluation, we can compile our object language into OCaml code using MetaOCaml's staging facilities. MetaOCaml represents future-stage expressions of type~$t$ as values of type |('c, |$t$|) code|, where |'c| is the environment classifier \citep{WalidPOPL03,calcagno-ml-like}. Code values are created by a \emph{bracket} form |.<|$e$|>.|, which quotes the expression~$e$ for evaluation at a future stage. 
The \emph{escape} |.~|$e$ must occur within a bracket and specifies that the expression~$e$ must be evaluated at the current stage; its result, which must be a code value, is spliced into the code being built by the enclosing bracket. The \emph{run} form |.!|$e$ evaluates the future-stage code value~$e$ by compiling and linking it at run time. Bracket, escape, and run are akin (modulo hygiene) to quasi-quotation, unquotation, and |eval| of Lisp. % this code does not have the 'sv parameter. It shows up later. \ifshort \begin{SaveVerbatim}{5a} module C = struct type ('c,'dv) repr = ('c,'dv) code let int (x:int) = .<x>. let bool (b:bool) = .<b>. let lam f = .<fun x -> .~(f .<x>.)>. let app e1 e2 = .<.~e1 .~e2>. let fix f = .<let rec self n = .~(f .<self>.) n in self>. let add e1 e2 = .<.~e1 + .~e2>. let mul e1 e2 = .<.~e1 * .~e2>. let leq e1 e2 = .<.~e1 <= .~e2>. let if_ eb et ee = .<if .~eb then .~(et ()) else .~(ee ())>. end \end{SaveVerbatim} \else \begin{SaveVerbatim}{5a} module C = struct type ('c,'dv) repr = ('c,'dv) code let int (x:int) = .<x>. let bool (b:bool) = .<b>. let lam f = .<fun x -> .~(f .<x>.)>. let app e1 e2 = .<.~e1 .~e2>. let fix f = .<let rec self n = .~(f .<self>.) n in self>. let add e1 e2 = .<.~e1 + .~e2>. let mul e1 e2 = .<.~e1 * .~e2>. let leq e1 e2 = .<.~e1 <= .~e2>. let if_ eb et ee = .<if .~eb then .~(et ()) else .~(ee ())>. end \end{SaveVerbatim} \fi \begin{SaveVerbatim}{5b} let module E = EX(C) in E.test1 () \end{SaveVerbatim} \begin{SaveVerbatim}{5c} let module E = EX(C) in E.testpowfix7 \end{SaveVerbatim} \begin{SaveVerbatim}{5d} .<fun x_1 -> (fun x_2 -> let rec self_3 = fun n_4 -> (fun x_5 -> if x_5 <= 0 then 1 else x_2 * self_3 (x_5 + (-1))) n_4 in self_3) x_1 7>. \end{SaveVerbatim} To turn the evaluator~|R| into a simple compiler, we bracket the computation on values to be performed at run time, then escape the code generation from terms to be performed at compile time. Adding these stage annotations yields the compiler~|C| \ifshort in Figure~\ref{fig:interpreter-C}(a).\else below. \UseVerbatim{5a} \fi \ifshort \begin{figure}[t] (a) \BUseVerbatim{5a} \smallskip (b) \BUseVerbatim{5b} \smallskip (c) \BUseVerbatim{5c} \smallskip (d) \BUseVerbatim{5d} \medskip \caption{The tagless staged interpreter \texttt{C}} \label{fig:interpreter-C} \end{figure} \fi This is a straightforward staging of |module R|. This compiler produces unoptimized code. For example, interpreting our |test1| with \ifshort Figure~\ref{fig:interpreter-C}(b) \else \UseVerbatim{5b}\fi gives the code value |.<(fun|~|x_6|~|->|~|x_6)| |true>.| of inferred type |('c,|~|bool)| |C.repr|. Interpreting |testpowfix7| with \ifshort Figure~\ref{fig:interpreter-C}(c) \else \UseVerbatim{5c}\fi gives a code value with many apparent $\beta$- and $\eta$-redexes\ifshort, Figure~\ref{fig:interpreter-C}(d). \else: \UseVerbatim{5d}\fi This compiler does not incur any interpretive overhead: the code produced for $\fun{x}x$ is simply |fun|~|x_6|~|->|~|x_6|\ifshort\else\ and does not call the interpreter, unlike the recursive calls to |eval0| and |eval| in the |L e| lines in \S\ref{tagproblem}\fi. The resulting code obviously contains no tags and no pattern matching. The environment classifiers here, like the tuple types in \S\ref{ourapproach}, make it a type error to run an open expression. \ifshort The accompanying code shows the Haskell implementation. 
\else \begin{proposition} If an object term~$e$ has the type~$t$ in the context $x_1:t_1,\dotsc,x_n:t_n$, then in a MetaOCaml environment \[ \texttt{open C},\quad x_1 \mapsto \texttt{.<$y_1$>.},\enspace \dotsc,\enspace x_n \mapsto \texttt{.<$y_n$>.} \] where each $y_i$ is a future-stage variable of type $M(t_i)$, the MetaOCaml term~$M(e)$ evaluates to a code value, of type~\texttt{('c,\:$M(t)$)\;code} (polymorphic over~|'c|), that contains no pattern-matching operations. \end{proposition} \begin{proof} By structural induction on the typing derivation of~$e$. \end{proof} \begin{corollary} If a closed object term~$e$ has type~$t$, then under the OCaml module definition \[ \texttt{\begin{tabular}{@{}l@{}} module RESULT = \\ \quad (functor (S:Symantics) -> struct open S let term () = $M(e)$ end) \\ \quad (C) \end{tabular}} \] the expression \texttt{RESULT.term ()} evaluates to a code value, of type~\texttt{('c,\:$M(t)$)\;code} (polymorphic over~|'c|), that contains no pattern-matching operations. \end{corollary} We have also implemented this compiler in Haskell. Since Haskell has no convenient facility for typed staging, we emulate it by defining a data type |ByteCode| with constructors such as |Var|, |Lam|, |App|, |Fix|, and |INT|. (Alternatively, we could use Template Haskell \citep{sheard-template} as our staging facility: \texttt{ByteCode} can be mapped to the abstract syntax of Template Haskell. The output of our compiler would then be assuredly type-correct Template Haskell.) Whereas our representation of object terms uses HOAS, our bytecode uses integer-named variables to be realistic. We then define
\begin{code}
newtype C t = C (Int -> (ByteCode t, Int))
\end{code}
where |Int| is the counter for creating fresh variable names. We define the compiler by making |C| an instance of the class |Symantics|. The implementation is quite similar to (but slightly more verbose than) the MetaOCaml code above. (The implementation uses GADTs because we also wanted to write a typed interpreter for the \texttt{ByteCode} \emph{data type}.) The accompanying code gives the full details.
\fi
%------------------------------------------------------------------------
% This is the version for the APLAS paper.
\ifshort
\input{tagless-final-aplas}
%------------------------------------------------------------------------
% This is the longer version of the section
\else
\section{A tagless partial evaluator}\label{PE}
Surprisingly, this |Symantics| interface extends to encompass an online partial evaluator that uses no universal type and no tags for object types. We present this partial evaluator in a sequence of three attempts to express the types of residualization and binding-time analysis. Our partial evaluator is a modular extension of the evaluator in~\S\ref{S:interpreter-RL} and the compiler in~\S\ref{S:compiler}, in that it uses the former to reduce static terms and the latter to build dynamic terms. \subsection{Avoiding polymorphic lift} \label{S:PE-lift} Roughly, a partial evaluator interprets each object term to yield either a static (present-stage) term (using the evaluator~|R|) or a dynamic (future-stage) term (using the compiler~|C|). To distinguish between static and dynamic terms, we might try to define |repr| in the partial evaluator as follows. In the phase tags |S0| and~|D0|, the digit zero indicates our initial attempt.
\begin{code} type ('c,'dv) repr = S0 of ('c,'dv) R.repr | D0 of ('c,'dv) C.repr \end{code} To extract a dynamic term from this type, we create the function \begin{code} let abstrI0 (e : ('c,int) repr) : ('c,int) C.repr = match e with S0 e -> C.int e | D0 e -> e \end{code} and a similar function |abstrB0| for dynamic boolean terms. Here, |C.int| is used to convert a static term (of type |('c,int) R.repr|, which is just |int|) to a dynamic term. We can now define the following components required by the |Symantics| signature: \begin{code} let int (x:int) = S0 (R.int x) let bool (x:bool) = S0 (R.bool x) let add e1 e2 = match (e1,e2) with | (S0 e1, S0 e2) -> S0 (R.add e1 e2) | _ -> D0 (C.add (abstrI0 e1) (abstrI0 e2)) \end{code} Integer and boolean literals are immediate, present-stage values. Addition yields a static term (using~|R.add|) if and only if both operands are static; otherwise we extract the dynamic terms from the operands and add them using~|C.add|. Whereas |mul| and |leq| are as easy to define as |add|, we encounter a problem with |if_|. Suppose that the first argument to |if_| is a dynamic term (of type |('c,bool) C.repr|), the second a static term (of type |('c,'a) R.repr|), and the third a dynamic term (of type |('c,'a) C.repr|). We then need to convert the static term to dynamic, but there is no polymorphic ``lift'' function, of type |'a -> ('c,'a) C.repr|, to send a value to a future stage \citep{xi-guarded,WalidPOPL03}. \begin{comment} (By the way, if we were to add polymorphic \texttt{lift} to the type class \texttt{Symantics repr}, then \texttt{repr} would become an instance of \texttt{Applicative} and thus \texttt{Functor}:\texttt{ fmap f = app (lift f)~}.) \end{comment} Our |Symantics| signature only includes separate lifting methods |bool| and |int|, not a polymorphic lifting method, for good reason: When compiling to a first-order target language such as machine code, booleans, integers, and functions may well be represented differently. Compiling a polymorphic lift function thus requires intensional type analysis. To avoid needing polymorphic lift, we turn to \citets{Sperber-SelfApplicable} and \citets{asai-binding-time} technique of building a dynamic term alongside every static term \citep{sumii-hybrid}. \subsection{Delaying binding-time analysis} \label{S:PE-problem} We start building the partial evaluator anew and switch to the data type \begin{code} type ('c,'dv) repr = P1 of ('c,'dv) R.repr option * ('c,'dv) C.repr \end{code} so that a partially evaluated term always contains a dynamic component and sometimes contains a static component. The two alternative constructors of an |option| value, |Some| and |None|, tag each partially evaluated term to indicate whether its value is known statically at the present stage. This tag is not an object type tag: all pattern matching below is exhaustive. Now that the future-stage component is always available, we can define the polymorphic function \begin{code} let abstr1 (P1 (_,dyn) : ('c,'dv) repr) : ('c,'dv) C.repr = dyn \end{code} to extract it without needing polymorphic lift into~|C|. We then try to define the term combinators\nobreak\hspace{0pt}---and get as far as the first-order constructs of our object language, including |if_|. 
\begin{code} let int (x:int) = P1 (Some (R.int x), C.int x) let add e1 e2 = match (e1,e2) with | (P1 (Some n1, _), P1 (Some n2, _)) -> int (R.add n1 n2) | _ -> P1 (None, C.add (abstr1 e1) (abstr1 e2)) let if_ eb et ee = match eb with | P1 (Some s, _) -> if s then et () else ee () | _ -> P1 (None, C.if_ (abstr1 eb) (fun () -> abstr1 (et ())) (fun () -> abstr1 (ee ()))) \end{code} However, we stumble on functions. Given how we just defined~|repr|, a partially evaluated object function, such as the identity $\fun{x}x$ (of type $\ZZ\to\ZZ$) embedded in OCaml as |lam (fun x -> x)| (of type |('c,int->int) repr|), consists of a dynamic part (of type |('c,int->int) C.repr|) and optionally a static part (of type |('c,int->int) R.repr|). The dynamic part is useful when this function is passed to another function that is only dynamically known, as in $\fun{k}k(\fun{x}x)$. The static part is useful when this function is applied to a static argument, as in $(\fun{x}x)1$. Neither part, however, lets us \emph{partially} evaluate the function, that is, compute as much as possible statically when it is applied to a mix of static and dynamic inputs. For example, the partial evaluator should turn $\fun{n}(\fun{x}x)n$ into $\fun{n}n$ by substituting $n$ for~$x$ in the body of $\fun{x}x$ even though $n$ is not statically known. The same static function, applied to different static arguments, can give both static and dynamic results: we want to simplify $(\fun{y}x\times y)0$ to~$0$ but $(\fun{y}x\times y)1$ to~$x$. To enable these simplifications, we delay binding-time analysis for a static function until it is applied, that is, until |lam f| appears as the argument of |app|. To do so, we have to incorporate |f| as is into |lam f|: the type |('c,'a->'b) repr| should be one of \begin{code} S1 of ('c,'a) repr -> ('c,'b) repr | E1 of ('c,'a->'b) C.repr P1 of (('c,'a) repr -> ('c,'b) repr) option * ('c,'a->'b) C.repr \end{code} unlike |('c,int) repr| or |('c,bool) repr|. That is, we need a nonparametric data type, something akin to type-indexed functions and type-indexed types, which \citet{oliveira-typecase} dub the \emph{typecase} design pattern. Thus, typed partial evaluation, like typed CPS transformation (see \S\ref{S:CPS}), inductively defines a map from source types to target types that performs case distinction on the source type. In Haskell, typecase can be implemented using either GADTs or type-class functional dependencies \citep{oliveira-typecase}. The accompanying code shows both approaches (|Incope.hs| and |incope1.hs|), neither of which is portable to OCaml. In addition, the problem of non\hyp exhaustive pattern\hyp matching reappears in the GADT approach because GHC 6.8 and prior cannot see that a particular type of GADT value precludes certain constructors. Although this is an implementation issue of GHC, it indicates that assuring exhaustive pattern match with GADTs requires non-trivial reasoning (beyond the abilities of GHC at the moment); certainly GADTs fail to make it \emph{syntactically} apparent that pattern matching is exhaustive. %% \subsection{Eliminating tags from typecase} %% \label{S:PE-GADT} %% Two common ways to provide typecase in Haskell are %% GADTs and type-class functional dependencies %% \citep{oliveira-typecase}. These %% methods are equivalent, and here we use GADTs; |incope1.hs| %% in the accompanying source code shows the latter. %% We introduce a GADT with four data constructors. 
%% \begin{code} %% data P t where %% VI :: Int -> P Int %% VB :: Bool -> P Bool %% VF :: (P a -> P b) -> P (a -> b) %% E :: C t -> P t %% \end{code} %% The constructors |VI|, |VB|, and |VF| build static terms (like |S0| %% in~\S\ref{S:PE-lift}), and |E| builds dynamic terms (like |D0|). However, %% the type |P t| is no longer parametric in~|t|: the constructor |VF| takes an %% operand of type |P a -> P b| rather than |a -> b|. We define a function %% like |abstr1| above to extract a future-stage computation from a %% value of type |P t|. %% \begin{code} %% abstr :: P t -> C t %% abstr (VI i) = int i %% abstr (VB b) = bool b %% abstr (VF f) = lam (abstr . f . E) %% abstr (E x) = x %% \end{code} %% The cases of this function |abstr| are type-indexed. In particular, the |VF f| %% case uses the method |lam| of the |C| interpreter to compile~|f|. %% We may now make |P| an instance of %% |Symantics| and implement the partial evaluator as follows. We elide %% |mul|, |leq|, |if_|, and |fix|. %% \begin{code} %% instance Symantics P where %% int x = VI x %% bool b = VB b %% add (VI n1) (VI n2) = VI (n1 + n2) %% add e1 e2 = E (add (abstr e1) (abstr e2)) %% lam = VF %% app (VF f) ea = f ea %% app (E f) ea = E (app f (abstr ea)) %% \end{code} %% The implementations of |int|, |bool|, and |add| are like %% in~\S\ref{S:PE-problem}. The interpretation of |lam f| is |VF f|, %% which just wraps the HOAS function |f|. %% We can always compile |f| to a code value, %% but we delay it to apply |f| to concrete arguments. The interpretation of %% |app ef ea| checks to see if |ef| is such a delayed %% HOAS function |VF f|. If it is, we apply |f| to the %% concrete argument |ea|, giving us a chance to perform static %% computations (see example |testpowfix7| in~\S\ref{S:PE-solution}). If |ef| is a %% dynamic value |E f|, we residualize. %% This solution using GADTs works but is not quite satisfactory. First, it %% cannot be ported to MetaOCaml, as GADTs are unavailable there. Second, %% the problem of nonexhaustive pattern\hyp matching reappears in %% |app| above: the type |P t| has four constructors, of which the pattern in %% |app| matches only |VF| and~|E|. One may say that the %% constructors |VI| and |VB| obviously cannot occur because they do not %% construct values of type |P (a -> b)| as required by the type of |app|. %% Indeed, the metalanguage implementation could reason thus: %% if we use inductive families (as in Coq) or logical %% frameworks with canonical forms (as in Twelf with its coverage checker), %% we can prove the pattern matching to be exhaustive. %% Then again, the metalanguage implementation may not reason thus: %% GHC cannot and issues warnings. %% Although this point may seem minor, it is the heart of %% the tagging problem and the purpose of tag elimination. A typed tagged %% interpreter contains many pattern\hyp matching forms that look partial %% but never fail in reality. The %% goal is to make this exhaustiveness \emph{syntactically} apparent. \subsection{The ``final'' solution} \label{S:PE-solution} The problem in the last section is that we want to write \begin{code} type ('c,'dv) repr = P1 of ('c,'dv) static option * ('c,'dv) C.repr \end{code} where |static| is the type function defined % inductively because P below depends on static by \begin{code} ('c,int) static = ('c,int) R.repr ('c,bool) static = ('c,bool) R.repr ('c,'a->'b) static = ('c,'a) repr -> ('c,'b) repr \end{code} Although we can use type classes to define this type function in Haskell, that is not portable to OCaml. 
However, the three typecase alternatives of |static| are already present in existing methods of |Symantics|. Thus emerges a simple and portable solution, if a long-winded one: we bake |static| into the signature |Symantics|. In Figure~\ref{fig:ocaml-simple}, the |repr| type constructor took two arguments |('c,'dv)|; in Figure~\ref{fig:ocaml}, we add an argument |'sv| for the type |('c,'dv) static|. \begin{figure} \begin{floatrule} \begin{code2} module type Symantics = sig type ('c,'sv,'dv) repr val int : int -> ('c,int,int) repr val bool: bool -> ('c,bool,bool) repr val lam : (('c,'sa,'da) repr -> ('c,'sb,'db) repr as 'x) -> ('c,'x,'da -> 'db) repr val app : ('c,'x,'da -> 'db) repr -> (('c,'sa,'da) repr -> ('c,'sb,'db) repr as 'x) val fix : ('x -> 'x) -> (('c, ('c,'sa,'da) repr -> ('c,'sb,'db) repr, 'da -> 'db) repr as 'x) val add : ('c,int,int) repr -> ('c,int,int) repr -> ('c,int,int) repr val mul : ('c,int,int) repr -> ('c,int,int) repr -> ('c,int,int) repr val leq : ('c,int,int) repr -> ('c,int,int) repr -> ('c,bool,bool) repr val if_ : ('c,bool,bool) repr -> (unit -> 'x) -> (unit -> 'x) -> (('c,'sa,'da) repr as 'x) end \end{code2} \end{floatrule} \caption{A (Meta)OCaml embedding of our object language that supports partial evaluation} \label{fig:ocaml} \end{figure} \begin{figure} \begin{floatrule} \begin{code2}[commandchars=\@\[\]] module P = struct type ('c,'sv,'dv) repr = {st: 'sv option; dy: ('c,'dv) code} let abstr {dy = x} = x let pdyn x = {st = None; dy = x} let int (x:int ) = {st = Some (R.int x); dy = C.int x} let bool (x:bool) = {st = Some (R.bool x); dy = C.bool x} let add e1 e2 = match e1, e2 with | {st = Some 0}, e | e, {st = Some 0} -> e | {st = Some m}, {st = Some n} -> int (R.add m n) | _ -> pdyn (C.add (abstr e1) (abstr e2)) let if_ eb et ee = match eb with | {st = Some b} -> if b then et () else ee () | _ -> pdyn (C.if_ (abstr eb) (fun () @!->@! abstr (et @!())) (fun () @!->@! abstr (ee @!()))) let lam f = {st = Some f; dy = C.lam (fun x -> abstr (f (pdyn x)))} let app ef ea = match ef with | {st = Some f} -> f ea | _ -> pdyn (C.app (abstr ef) (abstr ea)) let fix f = let fdyn = C.fix (fun x -> abstr (f (pdyn x))) in let rec self = function | {st = Some @!_} as e -> app (f (lam self)) e | e -> pdyn (C.app fdyn (abstr e)) in {st = Some self; dy = fdyn} end \end{code2} \end{floatrule} \caption{Our partial evaluator (\texttt{mul} and \texttt{leq} are elided)} \label{fig:pe} \end{figure} The interpreters |R|, |L| and~|C| in \S\ref{S:interpreter-RL} and~\S\ref{S:compiler} only use the old type arguments |'c| and~|'dv|, which are treated by the new signature in the same way. Hence, all that needs to change in these interpreters to match the new signature is to add a phantom type argument~|'sv| to~|repr|. For example, the compiler |C| now begins \begin{code} module C = struct type ('c,'sv,'dv) repr = ('c,'dv) code \end{code} with the rest the same. Figure~\ref{fig:pe} shows the partial evaluator~|P|. Its type |repr| expresses the definition for |static| given at the start of this section, with |'sv| taking the crucial place of |('c,'dv) static|. The function |abstr| extracts a future-stage code value from the result of partial evaluation. Conversely, the function |pdyn| injects a code value into the |repr| type. 
Thus, |abstr| and |pdyn| are like the \emph{reify} and \emph{reflect} functions defined in normalization by evaluation \citep{Danvy-TDPE}, but as in~\S\ref{S:PE-problem}, we build dynamic terms alongside any static ones to express how the lift function is indexed by the dynamic type. Analogously, we now build a static type alongside the dynamic type to express how the static type is indexed by the dynamic type. Thus we establish a bijection |static| between static and dynamic types, without defining at the type level the injection\hyp projection pairs customarily used to establish such bijections for interpreters \citep{Ramsey-ML-module-mania,Benton-embedded-interpreters}, partial evaluation \citep{Danvy-TDPE}, and type-level functions \citep{oliveira-typecase}. This emulation of type-indexed types is related to intensional type analysis \citep{Morrisett-intensional,Generic-Haskell}, but intensional type analysis cannot handle our |fix| \citep{xi-guarded}. The static portion of the interpretation of |lam f| is |Some f|, which just wraps the HOAS function |f|. The interpretation of |app ef ea| checks to see if |ef| is such a wrapped HOAS function. If it is, we apply |f| to the concrete argument |ea|, so as to perform static computations (see the example below). If |ef| has only a dynamic part, we residualize. To illustrate how to add optimizations, we improve |add| (and |mul|, elided) to simplify the generated code using the monoid (and ring) structure of~|int|: not only is addition performed statically (using~|R|) when both operands are statically known, but it is eliminated when one operand is statically~$0$; similarly for multiplication by~$0$ or~$1$. Although our basic machinery for partial evaluation is independent of such algebraic simplifications, it makes them easy to add and to abstract over the specific domains (such as monoid or ring) where they apply. These simplifications and abstractions help a lot in a large language with more base types and primitive operations. Incidentally, the accompanying code actually contains a more general implementation mechanism for such features, inspired in part by previous work in generative linear algebra~\citep{CaretteKiselyov05}. Any partial evaluator must decide how much to unfold recursion statically: unfolding too little can degrade the residual code, whereas unfolding too much risks nontermination. Our partial evaluator is no exception, because our object language includes |fix|. The code in Figure~\ref{fig:pe} takes the na\"\i ve approach of ``going all the way'', that is, whenever the argument is static, we unfold |fix| rather than residualize it. A conservative alternative is to unfold recursion only once, then residualize: \begin{code} let fix f = f (pdyn (C.fix (fun x -> abstr (f (pdyn x))))) \end{code} Many sophisticated approaches have been developed to decide how much to unfold \citep{Jones-Mix,jones-partial}, but this issue is orthogonal to our presentation. A separate concern in our treatment of |fix| is possible code bloat in the residual program, which calls for let-insertion \citep{BondorfDanvy}. Given this implementation of~|P|, our running example \begin{code} let module E = EX(P) in E.test1 () \end{code} evaluates to \begin{code} {P.st = Some true; P.dy = .<true>.} \end{code} of type |('a, bool, bool) P.repr|. Unlike with~|C| in~\S\ref{S:compiler}, a $\beta$-reduction has been statically performed to yield |true|. 
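
To see the algebraic simplification of |add| at work, consider the object term $\fun{x}x+0$, which mixes a dynamic variable with a static literal. The test functor |EX0| below is a sketch of ours and not part of the accompanying code.
\begin{code}
module EX0(S: Symantics) = struct
  open S
  let addzero () = lam (fun x -> add x (int 0))   (* x dynamic, 0 static *)
end
let module E = EX0(P) in E.addzero ()
\end{code}
The last expression should evaluate to (up to the renaming of the bound variable in the printed code)
\begin{code}
{P.st = Some <fun>; P.dy = .<fun x_1 -> x_1>.}
\end{code}
that is, the addition of the statically known~$0$ is eliminated rather than residualized.
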
More interestingly, whereas |testpowfix7| compiles to a code value with many $\beta$-redexes in~\S\ref{S:compiler}, the partial evaluation \begin{code} let module E = EX(P) in E.testpowfix7 \end{code} gives the desired result \begin{code} {P.st = Some <fun>; P.dy = .<fun x -> x * (x * (x * (x * (x * (x * x)))))>.} \end{code} If the object program does not use |fix|, then the output of |P| is $\beta$-normal. Also, |P| is correct in that, if interpreting an object term using~|P| terminates, then the |dy| component of the output is equivalent to the interpretation of the same object term using~|C|, modulo $\alpha$-renaming, $\beta$-reduction, and algebraic simplification. To prove this correctness by structural induction on the object term, we need to strengthen the induction hypothesis to assert that the |st| component, if not |None|, is consistent with the |dy| component. All pattern\hyp matching in~|P| is \emph{syntactically} exhaustive, so it is patent to the metalanguage implementation that |P| never gets stuck. Further, |P| uses pattern\hyp matching only to check if a value is known statically, never to check what type a value has dynamically. In other words, our partial evaluator tags phases (with |Some| and |None|) but not object types, so it is patent that the \emph{output} of~|P| never gets stuck. Our partial evaluator owes much to \citet{Thiemann-combinators} and \citet{sumii-hybrid}, who deforested the object term representation and expressed a partial evaluator as a collection of term combinators in a typed metalanguage. Like us, \citeauthor{sumii-hybrid} follow \citet{Sperber-SelfApplicable} and \citet{asai-binding-time} in building static and dynamic terms in tandem, to combine offline and online partial evaluation. \citearound{'s earlier self-reducers for the untyped $\lambda$-calculus}\Citet{JFP-Mogensen,Mogensen-SelfApplicable} also build static and dynamic terms in tandem. However, they build a static term for every object term, even a bound variable, so they move some work from |app| to~|pdyn| (in terms of Figure~\ref{fig:pe}) and remain untyped. In contrast, we follow Sperber, Asai, and \citeauthor{sumii-hybrid} in leaving the static term optional, so as to perform lifting without juggling explicit type indices in the encoding of an object term. The idea of generating static and dynamic components alongside each other is part of the tradition that developed partial evaluators such as Schism \citep[\S5]{consel-schism-93}. Our contribution to the literature on partial evaluation is to use mere Hindley-Milner types in the metalanguage to assure statically and patently that partially evaluating a well-typed object program not only never gets stuck but also, if it terminates, produces a well-typed output program that never gets stuck. Moreover, thanks to the online binding-time analysis performed by our partial evaluator (in contrast to \citeauthor{Thiemann-combinators}'s), these types form an instance of a general |Symantics| signature that encompasses other interpreters such as evaluation and compilation. This early and manifest assurance of type safety contrasts, for example, with \citearound{'s compiler generator (cogen) for ML}\citet{Birkedal-PE-ML}, which transforms a program into its tagless generating extension. 
Because that cogen uses a universal type, the fact that it never generates an ill-typed generating extension from a well-typed input program is only manifest when each generating extension is type-checked, and the fact that the generating extension never generates an ill-typed residual program from well-typed static input is only manifest when each residual program is type-checked. Similarly, the fact that the partial evaluator of \citet{fiore:nbe-ppdp2002} and that of \citet{balat:tdpe-popl2004}, both of which use delimited control operators, never turn well-typed code into ill-typed code is not assured by the metalanguage, whether or not as part of a typed family of interpreter modules. \begin{comment} % Removed the following comment in view of Section 4.5 of Danvy's % original TDPE paper (POPL 1996) It is type-directed, so the user must represent, as a term, the type of every term to be partially evaluated. We shift this work to the type checker of the metalanguage. By avoiding term-level type representations, our approach makes it easier to perform algebraic simplifications (as in~\S\ref{S:PE-solution}). \end{comment} Our partial evaluator reuses the compiler~|C| and the evaluator~|R| by composing them. This situation is simpler than \citets{SperberThiemann:TwoForOne} composition of a partial evaluator and a compiler, but the general ideas are similar. \fi % don't even have space for CPS \ifshort\else \section{Continuation\hyp passing style}\label{variations} Our approach accommodates several variants, including a call-by-name CPS interpreter and a call-by-value CPS transformation. Of course, CPS is a well-studied topic, and \citearound{'s work on program generation}\citet{Thiemann-combinators} already includes a CPS evaluator expressed using combinator functions rather than data constructors. We focus here on expressing CPS transformations as part of a larger, typed family of interpreters. \ifshort\label{state}\label{S:CPS}% This lets us decouple the evaluation strategy of the object language from that of the metalanguage. The accompanying code shows the CBN CPS interpreter (module |RCN| implementing |Symantics|) and a CBV CPS transformer |CPST|. The latter explicitly maps CPS interpretations to (direct) interpretations performed by the base interpreter~|S|. All these interpreters are typed, tagless and \emph{type-preserving} (as well as fully polymorphic in the answer-type). The type preservation is the consequence of the type soundness of the metalanguage. We can modify the CBV CPS transformation to pass a piece of state along with the continuation. This technique lets us support mutable state. Due to the severe lack of space we cannot describe these interpreters and refer the reader to the accompanying code. \else \subsection{Call-by-name CPS interpreters}\label{S:CPS} The object language generally inherits the evaluation strategy from the metalanguage---call-by-value (CBV) in OCaml, call-by-name (CBN) in Haskell.\footnote{To be more precise, most Haskell implementations use call-by-need, which is observationally equivalent to call-by-name because sharing is not observable \citep{ariola-call-by-need-popl}.} To represent a CBN object language in a CBV metalanguage, \citet{reynolds-definitional,reynolds-relation} and \citet{PlotkinCBN} introduce CPS to make the evaluation strategy of a definitional interpreter indifferent to that of the metalanguage. To achieve the same indifference in the typed setting, we build a CBN CPS interpreter for our object language in OCaml. 
The interpretation of an object term is a function mapping a continuation~|k| to the answer returned by~|k|. \begin{code} let int (x:int) = fun k -> k x let add e1 e2 = fun k -> e1 (fun v1 -> e2 (fun v2 -> k (v1 + v2))) \end{code} In both |int| and |add|, the interpretation has type |(int -> 'w)|\texttt{ }|-> 'w|, where |'w| is the (polymorphic) answer type. Unlike CBV CPS, the CBN CPS interprets abstraction and application as follows: \begin{code} let lam f = fun k -> k f let app e1 e2 = fun k -> e1 (fun f -> f e2 k) \end{code} Characteristic of CBN, |app e1 e2| does not evaluate the argument~|e2| by applying it to the continuation~|k|. Rather, it passes |e2| unevaluated to the abstraction. Interpreting $\fun{x} x+1$ yields type \begin{code} ((((int -> 'w1) -> 'w1) -> (int -> 'w1) -> 'w1) -> 'w2) -> 'w2 \end{code} We would like to collect those interpretation functions into a module with signature |Symantics|, to include the CBN CPS interpreter within our general framework. Alas, as in~\S\ref{S:PE-problem}, the type of an object term inductively determines the type of its interpretation: the interpretation of an object term of type~$t$ may not have type |(|$t$|->'w)->'w|, because $t$ may be a function type. Again we simulate a type function with a typecase distinction, by changing the type arguments to |repr|. Luckily, the type function |static| needed for the partial evaluator in~\S\ref{S:PE-solution} is precisely the same type function we need for CBN CPS\@, so our CBN interpreter can match the |Symantics| signature in~\S\ref{S:PE-solution}, without even using the |'dv| argument to |repr|. \begin{code} module RCN = struct type ('c,'sv,'dv) repr = {ko: 'w. ('sv -> 'w) -> 'w} let int (x:int) = {ko = fun k -> k x} let add e1 e2 = {ko = fun k -> e1.ko (fun v1 -> e2.ko (fun v2 -> k (v1 + v2)))} let if_ eb et ee = {ko = fun k -> eb.ko (fun vb -> if vb then (et ()).ko k else (ee ()).ko k)} let lam f = {ko = fun k -> k f} let app e1 e2 = {ko = fun k -> e1.ko (fun f -> (f e2).ko k)} let fix f = let rec fx f n = app (f (lam (fx f))) n in lam (fx f) let run x = x.ko (fun v -> v) end \end{code} This interpreter~|RCN| is fully polymorphic over the answer type, using higher-rank polymorphism through OCaml record types. To avoid this higher-rank polymorphism in the core language, we could also define |RCN| as a functor parameterized over the answer type. \begin{code} module RCN(W: sig type w end) = struct type ('c,'sv,'dv) repr = ('sv -> W.w) -> W.w ... \end{code} This alternative is more cumbersome to use because the functor needs to be applied once for each answer type, but it translates to, for example, Standard ML\@, whose core language does not support higher-rank polymorphism. Because |RCN| has the signature |Symantics|, we can instantiate our previous examples with it, and all works as expected. More interesting is the example $(\fun{x}1)\bigl((\fix{f}f)\mathinner2\bigr)$, which terminates under CBN but not CBV\@. \begin{code} module EXS(S: Symantics) = struct open S let diverg () = app (lam (fun x -> int 1)) (app (fix (fun f->f)) (int 2)) end \end{code} Interpreting |EXS| with the |R| interpreter of \S\ref{S:interpreter-RL} does not terminate. \begin{code} let module M = EXS(R) in M.diverg () \end{code} In contrast, the CBN interpreter gives the result~|1|. \begin{code} let module M = EXS(RCN) in RCN.run (M.diverg ()) \end{code} \subsection{CBV CPS transformers} Changing one definition turns our CBN CPS interpreter into CBV\@. 
\begin{code} module RCV = struct include RCN let lam f = {ko = fun k -> k (fun e -> e.ko (fun v -> f {ko = fun k -> k v}))} end \end{code} Now an applied abstraction evaluates its argument before proceeding. The interpreter~|RCV| is useful for CBV evaluation of the object language whether the metalanguage is CBV or CBN\@. To match the same |Symantics| signature as |RCN| above, |RCV| uses \citets{reynolds-relation} CBV CPS transformation, in which variables denote computations (that is, functions from continuations), rather than \citets{PlotkinCBN}, in which variables denote values. We turn to a more general approach to CBV CPS: a CPS transformer that turns any implementation of |Symantics| into a CPS version of that evaluator. This functor on interpreters performs \citets{PlotkinCBN} textbook CPS transformation on the object language. \begin{code} module CPST(S: Symantics) = struct let int i = S.lam (fun k -> S.app k (S.int i)) let add e1 e2 = S.lam (fun k -> S.app e1 (S.lam (fun v1 -> S.app e2 (S.lam (fun v2 -> S.app k (S.add v1 v2)))))) let lam f = S.lam (fun k -> S.app k (S.lam (fun x -> f (S.lam (fun k -> S.app k x))))) let app e1 e2 = S.lam (fun k -> S.app e1 (S.lam (fun f -> S.app e2 (S.lam (fun v -> S.app (S.app f v) k))))) let fix = S.fix end \end{code} This (abbreviated) code explicitly maps CPS interpretations to (direct) interpretations performed by the base interpreter~|S|. The module returned by |CPST| does not define |repr| and thus does not have signature |Symantics|. The reason is again the type of |lam f|. Whereas |int| and |add| return the (abbreviated) type |('c, ..., (int -> 'w) -> 'w) S.repr|, the type of \texttt{lam (add (int~1))} is \begin{code} ('c, ..., ((int -> (int -> 'w1) -> 'w1) -> 'w2) -> 'w2) S.repr \end{code} Hence, to write the type equation defining |CPST.repr| we again need a type function with a typecase distinction, similar to |static| in~\S\ref{S:PE-solution}. Alas, the type function we need is not identical to |static|, so again we need to change the type arguments to |repr| in the |Symantics| signature. As in~\S\ref{S:PE-solution}, the terms in previous implementations of |Symantics| stay unchanged, but the |repr| type equations in those implementations have to take a new (phantom) type argument. The verbosity of these types is the only difficulty in defining a replacement signature for |Symantics| which captures that of |CPST| as well. For brevity, we just use the module returned by |CPST| as is. Because it does not match the signature |Symantics|, we cannot apply the |EX| functor to it. Nevertheless, we can write the tests. \begin{code} module T = struct module M = CPST(C) open M let test1 () = app (lam (fun x -> x)) (bool true) (* same as before *) let testpowfix () = ... (* same as before *) let testpowfix7 = (* same as before *) lam (fun x -> app (app (testpowfix ()) x) (int 7)) end \end{code} We instantiate |CPST| with the desired base interpreter~|C|, then use the result |M| to interpret object terms. Those terms are \emph{exactly} as before. Having to textually copy the terms is the price we pay for this simplified treatment. \begin{comment} Our discussion of self\hyp interpretation in~\S\ref{selfinterp} shows that this copying is not frivolous but represents plugging a term into a context, which is one of the many faces of polymorphism. 
\end{comment} With |CPST| instantiated by the compiler~|C| above, |T.test1| gives \begin{code} .<fun x_5 -> (fun x_2 -> x_2 (fun x_3 x_4 -> x_4 x_3)) (fun x_6 -> (fun x_1 -> x_1 true) (fun x_7 -> x_6 x_7 x_5))>. \end{code} This output is a na\"{\i}ve CPS transformation of $(\fun{x}x)\True$, containing several apparent $\beta$-redexes. To reduce these redexes, we just change~|T| to instantiate |CPST| with |P| instead. \begin{code} {P.st = Some <fun>; P.dy = .<fun x_5 -> x_5 true>.} \end{code} \subsection{Abstracting over an inductive map on object types} \begin{figure}[t] \begin{floatrule} \begin{code} module type Symantics1 = sig type 'c dint type 'c dbool type ('c,'da,'db) darr type ('c,'dv) repr val int : int -> ('c, 'c dint) repr val bool: bool -> ('c, 'c dbool) repr val lam : (('c,'da) repr -> ('c,'db) repr) -> ('c, ('c,'da,'db) darr) repr val app : ('c, ('c,'da,'db) darr) repr -> ('c, 'da) repr -> ('c, 'db) repr val fix : ('x -> 'x) -> (('c, ('c,'da,'db) darr) repr as 'x) val add : ('c, 'c dint) repr -> ('c, 'c dint) repr -> ('c, 'c dint) repr val mul : ('c, 'c dint) repr -> ('c, 'c dint) repr -> ('c, 'c dint) repr val leq : ('c, 'c dint) repr -> ('c, 'c dint) repr -> ('c, 'c dbool) repr val if_ : ('c, 'c dbool) repr -> (unit -> 'x) -> (unit -> 'x) -> (('c, 'da) repr as 'x) end \end{code} \end{floatrule} \caption{A (Meta)OCaml embedding that abstracts over an inductive map on object types} \label{fig:inductive} \end{figure} Having seen that each CPS interpreter above matches a differently modified |Symantics| signature, one may wonder whether |Symantics| can be generalized to encompass them all. The answer is yes: the |Symantics1| signature in Figure~\ref{fig:inductive} abstracts our representation of object terms not only over the type constructor |repr| but also over the three branches that make up an inductive map, such as |static| in~\S\ref{S:PE-solution}, from object types to metalanguage types. The first two branches (for the object types $\ZZ$ and~$\BB$) become the abstract types |dint| and |dbool|, whereas the third branch (for object types $t_1\to t_2$) becomes the new abstract type constructor |darr|. Almost every interpreter in this paper can be made to match the |Symantics1| signature without changing any terms, by defining |dint|, |dbool|, |darr|, and |repr| suitably. For example, the types in the evaluator~|R| and the CBV CPS transformer |CPST| should be changed as follows. \begin{code}[commandchars=\@\[\]] module R = struct type 'c dint = int type 'c dbool = bool type ('c,'da,'db) darr = 'da -> 'db type ('c,'dv) repr = 'dv ... module CPST(S: Symantics1)(W: sig type 'c dw end) = struct open W type 'c dint = 'c S.dint type 'c dbool = 'c S.dbool type ('c,'da,'db) darr = ('c, 'da, ('c, ('c, 'db, 'c dw) S.darr, 'c dw) S.darr) S.darr type ('c,'dv) repr = ('c, ('c, ('c, 'dv, 'c dw) S.darr, 'c dw) S.darr) S.repr ... \end{code} Modified thus, |CPST| produces modules that match |Symantics1| and can be not only evaluated or compiled but also transformed using |CPST| again. The accompanying source file |inco.ml| shows the details, including one-pass CPS transformations in the higher-order style of \citet{danvy-representing}. The abstract type constructors in Figure~\ref{fig:inductive} exemplify the amount of polymorphism that our technique requires of the metalanguage in order to represent a given object language. 
Generally, our technique represents term constructions (such as~$+$) by applying abstract functions (such as \texttt{add}) and represents type constructions (such as~$\to$) and typing judgments (namely `$:$') by applying abstract type constructors (such as \texttt{darr} and \texttt{repr}).
Therefore, it requires the metalanguage to support enough polymorphism to abstract over the interpretation of each inference rule for well-typed terms and well-kinded types.
For example, to encode System~F's term generalization rule
\begin{equation*}
\begin{prooftree}
\[ [\alpha:\star] \proofoverdots e : t \]
\justifies \Lambda\alpha.\,e : \forall\alpha.\,t \text,
\end{prooftree}
\end{equation*}
the metalanguage must let terms (representing $\Lambda\alpha.\,e$) abstract over terms (interpreting~$\Lambda$) that are polymorphic both over type constructors of kind $\star\to\star$ (representing $t$ with $\alpha$ free) and over polymorphic terms of type $\forall\alpha\mathord:\mathord\star\dotso$ (representing $e$ with $\alpha$ free).
These uses of higher-rank and higher-kind polymorphism let us type-check and compile object terms separately from interpreters.
This observation is consistent with the role of polymorphism in the separate compilation of modules \citep{shao-typed}.
The only interpreter in this paper that does not fit |Symantics1| is the partial evaluator~|P|.
It does not fit because it uses \emph{two} inductive maps on object types---both |'sv| and |'dv| in Figure~\ref{fig:ocaml}.
We could define a |Symantics2| signature to abstract over \emph{two} inductive maps over object types; it would include 4 abstract types and 2 abstract type constructors in addition to |repr|.
It would then be easy to turn~|P| into a functor that returns a |Symantics2| module, but the input to~|P| can still only match |Symantics1|.
This escalation points to a need for either record-kind polymorphism (so that |'dv| in Figure~\ref{fig:inductive} may be more than just one type) or type-indexed types (so that we do not need to emulate them in the first place).

\subsection{State and imperative features}
\label{state}

\begin{figure}
\begin{floatrule}
\begin{proofrules}
\[ \justifies \deref:t_s \]
\[ e:t_s \justifies \set e:t_s \]
\[ e_1:t_1 \quad \[ [x:t_1] \proofoverdots e_2:t_2 \] \justifies \lapp{e_1}{x}{e_2}:t_2 \]
\end{proofrules}
\end{floatrule}
\caption{Extending our typed object language with mutable state of type~$t_s$}
\label{fig:state}
\end{figure}

We can modify a CBN or CBV CPS transformation to pass a piece of state along with the continuation.
This technique lets us support mutable state (or more generally any monadic effect) by representing it using continuations \citep{filinski-representing}.
As Figure~\ref{fig:state} shows, we extend our object language with three imperative features.
\begin{enumerate}
\item ``$\deref$'' gets the current state;
\item ``$\set e$'' sets the state to the value of~$e$ and returns the previous value of the state;
\item the let-form ``$\lapp{e_1}{x}e_2$'' evaluates $e_1$ before~$e_2$ even if $e_2$ does not use~$x$ and even if evaluation is CBN\@.
\end{enumerate}
The form ``$\lapp{e_1}{x}e_2$'' is equivalent to ``let'' in \citearound{'s monadic metalanguage}\citet{moggi-notions}.
If $x$ does not appear in~$e_2$, then it is the same as the more familiar sequencing form ``$e_1;e_2$''.
We embed this extended object language into OCaml by extending the |Symantics| signature in Figure~\ref{fig:ocaml}.
\begin{code} module type SymSI = sig include Symantics type state type 'c states (* static version of the state *) val lapp : (('c,'sa,'da) repr as 'x) -> ('x -> 'y) -> (('c,'sb,'db) repr as 'y) val deref : unit -> ('c, 'c states, state) repr val set : (('c, 'c states, state) repr as 'x) -> 'x end \end{code} In HOAS\@, we write the term ``$\lapp{e_1}{x}e_2$'' as |lapp e1 (fun x -> e2)|; the type of |lapp| is that of function application with the two arguments swapped. We can encode the term ``$\lapp{\deref}{x} (\set 2;\, x+\deref)$'' as the OCaml functor \begin{code} module EXSI_INT(S: SymSI with type state = int and type 'c states = int) = struct open S let test1 () = lapp (deref ()) (fun x -> lapp (set (int 2)) (fun _ -> add x (deref ()))) end \end{code} The accompanying source code shows several more tests, including a test for higher-order state and a power function that uses state as the accumulator. The state-passing interpreter extends the CBN CPS interpreter |RCN| of~\S\ref{S:CPS}. \begin{code} module RCPS(ST: sig type state type 'c states type ('c,'sv,'dv) repr = {ko: 'w. ('sv -> 'c states -> 'w) -> 'c states -> 'w} end) = struct include ST ... let lapp e2 e1 = {ko = fun k -> e2.ko (fun v -> (app (lam e1) {ko = fun k -> k v}).ko k)} let deref () = {ko = fun k s -> k s s} let set e = {ko = fun k -> e.ko (fun v s -> k s v)} let get_res x = fun s0 -> x.ko (fun v s -> v) s0 end \end{code} The implementations of |int|, |app|, |lam|, and so on are \emph{identical} to those of |RCN| and elided. New are the extended type |repr|, which now includes the state, and the functions |lapp|, |deref|, and |set| representing imperative features. The interpreter is still CBN, so evaluating |app ef ea| might not evaluate |ea|, but evaluating |lapp ea ef| always does. For first-order state, such as of type~$\ZZ$, we instantiate the interpreter as \begin{code} module RCPSI = RCPS(struct type state = int type 'c states = int type ('c,'sv,'dv) repr = {ko: 'w. ('sv -> 'c states -> 'w) -> 'c states -> 'w} end) \end{code} If the state has a higher-order type, then the types |state| and |'c states| are no longer the same, and |'c states| is mutually recursive with the type |('c,'sv,'dv) repr|, as demonstrated in the accompanying source code. Because the |SymSI| signature extends |Symantics|, any encoding of a term in the pure object language (that is, any functor that takes a |Symantics| module as argument) can also be used as a term in the extended object language (for example, applied to an implementation of |SymSI|). In particular, |RCPSI| matches the |Symantics| signature and implements the unextended object language: we can pass |RCPSI| to the functor |EX| (Figure~\ref{fig:ocaml-simple}) and run the example |test1| from there. The main use for |RCPSI| is to interpret the extended language. \begin{code} module EXPSI_INT = EXSI_INT(RCPSI) let cpsitesti1 = RCPSI.get_res (EXPSI_INT.test1 ()) 100 val cpsitesti1 : int = 102 \end{code} We reiterate that this implementation adding state and imperative features is very close to the CPS interpreter and uses no new techniques. We can also add mutable references to the object language using mutable references of the metalanguage, as shown in the accompanying code. Yet another way to add side effects to the object language is to write a monadic interpreter (for a specific monad or a general class of monads), which can be structured as a module matching the |Symantics1| signature in Figure~\ref{fig:inductive}. 
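To give a flavor of the reference-based alternative just mentioned, here is a minimal stand-alone sketch (it is \emph{not} the accompanying source code, and it deliberately ignores the |repr| typing discipline by using bare metalanguage values): a direct-style evaluator in the spirit of |R| can interpret $\deref$, $\set e$ and ``$\lapp{e_1}{x}e_2$'' with a single metalanguage reference cell. In this CBV sketch the sequencing behaviour of |lapp| comes for free from the metalanguage, unlike in |RCPS|, where the object language is CBN.
\begin{code}
(* Illustrative sketch only: integer object-level state held in a
   metalanguage reference; representations are bare OCaml values. *)
let state = ref 100                 (* initial state, as in the test above *)
let deref () = !state
let set v = let old = !state in state := v; old
let lapp e1 e2 = e2 e1              (* e1 is already evaluated under CBV *)

let test1 () =
  lapp (deref ()) (fun x ->
    lapp (set 2) (fun _ -> x + deref ()))
(* test1 () evaluates to 102, matching cpsitesti1 above *)
\end{code}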
\fi \fi \ifshort\else \begin{comment} \jacques{Should we really eliminate this entirely?} \section{Polymorphism over interpreters} It is crucial to our achievements above that the same object term can be interpreted by multiple interpreters, whether an evaluator, compiler, partial evaluator, or CPS transformer. In the typical \emph{initial} approach to DSL embedding, interpretation is represented in the metalanguage by applying the interpreter to the object term. Flipping this application around, our \emph{final} approach applies the object term to the interpreter. In this section, we explain how this flip amounts to Church-encoding the recursive GADT of well-typed object terms and how it affects the notion of self-interpretation and Jones optimality. At the type level, an interpreter is represented by its type constructor |repr|. For example, a |Symantics| module or instance that matches the simple encoding in~\S\ref{language} is in System~$F_\omega$ an interpreter , over which an object term is a polymorphic function---either an ML functor from a |Symantics| module containing |repr| or a Haskell value with a |Symantics| constraint over |repr|. \section{Self-interpretation}\label{selfinterp} We turn to interpreting the object language in the object language, to clarify how expressive our typed object language can be and to argue that our partial evaluator is Jones\hyp optimal. Given an \emph{encoding} of each object term~$e$ as an \emph{object} term~$\Encode{e}$, a \emph{self\hyp interpreter} is usually defined as an object function~$\si$ such that any object term~$e$ is observationally equivalent to the object application $\si\Encode{e}$ \citep{jones-partial,taha-tag,Danvy-tagging-encoding}. A particular use of self-interpreters is in defining the notion of optimality of partial evaluation. A partial evaluator~$\pe$ maps object terms~$e$ to observationally equivalent object terms~$\pe(e)$. Recall that $\si\Encode{e}$ is also observationally equivalent to $e$ but less efficient (reducing $\si\Encode{e}$ takes more resources). A partial evaluator is said to be \emph{optimal} with respect to~|si| \citep{jones-challenging} if the partial evaluator removes all the ineffiency in $\si\Encode{e}$ added by the self-interpreter: that is, for all $e$, $\pe(\si\Encode{e})$ is $\alpha$\hyp equivalent to~$e$ (or in some accounts, no less efficient than~$e$). Self\hyp interpretation in our framework is straightforward at the term level: the functions comprising the interpreters in~\S\ref{S:interpreter-RL} may as well be written in our object language, as in Figure~\ref{fig:self-eval}. \begin{figure} \begin{align*} \ident{int} &= \fun{x} x \\ \ident{add} &= \fun{x} \fun{y} x+y \\ \ident{if\_}&= \fun{b} \fun{t} \fun{e} \cond{b}{t\,0}{e\,0} \displaybreak[0] \\ \ident{lam} &= \fun{f} f \\ \ident{app} &= \fun{f} \fun{x} fx \\ \ident{fix} &= \fun{g} \fix{f} \fun{x} gfx \end{align*} \caption{The \emph{object} functions implementing an evaluator. (We use the number~$0$ in lieu of a unit value.)} \label{fig:self-eval} \end{figure} We thus map each object term~$e$ to an object term~$\encode{e}$ as follows. We call this mapping \emph{pre-encoding}. 
\begin{equation} \begin{split} \encode{x} &= x \\ \encode{n} &= \ident{int}\, n \\ \encode{\fun{x}e} &= \ident{lam} (\fun{x} \encode{e}) \\ \encode{e_1 + e_2} &= \ident{add} \encode{e_1} \encode{e_2} \\ \encode{\cond{b}{t}{e}} &= \rlap{$\ident{if\_} \encode{b} \left(\fun{\_}\encode{t}\right) \left(\fun{\_}\encode{e}\right)$} \\ \encode{fx} &= \ident{app} \encode{f} \encode{x} \\ \encode{\fix{f}e} &= \ident{fix} (\fun{f} \encode{e}) \end{split} \end{equation} The metavariables $x$ and~$n$ stand for a variable and an integer, respectively. This pre-encoding is just like how we represent object terms in the metalanguage in the preceding sections, but it produces terms in the object language rather than the metalanguage. To evaluate~$\encode{e}$, then, we instantiate the free variables in~$\encode{e}$ such as $\ident{int}$, $\ident{lam}$, and $\ident{add}$ by their definitions given in Figure~\ref{fig:self-eval}. For example, the familiar object term $(\fun{x}x)\True$ pre-encodes to \begin{equation} \encode{(\fun{x}x)\True} = \ident{app} (\ident{lam} (\fun{x} x))\, (\ident{bool}\, \True), \end{equation} and to evaluate this pre-encoded term is to evaluate the object term \begin{equation} (\fun{f} \fun{x} fx) \, ((\fun{f} f) (\fun{x} x))\, ((\fun{b} b) \True). \end{equation} Because the evaluator in Figure~\ref{fig:self-eval} mostly consists of glorified identity functions, our simple partial evaluator reduces the result of this instantiation to~$e$. In general, to interpret~$\encode{e}$ using an interpreter is to instantiate its free variables by that interpreter's definitions. \subsection{Avoiding higher polymorphism} Any approach to self\hyp interpretation needs to spell out first how to encode object terms~$e$ to object terms~$\Encode{e}$, and then how to interpret~$\Encode{e}$ in the object language. For our approach, we want to define encoding in terms of pre-encoding, and interpretation using some notion of instantiation. Unfortunately, the simple type structure of our object language hinders both tasks. To continue with the example term above, we could try to define \begin{equation} \Encode{e} = \fun{\ident{app}} \fun{\ident{lam}} \fun{\ident{bool}} \encode{e}, \end{equation} in particular \begin{equation} \Encode{(\fun{x}x)\True} = \fun{\ident{app}} \fun{\ident{lam}} \fun{\ident{bool}} \ident{app} (\ident{lam} (\fun{x} x))\, (\ident{bool}\, \True). \end{equation} To type-check this encoded term, we give the bound variable $\ident{lam}$ the simple type $(\BB\to\BB)\to\BB\to\BB$. We then define the self\hyp interpreter \begin{equation} \si = \fun{e} e (\fun{f} \fun{x} fx) (\fun{f} f) (\fun{b} b) \end{equation} and apply it to the encoded term. The result is the object term \begin{equation} \kern-\mintagsep \bigl(\fun{e} e (\fun{f} \fun{x} fx) (\fun{f} f) (\fun{b} b)\bigr) \bigl( \fun{\ident{app}} \fun{\ident{lam}} \fun{\ident{bool}} \ident{app} (\ident{lam} (\fun{x} x))\, (\ident{bool}\, \True) \bigr), \end{equation} which partially evaluates to~$\True$ easily. However, encoding fails on a term with multiple $\lambda$\hyp abstractions at different types. For example, the pre-encoding \begin{equation} \label{e:pre-encoding-example} \encode{\fun{f}\fun{x}fx} = \ident{lam} (\fun{f} \ident{lam} (\fun{x} \ident{app} f x)) \end{equation} does not type-check in any typing environment, because $\ident{lam}$ needs to take two incompatible types. 
In sum, we need more polymorphism in the object type system to type $\ident{lam}$, $\ident{app}$, $\ident{fix}$, and~$\ident{if\_}$ (and $\Encode{e}$ and~$\si$). (The polytypes in |Symantics| given by Haskell's type classes and OCaml's modules supply this polymorphism.) Moreover, we need to encode any polymorphism of the object language \emph{into} the object language to achieve \emph{self}\hyp interpretation. \subsection{Introducing let-bound polymorphism} Instead of adding higher-rank and higher-kind polymorphism to our object language (along with polymorphism over kinds!), we add let-bound polymorphism. As usual, we can add a new typing rule \begin{equation} \label{e:let} \let\vcenter\vbox \begin{prooftree} e_1:t_1 \quad \subst{e_2}{x}{e_1}:t_2 \justifies \be{x=e_1} e_2 : t_2 \end{prooftree} . \end{equation} The pre-encoding of a let\hyp expression is trivial. \begin{align} \encode{\be{x=e_1}e_2} \quad &= \quad \be{x=\encode{e_1}} \encode{e_2} \intertext{A \emph{context} is an object term with a hole~$[~]$. The hole may occur under a binder, so plugging a term into the context may capture free variables of the term. By pre-encoding a hole to a hole, we extend pre-encoding from a translation on terms to one on contexts.} \encode{[~]} \quad &= \quad [~] \end{align} We define an interpreter in the object language to be not a term but a context. For example, the evaluator is the context \begin{align} \label{e:evaluator} \si[~] &= \begin{tabular}[t]{@{}Ml@{}>{{}}Ml@{}Ml@{}} \be{\ident{int} &= \fun{x} x&} \\ \be{\ident{add} &= \fun{x} \fun{y} x+y&} \\ \be{\ident{if\_}&= \fun{b} \fun{t} \fun{e} \cond{b}{t\,0}{e\,0}&} \\ \be{\ident{lam} &= \fun{f} f&} \\ \be{\ident{app} &= \fun{f} \fun{x} fx&} \\ \be{\ident{fix} &= \fun{g} \fix{f} \fun{x} gfx&} [~], \end{tabular} \intertext{and the length\hyp measurer at the end of~\S\ref{S:interpreter-RL} is the context} \label{e:length-measurer} \mathrm{L}[~] &= \begin{tabular}[t]{@{}Ml@{}>{{}}Ml@{}Ml@{}} \be{\ident{int} &= \fun{x} 1&} \\ \be{\ident{add} &= \fun{x} \fun{y} x+y+1&} \\ \be{\ident{if\_}&= b + t\,0 + e\,0&} \\ \be{\ident{lam} &= \fun{f} f\,0 + 1&} \\ \be{\ident{app} &= \fun{f} \fun{x} f + x + 1&} \\ \be{\ident{fix} &= \fun{g} g\,0 + 1&} [~]. \end{tabular} \end{align} To interpret an object term~$e$ using an interpreter $I[~]$ is to evaluate the object term~$I[\encode{e}]$. $\si$~is a self\hyp interpreter in the following sense. \begin{proposition} $\si[\encode{e}]$ is observationally equivalent to~$e$. \end{proposition} As a corollary, we can pre-encode $\si$ itself as a context: the term $\si[\encode{\si[\encode{e}]}]$ is observationally equivalent to $\si[\encode{e}]$, and in turn to~$e$. In other words, $\si$ can interpret itself. Our partial evaluator is optimal with respect to the self\hyp interpreter~$\si$. \begin{proposition} Let $\pe$ be the partial evaluator~|P| in~\S\ref{S:PE-solution}. Then the object terms $\pe(\si[\encode{e}])$ and~$\pe(e)$ are either both undefined or both defined and equal up to $\alpha$\hyp conversion. \end{proposition} \subsection{Contexts clarify polymorphism} \label{S:clarify} We always type-check a pre-encoded term~$\encode{e}$ in the context of a particular interpreter~$I[~]$, never alone. For example, to type-check the pre-encoded term~\eqref{e:pre-encoding-example}, we must plug it into an interpreter, such as the evaluator~\eqref{e:evaluator}. 
This treatment has the drawback that we must duplicate the pre-encoding of a term in order to interpret it in multiple ways, that is, to plug it into multiple interpreters such as the evaluator~\eqref{e:evaluator} and the length\hyp measurer~\eqref{e:length-measurer}. In return, we avoid adding polymorphism to the object language's type system, because we can state the let rule~\eqref{e:let} in terms of substitution rather than by generalizing and instantiating types. In other words, we use contexts and plugging in the object language in place of the type-class and module machinery in the metalanguages. In the presence of let-bound polymorphism, we can understand a term waiting to be plugged into a context as a higher-rank and higher-kind abstraction over the context. Even though our object language does not support higher abstraction, our metalanguages do, so they can type-check an object term separately from its interpreter---either as a functor from a |Symantics| module containing a type constructor (in OCaml), or a value with a |Symantics| constraint over a type constructor (in Haskell). Thus, ``context'' is a euphemism for a polymorphic argument, and ``plugging'' is a euphemism for application. \end{comment} \begin{comment} \jacques{But how do you create, in either Haskell or MetaOCaml, an untypechecked interpreter-with-a-hole [UIH] ?} \oleg{Well, one can make an argument that we already have such an interpreter with polymorphic let and the hole: incope. In Haskell, the declaration of an instance of Symantics is like the sequence of polymorphic lets. We construct terms where lam, add, etc, are free variables. We apply the interpreter to the semantics (plug the hole) by instantiating these terms (binding the free variables lam, etc. to the particular instance of Symantics). The unRR construction does this plugging in explicitly.} Again, what is different from the above is that we can typecheck terms separately, without inserting them first within the hole of a particular interpreter. Rank-2 type of |repr| helps. It lets enough of the type information out so the typechecking can proceed. So, |repr| is the representation of the polymorphic interpreter context with the hole, which permits separately typecheckable terms (the evaluation still entails `duplication' so to speak -- which is one way how polymorphism is resolved). \oleg{Mention the RR interpreter: for any term E, (RR E) is equivalent to E. RR is not an identity: it is an interpreter that encapsulates another interpreter. The RR interpreter can be made self if we treat RR as a special form and interpret it always with itself (similar to let and hole below).} \end{comment} \begin{comment} \jacques{From what other people have written (like those who refer to our work), it seems these comments of ours were prescient and not altogether obvious. We should consider adding some comments about the crucial role of 'repr' (r below) somewhere in the paper} The crucial role of the higher-order type parameter r The type constructor "r" above represents a particular interpreter. The meta-type "r tau" hides how the interpreter represents the object type "tau" yet exposes enough of the type information so we can type-check the encoding of an object term without knowing what "r" is. The checked term is then well-typed in any interpreter. Each instance of Symantics instantiates "r" to interpret terms in a particular way. The L interpreter is quite illustrative. 
We need the above semantics to be able to represent both R and L (the latter returns only Int as the values) in the same framework. We encode a term like |add 1 2| as \texttt{app \_add (app \_int 1) (app \_int 2)} where |_add| and |_int| are just `free variables'. Now, how to typecheck such a term? Some type should be assigned to these free variables. The goal is to complete the work without needing any type annotations (so we don't have to introduce any type language), with all types inferred and all terms typed. It seems the second-order type R neatly separates the typechecking part from the representation of R: it hides aspects that depend on the particular interpreter, and yet lets enough type information through (via its type argument) to permit the typechecking of terms, and infer all the types. The only approach that does seem to work is the one in incope.hs or incope.ml. If we de-sugar away records and type-classes, the type of a term L of the inferred type tau is $$ (\ZZ \rightarrow r \ZZ) \rightarrow (\BB \rightarrow r \BB) \rightarrow \forall \alpha \beta. (r \alpha \rightarrow r \beta) \rightarrow r (\alpha\rightarrow\beta) \rightarrow ... r \tau $$ % (Int -> r Int) -> % (Bool -> r Bool) -> % (forall alpha beta. (r alpha -> r beta) -> r (alpha->beta)) -> ... r tau or, if we denote the sequence of initial arguments as |S r|, terms have the type |S r -> r tau| The interpreter has the type |(forall r. S r -> r tau) -> r' tau| The higher-order type (variable) of kind |*->*| seems essential. So, at least we need some fragment of Fw (somehow our OCaml code manages to avoid the full Fw; probably because the module language is separated from the term language). Thus, we seem to need a fragment of Fw. It seems the inference is possible, as our Haskell and OCaml code constructively illustrates. Perhaps we need to characterize our fragment. \end{comment} \fi \section{Related work}\label{related} \ifshort Our initial motivation came from several papers \citep{WalidICFP02,taha-tag,xi-guarded,peyton-jones-simple} that use embedded interpreters to justify advanced type systems, in particular GADTs. \else Our initial motivation came from several papers that justify advanced type systems, in particular GADTs, by embedded interpreters \citep{WalidICFP02,taha-tag,xi-guarded,peyton-jones-simple} and CPS transformations \citep{Guillemette-Monier-PLPV,shao-type-toplas,chen-typeful}. \fi We admire all this technical machinery, but these motivating examples do not need it. Although GADTs may indeed be simpler and more flexible, they are unavailable in mainstream ML, and their implementation in GHC 6.8 fails to detect exhaustive pattern matching. We also wanted to find the minimal set of widespread language features needed for tagless type-preserving interpretation. The simply typed $\lambda$-calculus can interpret itself, provided we use universal types \citep{taha-tag}. The ensuing tagging overhead motivated \citet{Makholm-TagElim,taha-tag} to propose \emph{tag elimination}, which however does not statically guarantee that all tags will be removed \citep{WalidICFP02}. \Citet{WalidICFP02}, \citet{taha-tag}, \citet{xi-guarded}, and \citet{peyton-jones-simple} seem to argue that a typed interpreter of a typed language cannot be tagless without advanced types, based on the premise that the only way to encode a typed language in a typed language is to use a sum type (at some level of the hierarchy). While the logic is sound, we (following \citet{yang-encoding}) showed that the premise is not valid. 
\citet{Danvy-tagging-encoding} discuss Jones optimality at length and apply HOAS to typed self\hyp interpretation. However, their source language is untyped. Therefore, their object\hyp term encoding has tags, and their interpreter can raise run-time errors. Nevertheless, HOAS lets the partial evaluator remove all the tags. In contrast, our object encoding and interpreters do not have tags to start with and obviously cannot raise run-time errors. Our separation between the |Symantics| interface and its many implementations codifies the common practice of implementing an embedded DSL by specifying an abstract syntax of object\hyp language pervasives, such as addition and application, then providing multiple interpretations of them. The techniques we use to form such a family of interpreters find their origins in \citearound{'s \emph{language triplets}}\citet{Holst-AMIX}, though in an untyped setting. \Citet{JonesNielson:94:AbstractInterpretation} also prefigured this separation when they decomposed a denotational definition of an untyped object language into a core semantics (which we call abstract syntax) and multiple interpretations. In the typed setting, \citet{Nielson88} expressed families of program analyses on typed object languages using a typed $\lambda$-calculus as a metalanguage; however, the embeddings of the object language and the analyses are not type-checked in the metalanguage, unlike with our |Symantics| signature. When implementing a typed, embedded DSL\@, it is also common practice to use phantom types to rule out ill-typed object terms, as done in Lava \citep{Lava} and by \citet{Rhiger-thesis}. However, these two approaches are not tagless because they still use universal types, such as Lava's \texttt{Bit} and \texttt{NumSig}, and Rhiger's \texttt{Raw} (his Figure~2.2) and \texttt{Term} (his Chap.~3), which incur the attendant overhead of pattern matching. The universal type also greatly complicates the soundness and completeness proofs of embedding \citep{Rhiger-thesis}, whereas our proofs are trivial. Rhiger's approach does not support typed CPS transformation (his~\S3.3.4). \begin{comment} Rhiger's But Fig 2.2, p33: universal type Raw. He uses phantom type upon the Exp datatype. But that is cheating: phantom type means essentially we can easily do coerce. We use real types. That's why he had to do tedious proofs in Sec 2 of soundness and completeness of embedding. Whereas our proofs are obvious. His sec 3 is based on data representation of terms. They have type tags. We do nothing of that kind: See Sec 3.1.2. See numerous "data Term" in Sec3, which is the U type. In Sec 3.3.4 (p76) Rhiger specifically says that his encoding cannot do typed CPS transformation -- whereas our does. BTW, Rhiger thesis contains the definitions of the interpreter and the compiler, in the beginning. Use this in response to Rev1 \end{comment} \Citet{Thiemann-GenClass} implemented a set of \emph{binding-time polymorphic} combinators in Gofer, using many constructor classes. By merging all their classes into one and dropping polymorphic lift, they could have invented |Symantics|. We are not the first to implement a typed interpreter for a typed language. \Citet{laod93} use type classes to implement a metacircular interpreter of a typed version of the SK language, which is quite different from our object language. Their interpreter appears to be tagless, but they could not have implemented a compiler or partial evaluator in the same way, since they rely heavily on injection\hyp projection pairs. 
% The following paragraph was not in the version submitted to APLAS, % yet was not commented out. Of course, if we had space, we should add % this, but it is difficult to see how. \ifshort\else Using Haskell, \citet{Guillemette-Monier-PLPV} implement a CPS transformation for HOAS terms and statically assure that it preserves object types. They represent proofs of type preservation as terms of a GADT, which is not sound (as they admit in \S4.2) without a separate totality check because any type is trivially inhabited by a nonterminating term in Haskell. In contrast, our CPS transformations use simpler types than GADTs and assure type preservation at the (terminating) type level rather than the term level of the metalanguage. \Citeauthor{Guillemette-Monier-PLPV} review other type\hyp preserving CPS transformations (mainly in the context of typed intermediate languages), in particular \citets{shao-type-toplas} and \citets{chen-typeful}. These approaches use de Bruijn indices and fancier type systems with type-level functions, GADTs, or type\hyp equality proofs. \fi We encode terms in elimination form, as a coalgebraic structure. \Citet{Pfenning-Lee} first described this basic idea and applied it to metacircular interpretation. Our approach, however, can be implemented in mainstream ML and supports type inference, typed CPS transformation and partial evaluation. In contrast, \citeauthor{Pfenning-Lee} conclude that partial evaluation and program transformations ``do not seem to be expressible'' even using their extension to~$F_\omega$, perhaps because their avoidance of general recursive types compels them to include the polymorphic lift that we avoid in~\S\ref{S:PE-lift}. \begin{comment} It seems that Pfenning and Lee embed $F_2$ with type constructions in (pure) $F_3$. We embed $F_1$ in (weak?) $F_2$, as I see it. In a way, what we do is very similar to what they do (Figure 1, p.152), except that we do it in standard programming languages. It is unclear if their work can be implemented (yet) in any language. And we preserve type-inference, while their solution needs explicit types! The following line of their conclusion is worth citing: "... this does not imply that the same language is also suitable for type metaprogramming. ... such as partial evaluation... do not seem to be expressible". I suspect you're right, but I'm still reading the paper. See also page 146: "for a term M in $F_1$ (a simply-typed term), the representation $\bar{M}$ will be in $F_2$". The move from $F_2$ to $F_3$ and beyond reminds me strongly of our attempts at self-interpretation without the notion of a syntactic hole. \end{comment} We could not find work that establishes that the \emph{typed} $\lambda$-calculus has a final coalgebra structure. \ifshort (See \Citet{honsell99coinductive} for the untyped case.) \else \Citet{HonsellLenisa,honsell99coinductive} investigate the untyped $\lambda$-calculus along this line. \begin{comment} In particular, they use contexts with a hole \citep[p.\,13]{honsell99coinductive} to define \emph{observational equivalence} (see our~\S\ref{selfinterp}). \end{comment} \citearound{'s bibliography}\Citet{honsell99coinductive} refers to the foundational work in this important area. Particularly intriguing is the link to the coinductive aspects of B\"{o}hm trees, as pointed out by \citet{berarducci-models} and Jacobs \citeyearpar[Example 4.3.4]{jacobs-coalgebra}. \fi Other researchers have very recently realized that it is useful to abstract over higher-kinded types, like our |repr|. 
\Citet{moors-generics-higher-kind} put the same power to work in Scala.
\Citet{HORM08} also use Scala and note that they are influenced by our work \citep{carette-kiselyov-shan-aplas}.
Where we have concentrated on multiple efficient interpretations of the same language, they have concentrated on composing the languages and interpretations.

\section{Conclusions}\label{conclusion}

We solve the problem of embedding a typed object language in a typed metalanguage without using GADTs, dependent types, or a universal type.
Our family of interpreters includes an evaluator, a compiler, a partial evaluator, and CPS transformers.
It is patent that they never get stuck, because we represent object types as metalanguage types.
This work improves the safety and reduces the overhead of embedding DSLs in practical metalanguages such as Haskell and ML\@.
Our main idea is to represent object programs not in an initial algebra but using the existing coalgebraic structure of the $\lambda$-calculus.
More generally, to squeeze more invariants out of a type system as simple as Hindley-Milner, we shift the burden of representation and computation from consumers to producers: encoding object terms as calls to metalanguage functions (\S\ref{ourapproach}); building dynamic terms alongside static ones (\S\ref{S:PE-lift}); and simulating type functions for partial evaluation (\S\ref{S:PE-solution}) and CPS transformation\ifshort\else~(\S\ref{S:CPS})\fi.
This shift also underlies fusion, functionalization, and amortized complexity analysis.
\ifshort\else
When the metalanguage does provide higher-rank and higher-kind polymorphism, we can type-check and compile an object term separately from any interpreters it may be plugged into.
\fi
{ "alphanum_fraction": 0.7165586882, "avg_line_length": 42.8660812294, "ext": "tex", "hexsha": "6323be63b4104c20813fd9d98aa2618b544af506", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "964bd1b4c88d5713527e115529ec762ba3f0dd5f", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "JacquesCarette/finally-tagless", "max_forks_repo_path": "tagless/tagless-final.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "964bd1b4c88d5713527e115529ec762ba3f0dd5f", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "JacquesCarette/finally-tagless", "max_issues_repo_path": "tagless/tagless-final.tex", "max_line_length": 109, "max_stars_count": 30, "max_stars_repo_head_hexsha": "964bd1b4c88d5713527e115529ec762ba3f0dd5f", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "JacquesCarette/finally-tagless", "max_stars_repo_path": "tagless/tagless-final.tex", "max_stars_repo_stars_event_max_datetime": "2021-11-05T01:28:52.000Z", "max_stars_repo_stars_event_min_datetime": "2018-11-29T15:58:25.000Z", "num_tokens": 34476, "size": 117153 }
\subsection{ATE and Selection Bias}

When we observe a group of subjects, some of whom have been treated and some not, we calculate
\begin{equation}
\begin{split}
& \EE(Y_i(1) \g W_i = 1) - \EE(Y_i(0) \g W_i = 0) = \\
& [ \EE(Y_i(1) \g W_i = 1) - \EE(Y_i(0) \g W_i = 1) ] + \\
& \qquad [ \EE(Y_i(0) \g W_i = 1) - \EE(Y_i(0) \g W_i = 0) ] = \\
& \text{Average Treatment Effect} + \text{Selection Bias} \\
\end{split}
\end{equation}

\subsection{Fisher exact test in R}
\begin{lstlisting}
library(perm)
perms <- chooseMatrix(6, 3)
A <- matrix(c(38.2, 37.1, 37.6, 36.4, 37.3, 36), nrow=6, ncol=1, byrow=TRUE)
is.treatment <- c(1, 1, 1, 0, 0, 0)
n_treatment <- sum(is.treatment)
n_control <- length(A) - sum(is.treatment)
treatment_avg <- (1/n_treatment) * perms %*% A
control_avg <- (1/n_control) * (1-perms) %*% A
test_statistic <- abs(treatment_avg - control_avg)
rownumber <- apply(apply(perms, 1, function(x) (x == is.treatment)), 2, sum)
rownumber <- (rownumber == length(A))
observed_test <- test_statistic[rownumber == TRUE]
larger_than_observed <- (test_statistic >= observed_test)
sum(larger_than_observed) / length(test_statistic)
\end{lstlisting}

\subsection{Fixed effects model}
\label{r: fem}
\lstinline{G(factor(admin))} creates one dummy variable per region and includes these dummies in the regression:
\begin{lstlisting}[language=R]
library("lfe")
model2 <- felm(sex ~ teasown + post + teapost + G(factor(admin)),
               data = qiandata)
summary(model2)
\end{lstlisting}

% \subsection{Instrument variable regression}
% To use the ivreg() function directly one needs to set up the call as
% \begin{lstlisting}[language=R]
% ivreg(y ~ exogenous_vars + endogenous_var |
%       exogenous_vars + instrument_var)
% \end{lstlisting}

\subsection{Neyman analysis}
\begin{lstlisting}
data <- read.csv("data_myData.csv")
treatment <- data$T == 1
treatment_mean <- mean(data$Y[treatment], na.rm=TRUE)
control_mean <- mean(data$Y[!treatment], na.rm=TRUE)
ate <- treatment_mean - control_mean

Nc <- sum(!treatment)
Nt <- sum(treatment)
Sc2 <- (1/(Nc-1)) * sum( (data$Y[!treatment] - control_mean)^2 )
St2 <- (1/(Nt-1)) * sum( (data$Y[treatment] - treatment_mean)^2 )
Vneyman <- (Sc2/Nc + St2/Nt)

# N > 30
ub <- ate + 1.96 * sqrt(Vneyman)
lb <- ate - 1.96 * sqrt(Vneyman)

# t-distribution analysis
g <- ( Vneyman^2 / ( (St2/Nt)^2/(Nt+1) + (Sc2/Nc)^2/(Nc+1) ) ) - 2
ub <- ate + qt(0.975, g) * sqrt(Vneyman)
lb <- ate - qt(0.975, g) * sqrt(Vneyman)
\end{lstlisting}

\subsection{Regression commands}
\begin{lstlisting}
hprices <- read.csv("data_House_Prices_and_Crime_1.csv")
str(hprices)
summary(hprices)
subset(hprices, index_nsa == 54.29)

model1 <- lm(index_nsa ~ Homicides + Robberies + Assaults, data=hprices)
summary(model1)
confint(model1)
\end{lstlisting}

\subsection{F-test, restricted model}
\begin{lstlisting}
model_unrest <- lm(index_nsa ~ Homicides + Robberies + Assaults, data=hprices)
anova_unrest <- anova(model_unrest)
model_rest <- lm(index_nsa ~ I(Homicides-Assaults) + I(Robberies-Assaults),
                 data=hprices)
anova_rest <- anova(model_rest)

# F statistic
r <- 1
k <- 3
ssr_u <- anova_unrest$`Sum Sq`[4]
ssr_r <- anova_rest$`Sum Sq`[length(anova_rest$`Sum Sq`)]
statistic_test <- (((ssr_r - ssr_u)/r) / ((ssr_u) / anova_unrest$Df[4]))
# p-value is the upper tail of the F distribution
# (pf(), not df(), which is the density)
pvalue <- pf(statistic_test, r, anova_unrest$Df[4], lower.tail = FALSE)
\end{lstlisting}

\subsection{QQ-plots}
The R base functions \lstinline{qqnorm()} and \lstinline{qqline()} can be used to produce quantile-quantile plots:
\begin{lstlisting}
data <- runif(1000)
qqnorm(data)
qqline(data)
\end{lstlisting}

\subsection{Density plots}
\begin{lstlisting}
data <- runif(1000)
d <- density(data, bw = 0.01)
plot(d)
\end{lstlisting}

\subsection{Difference in difference model}
\begin{lstlisting}
manufacturing <- read.csv("data_manufacturing.csv")

# Cleaning
library(tidyverse)
manu <- manufacturing %>%
  filter((year == 1987 | year == 1988) & !is.na(scrap)) %>%
  select(year, fcode, scrap, grant)
treated <- manu %>% filter(grant == 1) %>% select(fcode)
treated_firms <- treated$fcode

# Add column 'treated'
manu <- manu %>% mutate(treated = 1*(fcode %in% treated_firms))

# Average treatment and control
did_results <- manu %>%
  group_by(year, treated) %>%
  summarize(promedio = mean(scrap), n = n())
did_results

# DiD model
did_model <- lm(scrap ~ treated + I(year==1988) + I(treated*(year==1988)),
                data=manu)
\end{lstlisting}

\subsection{R distribution functions}
The functions for the density/mass function, cumulative distribution function, quantile function and random variate generation are named in the form \lstinline{dxxx}, \lstinline{pxxx}, \lstinline{qxxx} and \lstinline{rxxx} respectively.
\begin{itemize}
\item For the beta distribution see \lstinline{dbeta}.
\item For the binomial (including Bernoulli) distribution see \lstinline{dbinom}.
\item For the Cauchy distribution see \lstinline{dcauchy}.
\item For the chi-squared distribution see \lstinline{dchisq}.
\item For the exponential distribution see \lstinline{dexp}.
\item For the F distribution see \lstinline{df}.
\item For the gamma distribution see \lstinline{dgamma}.
\item For the geometric distribution see \lstinline{dgeom}. (This is also a special case of the negative binomial.)
\item For the hypergeometric distribution see \lstinline{dhyper}.
\item For the log-normal distribution see \lstinline{dlnorm}.
\item For the multinomial distribution see \lstinline{dmultinom}.
\item For the negative binomial distribution see \lstinline{dnbinom}.
\item For the normal distribution see \lstinline{dnorm}.
\item For the Poisson distribution see \lstinline{dpois}.
\item For the Student's t distribution see \lstinline{dt}.
\item For the uniform distribution see \lstinline{dunif}.
\item For the Weibull distribution see \lstinline{dweibull}.
\end{itemize}
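For example, for the normal distribution the four functions follow this naming scheme directly (the numeric values below are arbitrary illustrations):
\begin{lstlisting}
dnorm(1.96)    # density of N(0,1) at 1.96
pnorm(1.96)    # cumulative probability P(X <= 1.96), about 0.975
qnorm(0.975)   # quantile function, about 1.96
rnorm(5)       # five random draws from N(0,1)
# the same d/p/q/r scheme applies to the other distributions,
# e.g. dbinom / pbinom / qbinom / rbinom
\end{lstlisting}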
{ "alphanum_fraction": 0.7118527438, "avg_line_length": 33.408045977, "ext": "tex", "hexsha": "fdacb1de1c3d4765ad2393e9d404157956d3bbec", "lang": "TeX", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2021-11-02T14:40:57.000Z", "max_forks_repo_forks_event_min_datetime": "2021-11-02T14:40:57.000Z", "max_forks_repo_head_hexsha": "a1ef693f8a37c7931900f1721743b1d838ea9908", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "r2cp/MITx_capstone_2", "max_forks_repo_path": "content/r-help.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "a1ef693f8a37c7931900f1721743b1d838ea9908", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "r2cp/MITx_capstone_2", "max_issues_repo_path": "content/r-help.tex", "max_line_length": 236, "max_stars_count": null, "max_stars_repo_head_hexsha": "a1ef693f8a37c7931900f1721743b1d838ea9908", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "r2cp/MITx_capstone_2", "max_stars_repo_path": "content/r-help.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1853, "size": 5813 }
%% Based on `template.tex'.
%% Copyright 2006-2010 Xavier Danaux ([email protected]).
%%
%% Copyright 2010-2017 Raphaël Pinson ([email protected]).
%
% This work may be distributed and/or modified under the
% conditions of the LaTeX Project Public License version 1.3c,
% available at http://www.latex-project.org/lppl/.
% Version: 20110122-4

\documentclass[11pt,a4paper,nolmodern]{moderncv}

\usepackage{RaphaelPinson}

\address{Chemin du Jura 3}{1041~Poliez-le-Grand}{Switzerland}

\usepackage[english]{babel}

\linespread{0.9} % for some reason, lines take up a lot of space in itemize in English...

\newenvironment{tightitemize}
{\begin{itemize} \setlength{\parskip}{0pt}}
{\end{itemize}}

% personal data
\title{Open-Source Technologist}
\extrainfo{%
\linkedin~\httplink{www.linkedin.com/in/raphink}\\%
\octocat~\httplink{www.github.com/raphink}\\%
Driving License} % optional, remove the line if not wanted
\myquote{Freely you have received, freely give}{Matthew 10:8}

%\nopagenumbers{} % uncomment to suppress automatic page numbering for CVs longer than one page

%----------------------------------------------------------------------------------
% content
%----------------------------------------------------------------------------------
\begin{document}
\setmainfont{Minion Pro}
\setsansfont{Myriad Pro}
\hyphenpenalty=10000
\maketitle

\section{Skills}

\subsection{Expert Skills}
\cvcomputer{Configuration Management}{Puppet\contributor, Docker, Augeas\developer} {Debian OSes}{Debian\contributor, Ubuntu\developer}

\subsection{Development}
\cvcomputer{Languages}{C, Perl, Python, Shell/Bash, Ruby, Go, GNU Make, Ada, Lua} {Web}{HTML/XHTML, CSS, JavaScript, Ajax, Jquery, Bootstrap, PHP, Semantic Web}
\cvcomputer{Frameworks}{Django, Google AppEngine, Google Wave, Netvibes} {Databases}{MySQL, PostgreSQL, NoSQL (GQL), SPARQL}
\cvcomputer{Formats}{XML/XPath, SGML, YAML/JSON, RDF/RSS/FOAF} {Methods}{Object Oriented Programming, MVC, Unit Tests, UML}
\cvcomputer{Source Management}{RCS/CVS, SVN, Bazaar, Git, Mercurial} {Tools}{Autotools, Cmake, Trac, Gforge, Launchpad, GitHub}

\subsection{Systems and Networks Administration}
\cvcomputer{Web}{Apache, Lighttpd, Nginx, Varnish, Bind, Squid, Dansguardian, VsFTPd} {Monitoring}{BB4/Hobbit, Nagios, Munin, Cacti, Graphite, Logstash}
\cvcomputer{Mail}{Postfix, Exim, SSMTP} {Backup}{Amanda, BackupManager, Duplicity}
\cvcomputer{Installation / Deployment}{FAI, Kickstart, Reprepro, Cfengine, Puppet\contributor, MCollective\contributor, Augeas\developer} {Networks}{DHCPd, VLANs, OpenVPN, Keepalived}
\cvcomputer{Operating Systems}{GNU/Linux (Debian\contributor, Ubuntu\developer, RedHat), MacOS X, Windows, *BSD} {Databases}{MySQL, PostgreSQL, SQLite, BerkeleyDB, CouchDB}
\cvcomputer{Virtualization}{Xen, VMWare, Docker, Rancher, Kubernetes, OpenShift, Openstack, Amazon Web Services} {Security}{SSL, PGP/GnuPG, OpenSSH}

\subsection{Office and Tools}
\cvcomputer{Office}{OpenOffice/LibreOffice, Microsoft Office, Gimp, Inkscape} {Project Management}{TaskJuggler, GanttProject}
\cvcomputer{Typesetting}{\TeX{}, \LaTeX{}, \XeTeX{}, \LuaTeX{}} {Wikis}{MediaWiki, MoinMoin}

\devnotes{Developer}{Contributor}

\newpage

\section{Experience}

\subsection{IT Experience}

% Center labels and use "Since"
%\tltextstart[base]{\scriptsize}
%\tltextend[base]{\scriptsize}
%\tlsince{Since~}

\tlcventry{2012}{0}{Infrastructure Developer \& Trainer}{\href{http://www.camptocamp.com}{Camptocamp}}{}{Chambéry}%
{Infrastructure Developer, Consultant \& Trainer for internal and client infrastructures.
\begin{itemize}
\item Systems Engineer:
\begin{itemize}
\item Operated a fleet of 500+ Puppet-managed servers (including servers for EPFL and Swisstopo);
\item Architected and deployed a secured MCollective cluster using RabbitMQ and SSL;
\item Planned and executed migrations between major Puppet versions (0.25 to 2.7 and 2.7 to 3.5);
\item Wrote and contributed to 100+ Puppet modules (mostly public), including plugins (facts, functions, types \& providers, indirectors), with unit tests;
\item Managed servers hosted on Amazon Web Services (with Cloud Formation) and OpenStack;
\item Packaged software for RedHat and Debian platforms. Operated and improved the package build systems and package repositories.
\end{itemize}
\item Training Leader:
\begin{itemize}
\item Initiated strategic partnerships with software vendors (Puppet Labs, Red Hat);
\item Taught official Puppet Labs curriculum (Puppet Fundamentals, Advanced Puppet, Extending Puppet with Ruby);
\item Wrote and taught Augeas curriculum (Augeas Fundamentals);
\item Taught teams from major universities/labs (CERN, EPFL), private banks (in France and Switzerland) and governmental IT teams.
\end{itemize}
\item Consultant:
\begin{itemize}
\item Performed Puppet/DevOps consulting for private banks and governmental IT teams.
\end{itemize}
\end{itemize}}

\tlcventry{2006}{2012}{Systems Engineer}{\href{http://www.orness.com}{ORNESS} then \href{http://www.alten.fr}{Alten}}{Sophia Antipolis}{}{Consultant at France T\'el\'ecom
\begin{tightitemize}
\item Administration of Cfengine, FAI and the software package repository;
\item Renewal and industrialization of the software deployment system for Debian/Ubuntu;
\item Management of the Gforge (collaborative development web interface) platform;
\item Development of QA tools for systems and configuration policy compliance;
\item Integration of the continuous integration platform (Hudson) with the Debian/Ubuntu software deployment system;
\item Monitoring of the fleet of servers;
\item Writing of technical documentation.
\end{tightitemize}}

\tlcventry{2008}{0}{Developer}{\href{http://www.augeas.net}{Augeas}}{Internet}{}{Development, bugfix and documentation
\begin{tightitemize}%
\item Writing of Augeas lenses;
\item Coding in C;
\item Improvement of autotools configuration;
\item NaturalDocs Integration (documentation generator);
\item International Conference Speaker (Belgium (\href{http://archive.fosdem.org/2009/schedule/events/fedora_augeas}{FOSDEM})).
\end{tightitemize}}

\tlcventry{2005}{0}{\href{https://launchpad.net/~raphink}{Developer/Maintainer}}{\href{http://www.ubuntu.com}{Ubuntu}}{Internet}{}{Development, bugfix and documentation
\begin{tightitemize}%
\item Creation, maintenance and review of software packages;
\item Writing of technical documentation for developers;
\item Maintenance of the QA system for software packages;
\item Bug management;
\item International Conference Speaker (Hungary, Germany (\href{http://www.linuxtag.org/}{LinuxTag})).
\end{tightitemize}}

\tlcventry{2005}{0}{Open-Source Contributor}{Various projects}{Internet}{}{%
\begin{tightitemize}%
%\item \href{https://launchpad.net/wavebiblebot}{Flammard Bible Bot}: Google Wave Bot, a personal project written in Python/AppEngine;
%\item \href{http://www.ichthux.com}{Ichthux}: Creation and maintenance of a specialized Linux distribution based on Ubuntu;
%\item \href{https://launchpad.net/byobu}{Byobu}: contribution of scripts and patches in Python and Bash;
\item \href{http://search.cpan.org/~raphink}{CPAN Author}: maintainer of several Perl modules on CPAN;
\item \href{http://www.ctan.org/author/id/pinson}{CTAN Author}: maintainer of several \LaTeX{} packages on CTAN.
\end{tightitemize}}

\subsection{Other Experience}

\tlcventry{2009}{0}{Translator and Editor}{La Colombe Calvary}{Nice}{}{%
\begin{tightitemize}%
\item Translation of English theology books into French;
\item Typesetting of books using \LaTeX{}, \XeTeX{} and \LuaTeX{};
\item Publishing of books using on-demand publishing services (CreateSpace and Lulu).
\end{tightitemize}}

% Restore normal labels
%\tltext{\scriptsize}

%\tldatelabelcventry{2004}{July 2004}{Blue Collar Internship}{\href{http://www.snecma.com}{SNECMA}}{Moissy-Cramayel}{}{Assembled and equilibrated turbo reactors for planes}
%\tldatelabelcventry{2002}{Summer 2002}{Fire Safety Officer}{\href{http://www.euroguard.fr/}{Euroguard}}{Marcoussis}{}{Supervised an Alcatel research site}
%\tldatelabelcventry{2001}{Summer 2001}{Surveillance Agent}{Penauille Polys\'ecurit\'e}{Paris}{}{Supervised the headquarters of the French Red Cross}
%\tllabelcventry{1999}{2000}{1999--2000}{Certified First Responder}{\href{http://www.croix-rouge.fr/}{French Red Cross}}{Paris Suburbs}{}{Served in several volunteer missions as a paramedic}

\pagebreak

\section{Education}

\tldatecventry{2013}{Puppet Trainer}{\href{https://puppetlabs.com/services/puppet-training/}{PuppetLabs}}{Amsterdam}{}{Official Training Partner for PuppetLabs' courses (Puppet Fundamentals, Puppet Advanced, Extending Puppet with Ruby)}
\tldatecventry{2009}{ITIL{\LARGE\textregistered} v3 Foundation}{\href{http://www.itil-officialsite.com/home/home.asp}{EXIN}}{Sophia Antipolis}{}{Organization and efficiency of the Information System}
\tldatecventry{2005}{Student in Business Creation and Management}{\href{http://www.creation-transmission.com/}{Cr\'eafort}}{Poitiers}{}{Accounting, Management, Marketing, Law}
\tldatecventry{2005}{Student in Pedagogy (`Gestion Mentale')}{\href{http://www.ifgm.org/}{IFGM}}{Bordeaux}{}{Pedagogy, Didactics, Cognitive Psychology}
\tllabelcventry{2003}{2005}{2003--2005}{Student in Mechanical Engineering}{\newline\href{http://www.ensma.fr}{\'Ecole Nationale Sup\'erieure de M\'ecanique et d'A\'erotechnique (ENSMA)}}{Poitiers}{}{Solid \& Fluid Mechanics, Mathematics, Combustion \& Propulsion, Materials Science, Automation, Signal Processing, Computer Science, Management}
\tldatecventry{2004}{Licence, equivalent of a British BSc, in Mechanics}{\href{http://www.univ-poitiers.fr/}{Universit\'e de Poitiers}}{Poitiers}{}{Solid \& Fluid Mechanics, Mathematics, Combustion \& Propulsion, Materials Science, Automation, Signal Processing, Computer Science, Management}
\tllabelcventry{2002}{2003}{2002--2003}{Student in Chemistry}{\href{http://www.u-psud.fr}{Universit\'e de Paris XI}}{Orsay}{}{Chemistry, Physics, Mathematics}
\tllabelcventry{2000}{2002}{2000--2002}{Student in Medicine (PCEM1)}{\href{http://www.u-psud.fr}{Universit\'e de Paris XI}}{Orsay}{}{Biology, Chemistry, Biochemistry,
Physics, Anatomy, Histology, Physiology, Law} \tldatecventry{2000}{Baccalaur\'eat, equivalent of British A Level}{\href{http://www.ac-versailles.fr/etabliss/orsay/}{Lyc\'ee Blaise Pascal}}{Orsay}{with distinction `mention bien'}{} \section{Certifications} \tldatecventry{2019}{Red Hat Certified Specialist in OpenShift Administration}{\href{https://www.redhat.com/en/services/training/ex280-red-hat-certified-specialist-in-openshift-administration-exam}{Red Hat}}{190-035-931}{}{OpenShift} \tldatecventry{2013}{Puppet Certified Developer}{\href{https://puppetlabs.com/services/certification/puppet-developer/}{PuppetLabs}}{PCD0000011}{}{Puppet IT automation software} \tldatecventry{2013}{Puppet Certified Professional}{\href{https://puppetlabs.com/services/certification/puppet-professional/}{PuppetLabs}}{PCP0000116}{}{Puppet IT automation software} \tldatecventry{2009}{ITIL{\LARGE\textregistered} v3 Foundation Examination}{\href{http://www.itil-officialsite.com/home/home.asp}{EXIN}}{00055512}{}{Organization and efficiency of the Information System} \section{Foreign Languages} \cvlanguage{French}{Native}{Mother Tongue} \cvlanguage{English}{Fluent}{Daily practice, conferences given in English} \cvlanguage{Spanish}{Good Level}{Occasional practice} \cvlanguage{German}{Good Level}{Studied 9 years in school} %\cvlanguage{Russian}{Intermediary Level}{Studied 3 years in school} %\cvlanguage{Dutch}{Beginner}{Studied alone} %\cvlanguage{Swedish}{Beginner}{Studied alone} \section{Personal interests} \cvhobby{Music}{Clarinet, piano, organ, saxophone} \cvhobby{Sports}{Hiking, sailing, golf, climbing} \cvhobby{Contributions}{Wikipedia, OpenStreetMap} \cvhobby{Memberships}{President of the cultural association `la Colombe de Savoie'} \cvhobby{}{Ubuntu member} \cvhobby{Others}{Travels, reading} %\renewcommand{\listitemsymbol}{-} % change the symbol for lists % Publications from a BibTeX file without multibib\renewcommand*{\bibliographyitemlabel}{\@biblabel{\arabic{enumiv}}}% for BibTeX numerical labels %\nocite{*} %\bibliographystyle{plain} %\bibliography{publications} % 'publications' is the name of a BibTeX file % Publications from a BibTeX file using the multibib package %\section{Publications} %\nocitebook{book1,book2} %\bibliographystylebook{plain} %\bibliographybook{publications} % 'publications' is the name of a BibTeX file %\nocitemisc{misc1,misc2,misc3} %\bibliographystylemisc{plain} %\bibliographymisc{publications} % 'publications' is the name of a BibTeX file \end{document}
{ "alphanum_fraction": 0.7434995411, "avg_line_length": 51.078125, "ext": "tex", "hexsha": "9ebcb5371a3f67858651141393aaedf182b82ca9", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "71b26deffd86ffd9d1d189e668553c36b5931a74", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "dendisuhubdy/CV-1", "max_forks_repo_path": "RaphaelPinson_en.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "71b26deffd86ffd9d1d189e668553c36b5931a74", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "dendisuhubdy/CV-1", "max_issues_repo_path": "RaphaelPinson_en.tex", "max_line_length": 343, "max_stars_count": null, "max_stars_repo_head_hexsha": "71b26deffd86ffd9d1d189e668553c36b5931a74", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "dendisuhubdy/CV-1", "max_stars_repo_path": "RaphaelPinson_en.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 3614, "size": 13076 }
\documentclass{beamer}
\usepackage{beamerthemesplit} %Activate for custom appearance

\usetheme{Hannover}
\usecolortheme{rose}

%Unnumbered footnotes
\newcommand\ufoot[1]{
  \begingroup
  \renewcommand\thefootnote{}\footnote{#1}
  \addtocounter{footnote}{-1}
  \endgroup
}

\title{Introduction to Git and GitHub Part I}
\subtitle{IB 516 Analytical Workflows}
\author{Ben Dalziel}
\date{\today}

\begin{document}

\frame{\titlepage}

\section[Outline]{}
\frame{\tableofcontents}

\section{Introduction to Version Control}

\frame{
\frametitle{What is a version control system?}
\begin{itemize}
\item Records changes in a set of files over time so that you can recall specific versions later.
\item Allows `time travel' back and forth to any previous point in your project's development.
\item Allows peaceful coexistence of, and exchange of information between, `parallel universes' of a project.
\item Allows stable and efficient collaborations (with others and with your past self) that produce reproducible work.
\end{itemize}
}

\frame{
\frametitle{In practice}
\begin{itemize}
\item Revert selected files back to a previous state.
\item Revert the entire project back to a previous state.
\item Compare changes over time.
\item See who last modified something that might be causing a problem, who introduced an issue and when, and more.
\item If you screw things up or lose files, you can easily recover.
\item You get all this for very little effort / overhead (aside from startup costs for learning).
\end{itemize}
\ufoot{\tiny{https://git-scm.com/book/en/v2/Getting-Started-About-Version-Control}}
}

\frame{
\frametitle{Visualizing a version-controlled project through time}
\includegraphics[scale = 0.3]{figs/pretty_branch_graph}
\ufoot{\tiny{https://stackoverflow.com/a/24107223}}
}

\frame{
\frametitle{Version control in Git: How Git stores data}
\includegraphics[scale = 0.3]{figs/how_git_stores_data}
\ufoot{\tiny{http://git-scm.com/book/en/v2/Getting-Started-What-is-Git}}
}

\section{Basic Workflow}

\frame{
\frametitle{Basic lifecycle of work on a file}
\includegraphics[scale = 0.3]{figs/lifecycle}
\ufoot{\tiny{http://git-scm.com/book/en/v2/Getting-Started-What-is-Git}}
\tiny{*We haven't talked about \emph{staging}, which is an intermediate step between modifying a file and committing it. Git allows you to choose which modified files will be \emph{staged} (i.e. flagged for inclusion) as part of the next commit. Typically we will want to commit all modifications. GitHub Desktop automatically stages all modifications, so we don't need to think too much about staging at the moment.}
}

\frame{
\frametitle{Some Git words}
repository, commit, push, fetch, merge, pull, local, origin, master, branch, fork
}

\frame{
\frametitle{Naming commits}
\begin{itemize}
\item Use the `imperative mood.' %(e.g. ``finish your homework'')
\item Complete the sentence: ``If this commit is adopted, it will...''
\item Capitalize the first word.
\item Do not use a period.
\end{itemize}
\includegraphics[scale = 0.5]{figs/commit_messages} \\
\ufoot{\tiny{https://chris.beams.io/posts/git-commit/}}
}

\end{document}
{ "alphanum_fraction": 0.7417239225, "avg_line_length": 25.8225806452, "ext": "tex", "hexsha": "0a9c307531dd93f27e624748dcdc6e0318ed6cb3", "lang": "TeX", "max_forks_count": 8, "max_forks_repo_forks_event_max_datetime": "2021-11-09T18:04:23.000Z", "max_forks_repo_forks_event_min_datetime": "2020-11-10T18:14:47.000Z", "max_forks_repo_head_hexsha": "435c5c06cdb7da178958af2563a209d80cb11af2", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "analyticalworkflows/TeachingMaterials", "max_forks_repo_path": "classes/VersionControl_Git_part_1/tex/Intro2GitSlides.tex", "max_issues_count": 19, "max_issues_repo_head_hexsha": "435c5c06cdb7da178958af2563a209d80cb11af2", "max_issues_repo_issues_event_max_datetime": "2022-03-01T00:53:51.000Z", "max_issues_repo_issues_event_min_datetime": "2020-08-27T02:23:07.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "analyticalworkflows/TeachingMaterials", "max_issues_repo_path": "classes/VersionControl_Git_part_1/tex/Intro2GitSlides.tex", "max_line_length": 407, "max_stars_count": 4, "max_stars_repo_head_hexsha": "435c5c06cdb7da178958af2563a209d80cb11af2", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "analyticalworkflows/TeachingMaterials", "max_stars_repo_path": "classes/VersionControl_Git_part_1/tex/Intro2GitSlides.tex", "max_stars_repo_stars_event_max_datetime": "2021-04-28T20:54:26.000Z", "max_stars_repo_stars_event_min_datetime": "2019-04-16T19:40:11.000Z", "num_tokens": 886, "size": 3202 }
\subsection{Qualitative Reporting}
\label{qualitative_reporting}

The qualitative reporting is an integral part of the class, in the form of blog post assignments ($1000\pm500$ words per post). Assignments have covered the following topics, in chronological order, to allow for progressive mutual sharing of experience on the class website\footnote{The full assignment descriptions can be found on the class website \cite{classweb2013}.}:

\begin{enumerate}

\item {\bf First Contact with the Community} \\
This topic covers the motivations for choosing a specific project as well as the process through which joining has occurred \cite{lakhani2005htu,robert2006,vonKrogh2012}. Since the class organization (cf. Section \ref{classmotivations}) set no constraints on which project could be chosen, a broad variety of experiences was expected. Some reporting was expected on whether joining had been an informal process or a more formal, established ``joining'' script \cite{vonKrogh2003}, and whether this experience generalizes to all community joiners. We stress the importance of the joining process since a recent study has shown how barriers to joining can endanger even the most established open source projects \cite{halfaker2013}. \\

\item {\bf Historical, Cultural and Demographic Backgrounds} \\
Ideological, cultural and historical traits play a fundamental role in {\it sorting} communities. Evidence of this sorting has been reported for open source license types \cite{belenzon2009}. The evolution of an open source project and the conditions under which it can be joined are path dependent. Deciding to join a project requires a good understanding of the cultural traits and social norms of the community. In this part, students were asked to report on the most relevant historical, cultural and demographic traits of the joined projects and communities. \\

\item{\bf Communication Infrastructure}\\
The success of open collaboration is deeply rooted in the capacity of a community to organize itself with the help of online information systems (cf. \cite{benkler2002} for a theoretical argument), which in turn determine the way communities interact and keep records of the steps of open collaborative innovation. \\

\item {\bf Community Participation} \\
Most open source projects combine a large variety of communication channels, from one-to-one, many-to-many and (a)synchronous channels to virtual or physical meetings. To handle these various channels and to find or properly request information, community engagement and practice are critical for staying tuned to the project. Joining a community most often occurs long before actually joining production. \\

\item {\bf Governance \& Decision Process}\\
One of the toughest issues with large and rather horizontal communities is the decision process. The relevant question here is: how does a community make tough decisions? Open collaboration communities have shown some level of creativity in that regard, with a large variety of governance models ranging from consensus to benevolent dictatorship. The governance approach might be related to the project goals or simply be the result of path-dependent history. Depending on the community, the governance structure can be informal -- relying on implicit social norms -- or, on the contrary, clearly institutionalized with formal governance rules.
\end{enumerate}

\noindent Two blog posts were due after the expected delivery date of this report, so their analyses are not included here:

\begin{enumerate}[resume]

\item {\bf Funding Sources \& Business Model} \\
Most open collaboration projects that have reached sufficient momentum rely on funding. Funding sources are diverse and can determine how a project is likely to develop further. The funding issue is closely related to the business model and to how the project can differentiate itself from competing ventures (closed or open source).\\

\item{\bf Organization \& Project Modularity}\\
The ``mirror hypothesis'' is a theory that community organization should reflect the technical modularity of a project \cite{maccormack2012}. We are investigating how ``both'' modularities can affect project organization and the joining process.

\end{enumerate}
{ "alphanum_fraction": 0.80824202, "avg_line_length": 116.6111111111, "ext": "tex", "hexsha": "f5efae11185a488f8cb8459c2cd40d1f4682303d", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "02982821412ef169ea08eb32b1b12929501d4624", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "sbenthall/i290m-ocpp-site", "max_forks_repo_path": "report/chapters/041_qualitative.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "02982821412ef169ea08eb32b1b12929501d4624", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "sbenthall/i290m-ocpp-site", "max_issues_repo_path": "report/chapters/041_qualitative.tex", "max_line_length": 719, "max_stars_count": 1, "max_stars_repo_head_hexsha": "02982821412ef169ea08eb32b1b12929501d4624", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "sbenthall/i290m-ocpp-site", "max_stars_repo_path": "report/chapters/041_qualitative.tex", "max_stars_repo_stars_event_max_datetime": "2017-02-25T01:00:07.000Z", "max_stars_repo_stars_event_min_datetime": "2017-02-25T01:00:07.000Z", "num_tokens": 828, "size": 4198 }
\section{Functions with objects}
{ "alphanum_fraction": 0.7714285714, "avg_line_length": 8.75, "ext": "tex", "hexsha": "32c7edbe5ebe8ff405bfb9f12c26f58e4faa497f", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "adamdboult/nodeHomePage", "max_forks_repo_path": "src/pug/theory/computer/objects/03-00-Functions_with_objects.tex", "max_issues_count": 6, "max_issues_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93", "max_issues_repo_issues_event_max_datetime": "2022-01-01T22:16:09.000Z", "max_issues_repo_issues_event_min_datetime": "2021-03-03T12:36:56.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "adamdboult/nodeHomePage", "max_issues_repo_path": "src/pug/theory/computer/objects/03-00-Functions_with_objects.tex", "max_line_length": 32, "max_stars_count": null, "max_stars_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "adamdboult/nodeHomePage", "max_stars_repo_path": "src/pug/theory/computer/objects/03-00-Functions_with_objects.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 8, "size": 35 }
% ===========================================================================
% Title:
% ---------------------------------------------------------------------------
% to create Type I fonts type "dvips -P cmz -t letter <filename>"
% ===========================================================================
\documentclass[11pt]{article}       %--- LATEX 2e base
\usepackage{latexsym}               %--- LATEX 2e base
%---------------- Wide format -----------------------------------------------
\textwidth=6in \textheight=9in
\oddsidemargin=0.25in \evensidemargin=0.25in
\topmargin=-0.5in
%--------------- Def., Theorem, Proof, etc. ---------------------------------
\newtheorem{definition}{Definition}
\newtheorem{theorem}{Theorem}
\newtheorem{lemma}{Lemma}
\newtheorem{corollary}{Corollary}
\newtheorem{property}{Property}
\newtheorem{observation}{Observation}
\newtheorem{fact}{Fact}
\newenvironment{proof}           {\noindent{\bf Proof.} }%
                                 {\null\hfill$\Box$\par\medskip}
%--------------- Algorithm --------------------------------------------------
\newtheorem{algX}{Algorithm}
\newenvironment{algorithm}       {\begin{algX}\begin{em}}%
                                 {\par\noindent --- End of Algorithm ---
                                 \end{em}\end{algX}}
\newcommand{\step}[2]            {\begin{list}{}
                                  {  \setlength{\topsep}{0cm}
                                     \setlength{\partopsep}{0cm}
                                     \setlength{\leftmargin}{0.8cm}
                                     \setlength{\labelwidth}{0.7cm}
                                     \setlength{\labelsep}{0.1cm}    }
                                  \item[#1]#2    \end{list}}
                                 % usage: \begin{algorithm} \label{xyz}
                                 %        ... \step{(1)}{...} ...
                                 %        \end{algorithm}
%--------------- Figures ----------------------------------------------------
\usepackage{graphicx}

\newcommand{\includeFig}[3]      {\begin{figure}[htb] \begin{center}
                                 \includegraphics
                                 [width=4in,keepaspectratio] %comment this line to disable scaling
                                 {#2}\caption{\label{#1}#3} \end{center} \end{figure}}
                                 % usage: \includeFig{label}{file}{caption}
% ===========================================================================
\begin{document}
% ===========================================================================

% ############################################################################
% Title
% ############################################################################

\title{LITERATURE REVIEW: Parallelizing the Interpolation between Latent Spaces of Autoencoder Networks to Introduce Novelty in 3D Object Reconstruction}

% ############################################################################
% Author(s) (no blank lines !)
\author{
% ############################################################################
Tansin Jahan\\
School of Computer Science\\
Carleton University\\
Ottawa, Canada K1S 5B6\\
{\em [email protected]}
% ############################################################################
} % end-authors
% ############################################################################

\date{October 5, 2018}

\maketitle

% ############################################################################
\section{Introduction} \label{intro}
% ############################################################################

In terms of hardware, the execution of every single instruction relies on computation by either the CPU or the GPU, and the capabilities of both determine a program's runtime. When this runtime becomes an overhead, improvements must be introduced so that the computational cost can be minimized. This is where \textbf{parallelization} weighs in, as it is capable of utilizing the GPU for heavy computation on data.
My project involves parallelizing the vector additions produced by a simple \textbf{autoencoder neural network}, a network that is popular for reconstructing the same 3D object that it is given as input. An autoencoder is a symmetrical deep network that involves several convolutional layers and many parameters for computation.

\begin{figure}[h]
\begin{center}
\includegraphics[scale=0.5]{autoencoder-architecture.png}
\caption{A Simple Autoencoder Architecture}
\end{center}
\end{figure}

The CNN implemented for this project takes higher-dimensional 3D volumes (i.e. 32x32x32) as input and reduces them to a lower-dimensional representation \cite{dr8}, which is called the latent space (also Z). In the latent space, represented as the Z vector, the input object has minimum dimension with maximum features. From this lower dimension, the Z vector passes through the deconvolution (decoder) layers, which reproduce the same 3D volume. However, we can combine the Z vectors of two input volumes and interpolate new points as a Z vector, so that the decoder can produce new 3D objects based on this interpolation.

This interpolation of Z vectors (the addition of two Z vectors) can be an overhead for the performance of the autoencoder. Suppose we have 50 inputs. Then each input has to be paired with the other inputs and an interpolation calculated each time, so for 50 inputs the number of vector additions is of the order of 50 x 50, i.e. 2500 additions in total. This calculation can take much more time than the convolutions, and we can therefore parallelize it on the GPU using multithreading (a small serial sketch of this computation appears in Section \ref{GPUimpl}). In conclusion, for this project several threads will be introduced to compute the vector additions in CUDA so that the performance of the whole network can be improved.

% ############################################################################
\section{Literature Review} \label{litrev}
% ############################################################################

With the recent development of convolutional neural networks (CNNs), they have been used to solve several computer vision problems. For example, object detection and reconstruction from input images, semantic information extraction from a scene, and object segmentation are all recent computer vision applications where CNNs have been used to produce better results. Likewise, 3D reconstruction of an object is one such computer vision problem, and recently multiple approaches (e.g. 3D-GAN, 3D-ShapeNets) have been proposed as solutions \cite{dr1}. Previous work shows that, given a depth image as input, a volumetric representation can be produced \cite{dr2}. Following these works, an approach was proposed to reconstruct a 3D voxelized object from one or more images of that object taken from different viewpoints (i.e. single-view or multi-view), using a recurrent neural network \cite{dr7}. In total, 50,000 models were used to train and test the proposed network. Training the network with this large amount of data is very time consuming, and therefore introducing parallelism between the layers of the model can help to improve the performance of the network.

\subsection{GPU implementation} \label{GPUimpl}

GPUs are capable of efficiently running parallel programs and outperform CPUs in terms of raw computing power. In the paper ``ImageNet classification with deep convolutional neural networks,'' two GTX 580 3GB GPUs were used to train the network, and the whole training took five to six days \cite{dr5}. In this parallelization scheme, half of the kernels (or neurons) were kept on each GPU, and the GPUs communicated only in certain layers.
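To make the vector-addition workload from Section~\ref{intro} concrete, the following is a minimal serial sketch of the pairwise latent-vector interpolation. It is only an illustration: the variable names and sizes are hypothetical, and NumPy on the CPU is used as a stand-in for the CUDA implementation that this project actually targets.

\begin{verbatim}
import numpy as np

# Hypothetical batch of latent codes: 50 inputs, each encoded into a
# flattened Z vector by the autoencoder's encoder (z_dim is assumed).
num_inputs, z_dim = 50, 128
Z = np.random.rand(num_inputs, z_dim)

def interpolate(z_a, z_b, alpha=0.5):
    """Blend two latent vectors; alpha = 0.5 gives their midpoint."""
    return alpha * z_a + (1.0 - alpha) * z_b

# Serial O(n^2) loop: 50 x 50 = 2500 vector additions.  This is the
# part proposed for GPU parallelization, e.g. one CUDA thread (or one
# thread block) per (i, j) pair of latent vectors.
pairs = np.zeros((num_inputs, num_inputs, z_dim))
for i in range(num_inputs):
    for j in range(num_inputs):
        pairs[i, j] = interpolate(Z[i], Z[j])
\end{verbatim}

On a GPU, the doubly nested loop collapses into a grid of independent threads, since each $(i, j)$ pair can be computed without any communication with the others, which is exactly the kind of data-parallel pattern CUDA is designed for.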
The GPU's parallel processor architecture has made it an essential choice for many applications where parallelism is needed. The most common areas where GPU computing is used include bioinformatics, data science, analytics and databases, defense and intelligence, machine learning, and imaging and computer vision \cite{samel2016gpu}. Using the GPU's multithreaded processors, several threads can be introduced to perform matrix operations, which is highly efficient for both graphics and general-purpose parallel computing applications \cite{nickolls2008scalable}. To execute programs written in high-level languages such as C, C++ or Python on the GPU, CUDA provides a high-level interface that divides the computation between the CPU (host memory) and the GPU (device memory).

% ############################################################################
% Bibliography
% ############################################################################
\bibliographystyle{plain}
\bibliography{my-bibliography}     %loads my-bibliography.bib

% ============================================================================
\end{document}
% ============================================================================
{ "alphanum_fraction": 0.5787224471, "avg_line_length": 78, "ext": "tex", "hexsha": "0003378bd809331d616d0978cbf1960cfb4b5cbb", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "4d6d59cce00183d25673fa5d82802acff12b3c06", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "tansinjahan/tansinjahan.github.io", "max_forks_repo_path": "projects/COMP5704/Literature_Review.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "4d6d59cce00183d25673fa5d82802acff12b3c06", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "tansinjahan/tansinjahan.github.io", "max_issues_repo_path": "projects/COMP5704/Literature_Review.tex", "max_line_length": 1168, "max_stars_count": null, "max_stars_repo_head_hexsha": "4d6d59cce00183d25673fa5d82802acff12b3c06", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "tansinjahan/tansinjahan.github.io", "max_stars_repo_path": "projects/COMP5704/Literature_Review.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1682, "size": 8892 }
\section{Results}
The SaltProc online reprocessing simulation package is demonstrated in four applications: (1) analyzing \gls{MSBR} neutronics and the fuel cycle to find the equilibrium core composition and core depletion, (2) studying the evolution of operational and safety parameters during \gls{MSBR} operation, (3) demonstrating that in the single-fluid two-region \gls{MSBR} conceptual design the undermoderated outer core zone II works as a virtual ``blanket'', reduces neutron leakage, and improves the breeding ratio due to a neutron energy spectral shift, and (4) determining the effect of fission product removal on the core neutronics.

The neutron population per cycle and the number of active/inactive cycles were chosen to obtain a balance between a reasonable uncertainty for a transport problem ($\leq$ 15 pcm\footnote{ 1 pcm = 10$^{-5}\Delta k_{eff}/k_{eff}$} for the effective multiplication factor) and computational time. The \gls{MSBR} depletion and safety parameter computations were performed on 64 Blue Waters XK7 nodes (two AMD 6276 Interlagos CPUs per node, 16 floating-point Bulldozer core units or 32 ``integer'' cores per node, nominal clock speed 2.45 GHz). The total computational time for calculating the equilibrium composition was approximately 9,900 node-hours (18 core-years).

\subsection{Effective multiplication factor}
Figures~\ref{fig:keff} and \ref{fig:keff_zoomed} show the effective multiplication factors obtained using SaltProc and SERPENT2. The effective multiplication factors were calculated after removing the fission products listed in Table~\ref{tab:reprocessing_list} and adding the fertile material at the end of each cycle (3 days in this work). The effective multiplication factor fluctuates significantly as a result of the batch-wise nature of this online reprocessing strategy.

\begin{figure}[ht!]
	\centering
	\includegraphics[width=\textwidth]{keff.png}
	\caption{Effective multiplication factor dynamics for the full-core \gls{MSBR} model over a 60-year reactor operation lifetime.}
	\label{fig:keff}
\end{figure}

\begin{figure}[ht!]
	\centering
	\includegraphics[width=\textwidth]{keff_zoomed.png}
	\caption{Zoomed effective multiplication factor for a 150-EFPD time interval.}
	\label{fig:keff_zoomed}
\end{figure}

First, SERPENT calculates the effective multiplication factor for the beginning of the cycle (fresh fuel composition at the first step). Next, it computes the new fuel salt composition at the end of a 3-day depletion. The corresponding effective multiplication factor is much smaller than the previous one. Finally, SERPENT calculates $k_{eff}$ for the depleted composition after applying feeds and removals. The $k_{eff}$ increases accordingly, since major reactor poisons (e.g. Xe, Kr) are removed, while fresh fissile material ($^{233}$U) from the protactinium decay tank is added. Additionally, the presence of rubidium, strontium, cesium, and barium in the core is disadvantageous to the reactor physics. Overall, the effective multiplication factor gradually decreases from 1.075 to $\approx$1.02 at equilibrium after approximately 6 years of irradiation.

In fact, SaltProc fully removes all of these elements every 3435 days (rather than a small mass fraction every 3 days), which causes the multiplication factor to jump by approximately 450 pcm and limits the use of the batch approach for online reprocessing simulations. In future versions of SaltProc this drawback will be eliminated by removing elements with longer residence times (seminoble metals, volatile fluorides, Rb, Sr, Cs, Ba, Eu).
In that approach, chemistry models will inform the separation efficiencies for each reprocessing group, and removal will optionally be spread more evenly across the cycle time.

\subsection{Fuel salt composition dynamics}

The analysis of the fuel salt composition evolution provides more comprehensive information about the equilibrium state. Figure~\ref{fig:adens_eq} shows the number densities of the major nuclides which have a strong influence on the reactor core physics. The concentrations of $^{233}$U, $^{232}$Th, $^{233}$Pa, and $^{232}$Pa in the fuel salt change insignificantly after approximately 2500 days of operation. In particular, the $^{233}$U number density fluctuates by less than 0.8\% between 16 and 20 years of operation. Hence, a quasi-equilibrium state was achieved after 16 years of reactor operation.

\begin{figure}[ht!] % replace 't' with 'b' to
	\centering
	\includegraphics[width=\textwidth]{major_isotopes_adens.png}
	\caption{Number density of major nuclides during 60 years of reactor operation.}
	\label{fig:adens_eq}
\end{figure}

In contrast, a wide variety of nuclides, including fissile isotopes (e.g. $^{235}$U) and non-fissile strong absorbers (e.g. $^{234}$U), kept accumulating in the core. Figure~\ref{fig:fissile_short} demonstrates the production of fissile isotopes in the core. At the end of the simulated operating time, the core contained significant $^{235}$U ($\approx10^{-5}$ atom/b-cm), $^{239}$Pu ($\approx5\times10^{-7}$ atom/b-cm), and $^{241}$Pu ($\approx 5\times10^{-7}$ atom/b-cm). Meanwhile, the equilibrium number density of the target fissile isotope $^{233}$U was approximately 7.97$\times10^{-5}$ atom/b-cm. Small dips in the neptunium and plutonium number densities every 16 years are caused by removing $^{237}$Np and $^{242}$Pu (included in the processing group ``Higher nuclides'', see Table~\ref{tab:reprocessing_list}) which decay into $^{235}$Np and $^{239}$Pu, respectively. Thus, the production of new fissile materials in the core, as well as $^{233}$U breeding, made it possible to compensate for the negative effects of strong absorber accumulation and keep the reactor critical.

\begin{figure}[htp!] % replace 't' with 'b' to
	\centering
	\includegraphics[width=\textwidth]{fissile_short.png}
	\caption{Number densities of nuclides that are fissile in the epithermal spectrum, accumulating during reactor operation.}
	\label{fig:fissile_short}
\end{figure}

\subsection{Neutron spectrum}

Figure~\ref{fig:spectrum} shows the normalized neutron flux spectrum for the full-core \gls{MSBR} model in the energy range from $10^{-8}$ to $10$ MeV. The neutron energy spectrum at equilibrium is harder than at startup due to plutonium and other strong absorbers accumulating in the core during reactor operation.

\begin{figure}[ht!] % replace 't' with 'b' to force it to
	\centering
	\includegraphics[width=\textwidth]{spectrum.png}
	\caption{The neutron flux energy spectrum is normalized by unit lethargy and the area under the curve is normalized to 1 for the initial and equilibrium fuel salt compositions.}
	\label{fig:spectrum}
\end{figure}

Figure~\ref{fig:spectrum_zones} shows that zone I produced more thermal neutrons than zone II, corresponding to a majority of fissions occurring in the central part of the core. In the undermoderated zone II, the neutron energy spectrum is harder, which leads to more neutron captures in $^{232}$Th and helps achieve a relatively high breeding ratio. Moreover, the (n,$\gamma$) resonance energy range in $^{232}$Th is from 10$^{-4}$ to 10$^{-2}$ MeV.
Therefore, the moderator-to-fuel ratio for zone II was chosen to shift the neutron energy spectrum into this range. Furthermore, in the central core region (zone I), the neutron energy spectrum shifts to a harder spectrum over 20 years of reactor operation. Meanwhile, in the outer core region (zone II), a similar spectral shift takes place at a reduced scale. These results are in good agreement with the original ORNL report \cite{robertson_conceptual_1971} and the most recent whole-core steady-state study \cite{park_whole_2015}. It is important to obtain epithermal and thermal spectra to produce $^{233}$U from $^{232}$Th because the radiative capture cross section of thorium decreases monotonically from $10^{-10}$ MeV to $10^{-5}$ MeV. Hardening the spectrum tends to significantly increase resonance absorption in thorium and decrease absorptions in fissile and construction materials.

\begin{figure}[ht!] % replace 't' with 'b' to force it to
	\centering
	\includegraphics[width=\textwidth]{spectrum_zones.png}
	\caption{The neutron flux energy spectrum in different core regions is normalized by unit lethargy and the area under the curve is normalized to 1 for the initial and equilibrium fuel salt compositions.}
	\label{fig:spectrum_zones}
\end{figure}

\subsection{Neutron flux}

Figure~\ref{fig:radial_flux} shows the radial distributions of the fast and thermal neutron flux for both the initial and equilibrium compositions. The neutron fluxes have similar shapes for both compositions, but the equilibrium case has a harder spectrum. A significant spectral shift was observed in the central region of the core (zone I), while for the outer region (zone II) it is negligible for fast neutrons but notable for thermal neutrons. These radial neutron flux distributions agree with the fluxes in the original ORNL report \cite{robertson_conceptual_1971}. Overall, spectrum hardening during \gls{MSBR} operation should be carefully studied when designing the reactivity control system.

\begin{figure}[ht!] % replace 't' with 'b' to force it to
	\centering
	\includegraphics[width=\textwidth]{radial_flux.png}
	\caption{Radial neutron flux distribution for initial and equilibrium fuel salt composition.}
	\label{fig:radial_flux}
\end{figure}

\subsection{Power and breeding distribution}

Table~\ref{tab:powgen_fraction} shows the power fraction in each zone for the initial and equilibrium fuel compositions. Figure~\ref{fig:pow_den} shows the normalized power distribution of the \gls{MSBR} quarter core for the equilibrium fuel salt composition. For both the initial and equilibrium compositions, fission primarily occurs in the center of the core, namely zone I. The spectral shift during reactor operation results in slightly different power fractions at startup and equilibrium, but most of the power is still generated in zone I at equilibrium (Table~\ref{tab:powgen_fraction}).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{table}[ht!]
	\caption{Power generation fraction in each zone for initial and equilibrium state.}
	\begin{tabularx}{\textwidth}{ m | s | s }
		\hline
		Core region	& Initial  & Equilibrium \\ \hline
		Zone I  & 97.91\% & 98.12\% \\
		Zone II & 2.09\%  & 1.88\% \\
		\hline
	\end{tabularx}
	\label{tab:powgen_fraction}
\end{table}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

Figure~\ref{fig:breeding_den} shows the neutron capture reaction rate distribution for $^{232}$Th, normalized by the total neutron flux, for the initial and equilibrium states.
The distribution reflects the spatial distribution of $^{233}$U production in the core. $^{232}$Th neutron capture produces $^{233}$Th, which then $\beta$-decays to $^{233}$Pa, the precursor for $^{233}$U production. Accordingly, this characteristic represents the breeding distribution in the \gls{MSBR} core. The spectral shift does not cause significant changes in either the power or the breeding distribution. Even after 20 years of operation, most of the power is still generated in zone I.

\begin{figure}[ht!] % replace 't' with 'b' to force it to
	\centering
	\includegraphics[width=\textwidth]{power_distribution_eq.png}
	\caption{Normalized power density for equilibrium fuel salt composition.}
	\label{fig:pow_den}
\end{figure}

\begin{figure}[ht!] % replace 't' with 'b' to force it to
	\centering
	\includegraphics[width=\textwidth]{breeding_distribution_eq.png}
	\caption{$^{232}$Th neutron capture reaction rate normalized by total flux for equilibrium fuel salt composition.}
	\label{fig:breeding_den}
\end{figure}

\subsection{Temperature coefficient of reactivity}

Table~\ref{tab:tcoef} summarizes the temperature effects on reactivity calculated in this work for both the initial and equilibrium fuel compositions, compared with the original \gls{ORNL} report data \cite{robertson_conceptual_1971}. By propagating the $k_{eff}$ statistical error provided by SERPENT2, the uncertainty for each temperature coefficient was obtained; it appears in Table~\ref{tab:tcoef}. Other sources of uncertainty are neglected, such as cross section measurement error and approximations inherent in the equations of state providing both the salt and graphite density dependence on temperature.

The main physical principle underlying the reactor temperature feedback is the thermal expansion of heated material. When the fuel salt temperature increases, the density of the salt decreases, but at the same time the total volume of fuel salt in the core remains constant because it is bounded by the graphite. When the graphite temperature increases, the density of graphite decreases, creating additional space for fuel salt. To determine the temperature coefficients, the cross section temperatures for the fuel and moderator were changed from 900K to 1000K. Three different cases were considered:
\begin{enumerate}
	\item Temperature of the fuel salt rising from 900K to 1000K.
	\item Temperature of the graphite rising from 900K to 1000K.
	\item Whole reactor temperature rising from 900K to 1000K.
\end{enumerate}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{table}[ht!]
	\caption{Temperature coefficients of reactivity for initial and equilibrium state.}
	\begin{tabularx}{\textwidth}{ X | r | r | r }
		\hline
		Reactivity coefficient & Initial & Equilibrium & Reference \\
		 & [pcm/K] & [pcm/K] & (initial)\cite{robertson_conceptual_1971} \tabularnewline \hline
		Doppler in fuel salt & $-4.73\pm0.038$ & $-4.69\pm0.038$ & $-4.37$ \tabularnewline
		Fuel salt density & $+1.21\pm0.038$ & $+1.66\pm0.038$ & $+1.09$ \tabularnewline
		Total fuel salt & $-3.42\pm0.038$ & $-2.91\pm0.038$ & $-3.22$ \tabularnewline \hline
		Graphite spectral shift & $+1.56\pm0.038$ & $+1.27\pm0.038$ & \tabularnewline
		Graphite density & $+0.14\pm0.038$ & $+0.23\pm0.038$ & \tabularnewline
		Total moderator (graphite) & $+1.69\pm0.038$ & $+1.35\pm0.038$ & $+2.35$ \tabularnewline \hline
		Total core & $-1.64\pm0.038$ & $-1.58\pm0.038$ & $-0.87$ \tabularnewline
		\hline
	\end{tabularx}
	\label{tab:tcoef}
\end{table}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

In the first case, changes in the fuel temperature only impact the fuel density. The geometry is unchanged because the fuel is a liquid. However, when the moderator heats up, both the density and the geometry change due to thermal expansion of the solid graphite blocks and reflector. Accordingly, the new graphite density was calculated using a linear temperature expansion coefficient of 1.3$\times10^{-6}$K$^{-1}$ \cite{robertson_conceptual_1971}. A new geometry input for SERPENT2, which takes into account the displacement of the graphite surfaces, was created based on this information. For the calculation of the displacement, it was assumed that the interface between the graphite reflector and the vessel did not move, and that the vessel temperature did not change. This is the most reasonable assumption for the short-term reactivity effects because the inlet salt cools the graphite reflector and the inner surface of the vessel.

The fuel temperature coefficient (FTC) is negative for both the initial and equilibrium fuel compositions due to thermal Doppler broadening of the resonance capture cross sections in the thorium. The small positive effect of the fuel salt density on reactivity increases from $+1.21$ pcm/K at reactor startup to $+1.66$ pcm/K for the equilibrium fuel composition, which reduces the FTC magnitude over the course of operation. This is in good agreement with earlier research \cite{robertson_conceptual_1971,park_whole_2015}. The moderator temperature coefficient (MTC) is positive for the startup composition and decreases during reactor operation because of spectrum hardening with fuel depletion. Finally, the total temperature coefficient of reactivity is negative for both cases, but decreases in magnitude during reactor operation due to the spectral shift. In summary, even after 20 years of operation the total temperature coefficient of reactivity remains relatively large and negative (compared with a conventional PWR, which has a temperature coefficient of about $-1.71$ pcm/$^\circ$F $\approx -3.08$ pcm/K \cite{forget_integral_2018}), despite the positive MTC, and affords excellent reactor stability and control.

\subsection{Reactivity control system rod worth}
Table~\ref{tab:rod_worth} summarizes the reactivity control system worth. During normal operation, the control (graphite) rods are fully inserted, and the safety (B$_4$C) rods are fully withdrawn. To insert negative reactivity into the core, the graphite rods are gradually withdrawn from the core. In an accident, the safety rods would be dropped down into the core.
The integral rod worths were calculated for various positions to separately estimate the worth of the control graphite rods\footnote{In \cite{robertson_conceptual_1971}, the graphite rods are referred to as ``control'' rods.}, the safety (B$_4$C) rods, and the whole reactivity control system. The control rod integral worth is approximately 28 cents and stays almost constant during reactor operation. The safety rod integral worth decreases by 16.2\% over 20 years of operation because of neutron spectrum hardening and absorber accumulation in the proximity of the reactivity control system rods. This 16\% decline in control system worth should be taken into account in \gls{MSBR} accident analysis and safety justification.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{table}[ht!]
	\caption{Control system rod worth for initial and equilibrium fuel composition.}
	\begin{tabularx}{\textwidth}{ b | x | x }
		\hline
		Reactivity parameter [cents] & Initial & Equilibrium \\ \hline
		Control (graphite) rod integral worth & $\ 28.2\pm0.8$ & $\ 29.0\pm0.8$ \\
		Safety (B$_4$C) rod integral worth & $251.8\pm0.8$ & $211.0\pm0.8$ \\
		Total reactivity control system worth & $505.8\pm0.7$ & $424.9\pm0.8$ \\
		\hline
	\end{tabularx}
	\label{tab:rod_worth}
\end{table}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\subsection{Six Factor Analysis}
The effective multiplication factor can be expressed using the following formula:
\begin{align*}
k_{eff} = k_{inf} P_f P_t = \eta \epsilon p f P_f P_t
\end{align*}
Table~\ref{tab:six_factor} summarizes the six factors, calculated with SERPENT2 and SaltProc together with their statistical uncertainties, for both the initial and equilibrium fuel salt compositions.

The fast and thermal non-leakage probabilities remain constant despite the evolving neutron spectrum during operation. In contrast, the neutron reproduction factor ($\eta$), resonance escape probability ($p$), and fast fission factor ($\epsilon$) are considerably different between startup and equilibrium. As indicated in Figure~\ref{fig:spectrum}, the neutron spectrum is softer at the beginning of reactor life. Neutron spectrum hardening causes the fast fission factor to increase through the core lifetime. The opposite is true for the resonance escape probability. Finally, the neutron reproduction factor decreases during reactor operation due to the accumulation of fissile plutonium isotopes.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{table}[hb!]
	\caption{Six factors for the full-core \gls{MSBR} model for initial and equilibrium fuel composition.}
	\begin{tabularx}{\textwidth}{ b | s | s }
		\hline
		Factor & Initial & Equilibrium \\ \hline
		Neutron reproduction factor ($\eta$) & $1.3960\pm.000052$ & $1.3778\pm.00005$ \\
		Thermal utilization factor (f) & $0.9670\pm.000011$ & $0.9706\pm.00001$ \\
		Resonance escape probability (p) & $0.6044\pm.000039$ & $0.5761\pm.00004$ \\
		Fast fission factor ($\epsilon$) & $1.3421\pm.000040$ & $1.3609\pm.00004$ \\
		Fast non-leakage probability (P$_f$) & $0.9999\pm.000004$ & $0.9999\pm.000004$ \\
		Thermal non-leakage probability (P$_t$) & $0.9894\pm.000005$ & $0.9912\pm.00005$ \\
		\hline
	\end{tabularx}
	\label{tab:six_factor}
\end{table}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
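As a purely arithmetic illustration of how these factors combine (a back-of-the-envelope check that multiplies only the tabulated central values for the initial composition in Table~\ref{tab:six_factor}, ignoring their uncertainties and any correlations between them),
\begin{align*}
k_{inf} &= \eta \epsilon p f = 1.3960 \times 1.3421 \times 0.6044 \times 0.9670 \approx 1.095, \\
k_{eff} &= k_{inf} P_f P_t \approx 1.095 \times 0.9999 \times 0.9894 \approx 1.083,
\end{align*}
confirming that the startup configuration is strongly supercritical.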
\subsection{Thorium refill rate}
In a \gls{MSBR} reprocessing scheme, the only external feed material flow is $^{232}$Th. Figure~\ref{fig:th_refill} shows the $^{232}$Th feed rate calculated for 60 years of reactor operation. The $^{232}$Th feed rate fluctuates significantly as a result of the batch-wise nature of this online reprocessing approach. Figure~\ref{fig:th_refill_zoomed} shows the zoomed thorium feed rate for a short 150-EFPD interval. Note that large spikes of up to 36 kg/day in thorium consumption occur every 3435 days. They are caused by the removal of the strong absorbers (Rb, Sr, Cs, Ba) at the end of their effective cycle (100\% of these elements are removed every 3435 days of operation). The corresponding increase in the effective multiplication factor (Figure~\ref{fig:keff}) and the intensified breeding lead to additional $^{232}$Th consumption.

\begin{figure}[ht!] % replace 't' with 'b' to force it to
	\centering
	\includegraphics[width=\textwidth]{Th_refill_rate.png}
	\caption{$^{232}$Th feed rate over 60 years of \gls{MSBR} operation.}
	\label{fig:th_refill}
\end{figure}

\begin{figure}[ht!] % replace 't' with 'b' to force it to
	\centering
	\includegraphics[width=\textwidth]{Th_refill_rate_zoomed.png}
	\caption{Zoomed $^{232}$Th feed rate for a 150-EFPD time interval.}
	\label{fig:th_refill_zoomed}
\end{figure}

The average thorium feed rate increases during the first 500 days of operation and then steadily decreases due to spectrum hardening and the accumulation of absorbers in the core. As a result, the average $^{232}$Th feed rate over 60 years of operation is about 2.40 kg/day. This thorium consumption rate is in good agreement with a recent online reprocessing study by \gls{ORNL} \cite{betzler_molten_2017}. At equilibrium, the thorium feed rate is determined by the reactor power, the energy released per fission, and the neutron energy spectrum.

\subsection{The effect of removing fission products from the fuel salt}

Loading the initial fuel salt composition into the \gls{MSBR} core leads to a supercritical configuration (Figure~\ref{fig:fp_removal}). After reactor startup, the effective multiplication factor for the case with volatile gas and noble metal removal is approximately 7500 pcm higher than for the case with no fission product removal. This significant impact on the reactor core is achieved due to the immediate removal (20-second cycle time) and the high absorption cross sections of Xe, Kr, Mo, and the other noble metals removed. The effect of rare earth element removal becomes considerable a few months after startup and reaches approximately 5500 pcm after 10 years of operation. The rare earth elements were removed at a slower rate (50-day cycle time). Moreover, Figure~\ref{fig:fp_removal} demonstrates that the batch-wise removal of strong absorbers every 3 days does not necessarily lead to fluctuations in the results, but rare earth element removal every 50 days causes an approximately 600 pcm jump in reactivity.

The effective multiplication factor of the core decreases gradually over the operation time because the fissile material ($^{233}$U) is continuously depleted from the fuel salt by fission, while fission products simultaneously accumulate in the fuel salt. Eventually, the core becomes subcritical after approximately 500 days of operation with no fission product removal, and after approximately 1300 days with volatile gas and noble metal removal only. The time at which the simulated full-core model reaches subcriticality ($k_{eff}<1.0$) is called the core lifetime. Therefore, removing fission products provides a significant neutronic benefit and enables a longer core lifetime.

\begin{figure}[ht!]
% replace 't' with 'b' to force it to \centering \includegraphics[width=\textwidth]{keff_rem_cases.png} \caption{Calculated effective multiplication factor for full-core \gls{MSBR} model with removal of various fission product groups over 10 years of operation.} \label{fig:fp_removal} \end{figure}
{ "alphanum_fraction": 0.7437823938, "avg_line_length": 60.1183574879, "ext": "tex", "hexsha": "0af84887f493268b1fec41eb8fd54a251b70d15c", "lang": "TeX", "max_forks_count": 4, "max_forks_repo_forks_event_max_datetime": "2020-04-30T15:06:40.000Z", "max_forks_repo_forks_event_min_datetime": "2018-05-17T17:04:23.000Z", "max_forks_repo_head_hexsha": "0b0e9e2f643038ef3359ece0dafbc536f833695c", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "andrewryh/msbr-reproc", "max_forks_repo_path": "results.tex", "max_issues_count": 38, "max_issues_repo_head_hexsha": "5c942f25fed851a38d8055a73a23c35a3de5b80d", "max_issues_repo_issues_event_max_datetime": "2019-01-20T21:55:45.000Z", "max_issues_repo_issues_event_min_datetime": "2018-05-18T14:58:21.000Z", "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "arfc/2020-rykhl-dissertation", "max_issues_repo_path": "2020-annuals-uq/results.tex", "max_line_length": 143, "max_stars_count": 4, "max_stars_repo_head_hexsha": "0b0e9e2f643038ef3359ece0dafbc536f833695c", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "andrewryh/msbr-reproc", "max_stars_repo_path": "results.tex", "max_stars_repo_stars_event_max_datetime": "2020-03-25T21:13:11.000Z", "max_stars_repo_stars_event_min_datetime": "2018-10-10T19:43:59.000Z", "num_tokens": 6231, "size": 24889 }
\documentclass[%
 reprint,
%superscriptaddress,
%groupedaddress,
%unsortedaddress,
%runinaddress,
%frontmatterverbose,
%preprint,
%preprintnumbers,
nofootinbib,
%nobibnotes,
%bibnotes,
 amsmath,amssymb,
 aps,
%pra,
%prb,
%rmp,
%prstab,
%prstper,
floatfix,
]{revtex4-2}

\usepackage{gensymb}
\usepackage{textcomp}
\usepackage{lipsum}
\usepackage{graphicx}% Include figure files
\usepackage{dcolumn}% Align table columns on decimal point
\usepackage{bm}% bold math
\usepackage{siunitx}
\DeclareSIUnit\gauss{G}
\DeclareSIUnit\erg{erg}
\DeclareMathOperator{\Rot}{rot}
\sisetup{separate-uncertainty=true}
\usepackage{tabularx}
\usepackage{amssymb}
\usepackage{amsmath}
\usepackage{relsize}
\usepackage{commath}
\usepackage{enumitem}
\usepackage{xfrac}
\usepackage{float}
\usepackage{booktabs}
\usepackage{makecell}
\usepackage{caption}
\usepackage{subcaption}
\usepackage{multirow}
\usepackage[version=4]{mhchem}
\usepackage[colorlinks,bookmarks=false,citecolor=blue,linkcolor=blue,urlcolor=blue]{hyperref}
%\usepackage{hyperref}% add hypertext capabilities
%\usepackage[mathlines]{lineno}% Enable numbering of text and display math
%\linenumbers\relax % Commence numbering lines

%\usepackage[showframe,%Uncomment any one of the following lines to test
%%scale=0.7, marginratio={1:1, 2:3}, ignoreall,% default settings
%%text={7in,10in},centering,
%%margin=1.5in,
%%total={6.5in,8.75in}, top=1.2in, left=0.9in, includefoot,
%%height=10in,a5paper,hmargin={3cm,0.8in},
%]{geometry}

\begin{document}

\preprint{APS/123-QED}

\title{Rutherford Scattering}% Force line breaks with \\

\author{Maitrey Sharma}
\email{[email protected]}
\affiliation{School of Physical Sciences, National Institute of Science Education and Research, HBNI, Jatni-752050, India}

\date{\today}% It is always \today, today,
             %  but any date may be explicitly specified

\begin{abstract}
In this experiment, we perform the famous Gold foil experiment whose results were first explained by Ernest Rutherford in 1909. We bombard a Gold (and later an Aluminium) foil using an alpha particle source (Am-241) and observe the scattering at various angles (orientations) between the source beam and the receiving foil. We explain the results so obtained and compare those from the Gold and Aluminium foils. We also determine the atomic number of Aluminium.
\end{abstract}

\keywords{}
\maketitle

%\tableofcontents

\section{\label{sec:level1}Introduction}
In 1909, Hans Geiger and Ernest Marsden performed the gold foil experiment in collaboration with Ernest Rutherford, in which they fired a beam of alpha particles (helium nuclei) at foils of gold leaf only a few atoms thick. At the time of the experiment, the atom was thought to be analogous to a plum pudding (as proposed by J. J. Thomson), with the negatively-charged electrons (the plums) studded throughout a positive spherical matrix (the pudding). If the plum-pudding model were correct, the positive ``pudding'', being more spread out than in the correct model of a concentrated nucleus, would not be able to exert such large coulombic forces, and the alpha particles should only be deflected by small angles as they pass through.
\par
However, the intriguing results showed that around 1 in 20,000 alpha particles were deflected by very large angles (over $90 \degree$), while the rest passed through with little deflection. From this, Rutherford concluded that the majority of the mass was concentrated in a minute, positively-charged region (the nucleus) surrounded by electrons.
When a (positive) alpha particle approached sufficiently close to the nucleus, it was repelled strongly enough to rebound at high angles. The small size of the nucleus explained the small number of alpha particles that were repelled in this way. Rutherford showed, using the method outlined below, that the size of the nucleus was less than about $10^{-14}$ m (how much less than this size, Rutherford could not tell from this experiment alone; see more below on this problem of lowest possible size). As a visual example, figure (\ref{fig:cloudchamber}) shows the deflection of an alpha particle by a nucleus in the gas of a cloud chamber.
\begin{figure}
    \centering
    \includegraphics[scale = 0.15]{Figures/AlphaTrackRutherfordScattering3.jpg}
    \caption{In a cloud chamber, a $\SI{5.3}{\mega \electronvolt}$ alpha particle track from a lead-210 pin source near point 1 undergoes Rutherford scattering near point 2, deflecting by an angle of about $30 \degree$. It scatters once again near point 3, and finally comes to rest in the gas. The target nucleus in the chamber gas could have been a nitrogen, oxygen, carbon, or hydrogen nucleus. It received enough kinetic energy in the elastic collision to cause a short visible recoiling track near point 2. (The scale is in centimeters.)}
    \label{fig:cloudchamber}
\end{figure}
\par
Rutherford scattering is the elastic scattering of charged particles by the Coulomb interaction. It led to the development of the planetary Rutherford model of the atom and eventually the Bohr model. Rutherford scattering was first referred to as Coulomb scattering because it relies only upon the static electric (Coulomb) potential, and the minimum distance between particles is set entirely by this potential. The classical Rutherford scattering process of alpha particles against gold nuclei is an example of \textit{elastic scattering} because neither the alpha particles nor the gold nuclei are internally excited.

\section{Apparatus}
\begin{enumerate}
    \item Scattering chamber after Rutherford
    \item Aluminium and Gold foil in frame
    \item Vacuum pump
    \item Discriminator preamplifier
    \item Counter
    \item Plug-in power supply unit
    \item Am-241 preparation
\end{enumerate}

\section{Experiment description}
The experimental apparatus is shown schematically in figure (\ref{fig:setup}).
\begin{figure}
    \centering
    \includegraphics[scale = 0.5]{Figures/setup.png}
    \caption{Schematic of the experimental setup for the Rutherford scattering experiment.}
    \label{fig:setup}
\end{figure}
A detailed diagram of the scattering chamber is given in figure (\ref{fig:chamber}).
\begin{figure}
    \centering
    \includegraphics[scale = 0.6]{Figures/chamber.png}
    \caption{The scattering chamber: (1) preparation (2) holder (3) gold foil (4) slit (5) swivel arm (6) detector.}
    \label{fig:chamber}
\end{figure}
The scattering geometry is given in figure (\ref{fig:geometry}).
\begin{figure}
    \centering
    \includegraphics[scale = 0.6]{Figures/geometry.png}
    \caption{The scattering geometry: (1) preparation (2) collimator slit (3) gold foil (4) detector.}
    \label{fig:geometry}
\end{figure}

\section{Theory}
If $\alpha$-particles are allowed to strike a thin gold foil, they are deflected from their path (\textit{scattering}), each by an angle $\theta$. The majority of $\alpha$-particles are scattered by angles less than $1 \degree$ (figure (\ref{fig:scattering})).
\begin{figure}
    \centering
    \includegraphics[scale = 0.6]{Figures/scattering.png}
    \caption{Scattering of $\alpha$-particles on a monolayer of atoms.}
    \label{fig:scattering}
\end{figure}
\par
A few particles, however, show substantially larger scattering angles $\theta$, in the extreme case up to $180 \degree$ (\textit{back scattering}). These initially qualitative observations can only be explained by assuming that the gold atoms have a very small nucleus, containing practically the whole atomic mass, and being positively charged.
\par
On the basis of this idea Rutherford calculated the angular distribution of the scattering rate $N(\theta)$. The scattering rate is the number of particles which are scattered per unit time into a given interval $d \theta$ around an average angle $\theta$. The result of this calculation is \textit{Rutherford's scattering formula}:
\begin{equation}
\label{eq:Ntheta}
    N (\theta) = N_0 \cdot c_F \cdot d_F \dfrac{Z^2 \cdot e^4}{(8 \pi \epsilon_0 E_{\alpha})^2 \cdot \sin^4 (\theta/2)}
\end{equation}
where $N_0$ is the particle rate in the foil; $c_F$ is the atomic concentration in the foil; $d_F$ is the thickness of the foil; $Z$ is the nuclear charge number of the scattering material; $E_{\alpha}$ is the energy of the $\alpha$-particles; $e$ is the elementary charge ($e = \SI{1.6021e-19}{\ampere \second}$); $\epsilon_0$ is the dielectric constant in vacuum ($\epsilon_0 = \SI{8.8524e-12}{\ampere \second \per \volt \per \metre}$).
\par
The $\alpha$-particles emitted from the Am-241 preparation fall through a slit aperture of $\SI{5}{\milli \metre}$ width onto the gold foil and leave this gold foil with various scattering angles. The scattered $\alpha$-particles are identified with a semiconductor detector.
\par
If we compare the scattering rates between two different foil materials (e.g. Au and Al) at the same angle $\theta$, we can derive from the scattering formula (\ref{eq:Ntheta}):
\begin{equation}
    \dfrac{N_{Au}}{N_{Al}} = \dfrac{c_{Au} d_{Au} Z^2_{Au}}{c_{Al} d_{Al} Z^2_{Al}}
\end{equation}
and thus
\begin{equation}
\label{eq:Zal}
    Z_{Al} = \sqrt{\dfrac{N_{Al}(\theta) c_{Au} d_{Au} Z^2_{Au}}{N_{Au}(\theta) c_{Al} d_{Al}}}
\end{equation}

\section{Evaluation and Results}
After recording the pulse counts $n(\theta)$, the mean values $n_m(\theta)$ can be determined. Using the mean values $n_m(\theta)$, the scattering rates $N_d(\theta)$ are calculated by
\begin{equation}
    N_d(\theta) = \dfrac{n_m (\theta)}{t(\theta)}
\end{equation}
These measured rates $N_d(\theta)$ are characteristic of the plane scattering geometry given by the transparent construction of the chamber used in this experiment. The theoretical function (according to Rutherford's formula), however, refers to a three-dimensional geometry. The relation between these two descriptions can be obtained from the following construction (figure (\ref{fig:angle})).
\begin{figure}
    \centering
    \includegraphics[scale = 0.7]{Figures/angular.png}
    \caption{The $\alpha$-particles are scattered into the angular region $\theta + d \theta$.}
    \label{fig:angle}
\end{figure}
\par
Each plane angle $\theta$ corresponds in space to a cone with an aperture of $2 \cdot \theta$ (produced by rotation of the plane structure around the incident beam axis).
In the same way, the plane angular differential $d \theta$ corresponds in three dimensions to a solid angle differential $d \Omega$ given by
\begin{equation}
    d \Omega = 2 \cdot \pi \cdot \sin (\theta) \, d\theta
\end{equation}
This geometrical correction allows us to derive a relation between the plane scattering rate $N_d(\theta)$ and the spatial scattering rate $N(\theta)$:
\begin{equation}
    N (\theta) = 2 \cdot \pi \cdot \sin (\theta) \cdot N_d (\theta)
\end{equation}
Finally, the corresponding spatial values $N(\theta)$ are calculated (table (\ref{tab:data})) and the space-corrected values are plotted in a diagram (figure (\ref{fig:graph})).
\par
The measured value pairs $\{\theta / N(\theta )\}$ can be compared with the shape of the theoretical curve given by the equation:
\begin{equation}
    f(\theta) = \dfrac{A}{\sin^4 \Big( \dfrac{\theta - b}{2} \Big)}
\end{equation}
The proportionality factor $A$ represents a vertical shift (on the logarithmic scale). The coefficient $b$ represents a small displacement along the horizontal angular scale.
\par
Now, with $N_{Au} (-15 \degree) = \SI{18.701}{\per \second}$ and $N_{Al} (-15 \degree) = \SI{3.402}{\per \second}$, and with $d_{Au} = \SI{2}{\micro \metre}$, $d_{Al} = \SI{8}{\micro \metre}$, $c_{Au} \approx c_{Al}$ and $Z_{Au} = 79$, we obtain from equation (\ref{eq:Zal}):
\begin{equation}
    Z_{Al} = 16.8
\end{equation}
and for $N_{Au} (15 \degree) = \SI{4.011}{\per \second}$ and $N_{Al} (15 \degree) = \SI{0.285}{\per \second}$, with the rest of the parameters the same as before, we get
\begin{equation}
    Z_{Al} = 10.5
\end{equation}
Taking the mean, we obtain $Z_{Al} = 13.7$, which is close to the actual value of $Z_{Al} = 13$.
\par
The recorded data are tabulated in table (\ref{tab:data}). The corresponding plot of the scattering angle $\theta$ against $N(\theta)$ is given in figure (\ref{fig:graph}).
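As a quick numerical cross-check of the values quoted above, the short Python script below re-evaluates the solid-angle correction for one measurement and the two estimates of $Z_{Al}$ obtained from equation (\ref{eq:Zal}). It is only an illustrative sketch of arithmetic already reported in this section (the variable names are ours) and is not part of the data acquisition or fitting chain.

\begin{verbatim}
import math

# Solid-angle correction N(theta) = 2*pi*sin(theta) * N_d(theta),
# checked for the Gold-foil measurement at |theta| = 30 degrees.
theta = math.radians(30)
N_d = 0.169                                  # directly measured rate, 1/s
print(2 * math.pi * math.sin(theta) * N_d)   # ~0.531 1/s, as tabulated

# Z_Al from the space-corrected rates at -15 and +15 degrees,
# with d_Au = 2 um, d_Al = 8 um, c_Au ~ c_Al and Z_Au = 79.
Z_Au, d_Au, d_Al = 79, 2e-6, 8e-6
for N_Al, N_Au in [(3.402, 18.701), (0.285, 4.011)]:
    Z_Al = math.sqrt(N_Al * d_Au * Z_Au**2 / (N_Au * d_Al))
    print(round(Z_Al, 1))                    # 16.8 and 10.5
\end{verbatim}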
\newcommand{\ra}[1]{\renewcommand{\arraystretch}{#1}} \begin{table*}[] \ra{1.2} \caption{Measured values for Gold foil and slit width $d = \SI{5}{\milli \metre}$} \label{tab:data} \setlength{\tabcolsep}{5pt} \begin{tabular}{@{}ccccccc@{}} \toprule \begin{tabular}[c]{@{}c@{}}\textbf{Angle}, $\theta$\\ (degrees)\end{tabular} & \begin{tabular}[c]{@{}c@{}}\textbf{Angle}, $\theta$\\ (radians)\end{tabular} & \begin{tabular}[c]{@{}c@{}}\textbf{Gate time} $t(\theta)$\\ ($\si{\second}$)\end{tabular} & \textbf{Pulse counts} $n(\theta)$ & \begin{tabular}[c]{@{}c@{}}\textbf{Mean Pulse counts}\\ $n_m(\theta)$ \end{tabular} & \begin{tabular}[c]{@{}c@{}}\textbf{Counting rate} \\ \textit{(directly)} \\ $N_d (\theta)$ ($\si{\per \second}$)\end{tabular} & \begin{tabular}[c]{@{}c@{}}\textbf{Counting rate} \\ \textit{(space corrected)} \\ $N (\theta)$ ($\si{\per \second}$)\end{tabular} \\ \midrule \multirow{3}{*}{-30} & \multirow{3}{*}{-0.524} & \multirow{3}{*}{900} & 148 & \multirow{3}{*}{152} & \multirow{3}{*}{0.169} & \multirow{3}{*}{0.531} \\ & & & 151 & & & \\ & & & 157 & & & \\ \\ \multirow{3}{*}{-25} & \multirow{3}{*}{-0.436} & \multirow{3}{*}{600} & 227 & \multirow{3}{*}{230} & \multirow{3}{*}{0.383} & \multirow{3}{*}{1.016} \\ & & & 241 & & & \\ & & & 221 & & & \\ \\ \multirow{3}{*}{-20} & \multirow{3}{*}{-0.349} & \multirow{3}{*}{200} & 572 & \multirow{3}{*}{594} & \multirow{3}{*}{2.970} & \multirow{3}{*}{6.382} \\ & & & 597 & & & \\ & & & 613 & & & \\ \\ \multirow{3}{*}{-15} & \multirow{3}{*}{-0.262} & \multirow{3}{*}{100} & 1181 & \multirow{3}{*}{1150} & \multirow{3}{*}{11.500} & \multirow{3}{*}{18.701} \\ & & & 1126 & & & \\ & & & 1143 & & & \\ \\ \multirow{3}{*}{-10} & \multirow{3}{*}{-0.175} & \multirow{3}{*}{100} & 2797 & \multirow{3}{*}{2817} & \multirow{3}{*}{28.173} & \multirow{3}{*}{30.739} \\ & & & 2771 & & & \\ & & & 2884 & & & \\ \\ -5 & -0.087 & 100 & 3558 & 3558 & 35.580 & 19.484 \\ \\ 5 & 0.087 & 100 & 2787 & 2787 & 27.870 & 15.262 \\ \\ \multirow{3}{*}{10} & \multirow{3}{*}{0.175} & \multirow{3}{*}{100} & 1204 & \multirow{3}{*}{1246} & \multirow{3}{*}{12.460} & \multirow{3}{*}{13.595} \\ & & & 1265 & & & \\ & & & 1269 & & & \\ \\ \multirow{3}{*}{15} & \multirow{3}{*}{0.262} & \multirow{3}{*}{100} & 246 & \multirow{3}{*}{247} & \multirow{3}{*}{2.467} & \multirow{3}{*}{4.011} \\ & & & 255 & & & \\ & & & 239 & & & \\ \\ \multirow{3}{*}{20} & \multirow{3}{*}{0.349} & \multirow{3}{*}{200} & 86 & \multirow{3}{*}{75} & \multirow{3}{*}{0.375} & \multirow{3}{*}{0.806} \\ & & & 71 & & & \\ & & & 68 & & & \\ \\ \multirow{3}{*}{25} & \multirow{3}{*}{0.436} & \multirow{3}{*}{600} & 104 & \multirow{3}{*}{115} & \multirow{3}{*}{0.192} & \multirow{3}{*}{0.509} \\ & & & 128 & & & \\ & & & 113 & & & \\ \\ \multirow{3}{*}{30} & \multirow{3}{*}{0.524} & \multirow{3}{*}{900} & 77 & \multirow{3}{*}{73} & \multirow{3}{*}{0.081} & \multirow{3}{*}{0.254} \\ & & & 72 & & & \\ & & & 69 & & & \\ \bottomrule \end{tabular} \end{table*} \begin{figure} \centering \includegraphics[scale = 0.2]{Figures/graph.png} \caption{The plot of scattering angle $\theta$ against $N(\theta)$ and the corresponding fitted curve according to equation (\ref{eq:Ntheta})} \label{fig:graph} \end{figure} \begin{table*}[!ht] \caption{Measured values for Aluminium foil and slit width $d = \SI{5}{\milli \metre}$} \label{tab:data} \begin{tabular}{@{}ccccccc@{}} \toprule \begin{tabular}[c]{@{}c@{}}\textbf{Angle}, $\theta$\\ (degrees)\end{tabular} & \begin{tabular}[c]{@{}c@{}}\textbf{Angle}, $\theta$\\ (radians)\end{tabular} & 
\begin{tabular}[c]{@{}c@{}}\textbf{Gate time} $t(\theta)$\\ ($\si{\second}$)\end{tabular} & \textbf{Pulse counts} $n(\theta)$ & \begin{tabular}[c]{@{}c@{}}\textbf{Mean Pulse counts}\\ $n_m(\theta)$ \end{tabular} & \begin{tabular}[c]{@{}c@{}}\textbf{Counting rate} \\ \textit{(directly)} \\ $N_d (\theta)$ ($\si{\per \second}$)\end{tabular} & \begin{tabular}[c]{@{}c@{}}\textbf{Counting rate} \\ \textit{(space corrected)} \\ $N (\theta)$ ($\si{\per \second}$)\end{tabular} \\ \midrule \\ -15 & -0.262 & 1000 & 2092 & 2092 & 2.092 & 3.402 \\ \\ 15 & 0.262 & 1000 & 175 & 175 & 0.175 & 0.285 \\ \\ \bottomrule \end{tabular} \end{table*} \section{Discussions} \begin{enumerate} \item A small inaccuracy of the collimator-slit adjustment, or a non-centric distribution of the radiation coming from the source preparation in the holder, may cause a shift of the curve along the horizontal axis (angle shift $<3 \degree$). \item Due to such effects, it is useful to record scattering rates in both the positive and the negative angular ranges, so that both branches of the curve can be used to determine the displacement of the symmetry axis accurately. \end{enumerate} \section{Precautions} \begin{enumerate} \item The radioactive sources should be handled with utmost care. \item Never touch the gold or aluminium foil. \item Venting of the chamber after the experiment has to be done very carefully. \end{enumerate} \section{Conclusions} The results obtained in the experiment agree with expectations: the space corrected counting rate follows the $\sin^{-4}\big((\theta-b)/2\big)$ shape of the Rutherford scattering formula, and the mean estimate $Z_{Al} = 13.7$ obtained by comparing the gold and aluminium counting rates is close to the actual value of $Z_{Al} = 13$. \end{document} % % ****** End of file apssamp.tex ******
% LaTeX Curriculum Vitae Template % % Copyright (C) 2004-2009 Jason Blevins <[email protected]> % http://jblevins.org/projects/cv-template/ % % You may use use this document as a template to create your own CV % and you may redistribute the source code freely. No attribution is % required in any resulting documents. I do ask that you please leave % this notice and the above URL in the source code if you choose to % redistribute this file. \documentclass[letterpaper, 11pt]{article} \usepackage{hyperref} \usepackage{geometry} \usepackage{url} \usepackage{hanging} \usepackage{parskip} \usepackage[utf8]{inputenc} \usepackage{multirow} % Comment the following lines to use the default Computer Modern font % instead of the Palatino font provided by the mathpazo package. % Remove the 'osf' bit if you don't like the old style figures. \usepackage[T1]{fontenc} \usepackage[sc,osf]{mathpazo} % Set your name here \def\name{Jiacheng He} % Replace this with a link to your CV if you like, or set it empty % (as in \def\footerlink{}) to remove the link in the footer: \def\footerlink{} % The following metadata will show up in the PDF properties \hypersetup{ colorlinks = true, urlcolor = black, pdfauthor = {\name}, pdfkeywords = {economics, statistics, mathematics}, pdftitle = {\name: Curriculum Vitae}, pdfsubject = {Curriculum Vitae}, pdfpagemode = UseNone } \geometry{ body={6.5in, 8.5in}, left=0.8in, right=0.8in, top=1.25in } % Customize page headers \pagestyle{myheadings} \markright{\name} \thispagestyle{empty} % Custom section fonts \usepackage{sectsty} \sectionfont{\rmfamily\mdseries\Large} \subsectionfont{\rmfamily\mdseries\itshape\large} % Other possible font commands include: % \ttfamily for teletype, % \sffamily for sans serif, % \bfseries for bold, % \scshape for small caps, % \normalsize, \large, \Large, \LARGE sizes. % Don't indent paragraphs. \setlength\parindent{0em} % Make lists without bullets \renewenvironment{itemize}{ \begin{list}{}{ \setlength{\leftmargin}{1.5em} } }{ \end{list} } \begin{document} % Place name at left \hspace{1cm}{\huge \name} % Alternatively, print name centered and bold: %\centerline{\huge \bf \name} \vspace{0.25in} \hspace{0.8cm} \begin{minipage}{0.67\linewidth} \begin{tabular}{ll} Email: & \href{mailto:[email protected]}{\tt [email protected]} \\ Homepage: & \href{https://jiachenghe.github.io}{\tt jiachenghe.github.io} \\ LinkedIn: & \href{https://www.linkedin.com/in/jiachenghe}{\tt www.linkedin.com/in/jiachenghe} \\ GitHub: & \href{https://github.com/JiachengHe/}{\tt github.com/JiachengHe/} \\ Kaggle: & \href{https://www.kaggle.com/dg040301}{\tt www.kaggle.com/dg040301} \\ % Homepage: & \href{http://tesswise.com}{\tt http://tesswise.com} \\ \end{tabular} \end{minipage} \begin{minipage}{0.4\linewidth} \begin{tabular}{r} \href{http://economics.ku.edu}{Department of Economics} \\ University of Kansas \\ Snow Hall 257 \\ Lawrence, KS, 66045\\ \end{tabular} \end{minipage} %\section*{Research Interests} %\begin{itemize} %\item I am interested in causal inference, text analysis and experimental political science. Current projects including using Twitter to study collective action in the context of the Occupy movement and using regression discontinuity designs to study the effect of citizenship in Western Europe. %\end{itemize} \vspace{8mm} \noindent \begin{tabular}{@{} p{3cm} p{10cm} r} \Large{Education} & \textbf{University of Kansas} (GPA 4.0)\\ & \hspace{5mm}Ph.D. 
Candidate in Economics & 2014-2019 (expected) \\ & \hspace{5mm}Graduate Certificate in Applied Mathematics & 2014-2018 (expected) \\ & \hspace{5mm}M.A. in Economics & 2014-2016 \\ & \\ & \textbf{Guangdong University of Foreign Studies, China} \\ & \hspace{5mm}B.A. in Finance & 2010-2014 \\ & \\ \end{tabular} \noindent \begin{tabular}{@{} p{3cm} l} \Large{Research} & Applied Microeconometrics, Labor Economics, Health Economics, \\ \Large{Fileds} & Applied Machine Learning, Natural Language Processing \\ & \\ \end{tabular} \noindent \begin{tabular}{@{} p{3cm} p{0.75\linewidth}} \Large{Research} & \textbf{Working Papers} \\ \Large{Projects} & "Does Age-Based Public Health Insurance Eligibility Save Medicaid Divorce? Regression Discontinuity Evidence at Age 65" [\href{https://jiachenghe.github.io/files/paper/Medicaid_Divorce.pdf}{\underline{PDF}}] [\href{https://jiachenghe.github.io/files/slides/Medicaid_Divorce_slides.pdf}{\underline{slides}}] \vspace{3mm} \\ & "Monte Carlo Simulation on Causal Forest" [\href{https://jiachenghe.github.io/files/paper/Causal_Forest.pdf}{\underline{PDF}}] [\href{https://jiachenghe.github.io/files/slides/Causal_Forest_slides.pdf}{\underline{slides}}] [\href{https://github.com/JiachengHe/Project904}{\underline{GitHub}}] \vspace{3mm} \\ & "Sentiment and Content Analysis Surrounding the Pulse Nightclub Shooting", with Mohammad Isyroqi Fathan, Sri Gayatri Sundar, Muhammad Saad Adnan, and Sierra Seacat [\href{https://jiachenghe.github.io/files/paper/Orlando_Shooting.pdf}{\underline{PDF}}] \vspace{3mm} \\ & "Occupational Effects of Housing Boom and Routine Biased Technological Change" \\ & \\ &\textbf{Work in Progress} \\ & "Did Removing Asset Test of Medicare Savings Program Reduce Seniors’ Difficulty to Access Health Care?" \\ & \\ \end{tabular} \noindent \begin{tabular}{@{} p{3cm} p{10cm} r} \Large{Kaggle} & \href{https://www.kaggle.com/c/donorschoose-application-screening}{DonorsChoose.org Application Screening} & 03/2018 - Present \\ \Large{Experience} & \href{https://www.kaggle.com/c/house-prices-advanced-regression-techniques}{ House Prices: Advanced Regression Techniques} & 01/2018 - Present \\ & \href{https://www.kaggle.com/c/favorita-grocery-sales-forecasting}{Corporación Favorita Grocery Sales Forecasting} & 11/2017 - 01/2018 \\ & \href{https://www.kaggle.com/c/titanic}{Titanic: Machine Learning from Disaster} & 10/2017 - Present \\ & \\ \end{tabular} \noindent \begin{tabular}{@{} p{3cm} p{10cm} r} \Large{Teaching} & \textbf{Graduate Teaching Assistant at University of Kansas} \\ \Large{Experience} & \hspace{5mm}Principle of Microeconomics (ECON 142) & 2014F, 2015S, 2017F \\ & \hspace{5mm}Master-level Macroeconomics (ECON 701) & 2018S \\ & \hspace{5mm}PhD Microeconomic Theory 1 (ECON 800 \& 801) & 2015F, 2016F \\ & \hspace{5mm}PhD Macroeconomic Theory 1 (ECON 809 \& 810) & 2016S, 2017S, 2018S \\ & \hspace{5mm}PhD Macroeconomic Theory 2 (ECON 811) & 2016F \\ & \hspace{5mm} (F = fall semester, S = spring semester) \\ & \\ \end{tabular} \noindent \begin{tabular}{@{} p{3cm} p{11cm} r} \Large{Honors and } & \textbf{University of Kansas}\\ \Large{Awards} & \hspace{5mm}Passed PhD oral comprehensive exam with honors & 12/2016 \\ & \hspace{5mm}Economics Department Research Fellowship & Summer 2016 \\ & \hspace{5mm}Economics Department Research Fellowship & Summer 2015 \\ & \\ & \textbf{Guangdong University of Foreign Studies} \\ & \hspace{5mm}Outstanding Bachelor Thesis Award & 05/2014 \\ & \hspace{5mm}Outstanding Student Scholarship & 2011-2013\\ & \hspace{5mm}Model 
Student of Academic Records Award & 2012-2013\\ & \hspace{5mm}Jetta Scholarship & 2011-2012 \\ & \hspace{5mm}Merit Student Award & 2011-2012 \\ & \\ & \textbf{China Society for Industrial and Applied Mathematics} \\ & \hspace{5mm}\multirow{2}{11cm}{Third Prize in Contemporary Undergraduate Mathematical Contest in Modeling (CUMCM)} & 10/2013 \\ & \\ & \\ \end{tabular} \noindent \begin{tabular}{@{} p{3cm} p{12cm} r} \Large{Research} & KU Economics Department Seminar & 03/2018 \\ \Large{Presentation} & Washington University in St. Louis 12th Economics Graduate Students' Conference & 10/2017 \\ & Missouri Valley Economic Association 54th Annual Meeting & 10/2017 \\ & KU Economics Department Seminar & 10/2017 \\ & \\ \end{tabular} \noindent \begin{tabular}{@{} p{3cm} l} \Large{Computer} & R, Python, Git, Linux, \LaTeX, \\ \Large{Skills} & SQL, SAS, Stata, MATLAB, Julia \\ & \\ \end{tabular} \noindent \begin{tabular}{@{} p{3cm} p{15cm} } \Large{MOOC} & Julia Scientific Programming (with Honors) [\href{https://www.coursera.org/account/accomplishments/certificate/X8AH3BM8MML6}{\underline{link}}] \\ \Large{Certificate} & Introduction to Computer Science and Programming Using Python [\href{https://s3.amazonaws.com/verify.edx.org/downloads/7444184db6054c2e8404972c595b5ebf/Certificate.pdf}{\underline{link}}] \\ \\ \end{tabular} \noindent \begin{tabular}{@{} p{3cm} l} \Large{Language} & English (fluent), Cantonese (native), Mandarin (native) \\ \end{tabular} \vspace{1cm} % Footer \begin{center} \begin{footnotesize} Last updated: \today \\ Click \href{https://jiachenghe.github.io/files/cv/CV.pdf}{\underline{here}} for the newest version. \end{footnotesize} \end{center} \end{document}
\title{\bf Star formation}
\section{Basics}
Star formation is a fundamental process in forming galaxies. It proceeds through the cooling and gravitational collapse of gas within gas {\it clouds}. Based on molecular gas measurements, the mass function of clouds has a slope of about $\alpha\sim 1.5$--2 ($\dd{N}/\dd{M} \propto M^{-\alpha}$) between $10^3$ and $10^6$ $M_\odot$. During the collapse process, the gas becomes cold enough for molecules to form, and stars always form within molecular clouds. Individual stars form in a range of masses. Light from the most massive forming stars tends to dissociate and then ionize the surrounding gas cloud. Dust in the cloud absorbs and is heated by ultraviolet light from the stars. The {\it star clusters} that result seem to have a similar mass distribution to the gas clouds (though it is thought that clusters form within isolated {\it clumps} within the clouds). At redshift zero, spiral galaxies at Milky Way masses have star formation rates around a few solar masses per year. Most estimates of the dependence on stellar mass find SFR $\propto M_\ast^{2/3}$. At higher redshifts the typical star formation rates of such galaxies are higher, by about a factor of ten at $z\sim 1$.
\subsection{Initial mass function}
The stars within the clusters form over a large range of masses. The initial mass function estimated within the Milky Way appears to peak between 0.1 and 0.3 $M_\odot$, and at higher masses decline with a slope close to $-2.35$ (the {\it Salpeter} slope). This initial mass function may depend on environment in various ways. Estimates of star formation rate integrate the mass of stars extending down to the hydrogen burning limit ($0.08$ $M_\odot$) even though the star formation indicators are only directly sensitive to much more massive stars.
\subsection{Color-magnitude diagram}
For systems whose stars can be resolved from each other, we can estimate the star formation rate by modeling the color-magnitude diagram, sometimes using all the stars or sometimes only the most massive O-type or Wolf-Rayet stars. This method can estimate the star formation rate over the past 100 million years or so. Systems like the Orion Nebula Cluster can be studied in this way, and with Hubble Space Telescope observations nearby galaxies can be mapped in their star formation rate.
\subsection{Ultraviolet light}
More useful for most galaxies is the integrated ultraviolet continuum light, which (depending on the IMF) traces stars of a few solar masses, and consequently star formation over the past 100 million years. However, dust attenuates the ultraviolet light strongly. In principle, the slope of the ultraviolet continuum constrains the dust attenuation. Because the dust re-emits in the infrared, this leads to a relationship between the ultraviolet spectral slope $\beta$ and the ratio of the infrared to ultraviolet light (``IRX''; \citealt{overzier11a, grasha13a}). However, in practice there appears to be too much scatter in the dust attenuation as a function of reddening for this correction to be sufficient.
\subsection{Emission lines}
The most massive young stars ($>15$ $M_\odot$) produce large ionized {\it HII regions} surrounding them. Recombination in these regions produces a sequence of Balmer lines (and also higher wavelength Paschen and Brackett lines) with line ratios that are only a weak function of the gas conditions. To a good approximation, there is one Balmer photon produced for every ionizing photon.
Therefore, Balmer lines can be used to determine the total ionizing flux, which is related to the number of massive stars, which due to their short lifetimes is related to the star formation rate within the past 10 million years. H$\alpha$ is the usual Balmer line of choice as the brightest and reddest, to minimize dust extinction. The Paschen and Brackett lines are more robust but less accessible with ground-based instruments. The H$\alpha$ emission needs to be corrected for dust as well. Typically, we use the {\it Balmer decrement}, the ratio of the Balmer lines compared to the (unextincted) theoretical expectation. With an assumed extinction curve, the Balmer decrement yields an extinction correction. For low attenuation levels (up to about 1 magnitude at H$\alpha$), this method is sufficient, but it becomes uncertain when there is large small-scale variation in the extinction, which is typical at high extinction.
\subsection{Infrared light}
Dust extinction of the ultraviolet light leads to reemission at infrared wavelengths. This emission is a mix of lines from polycyclic aromatic hydrocarbons (PAHs) and thermal emission from dust grains at a mix of temperatures. PAHs are typically only $\sim 1$\% of the dust mass but dominate the emission between 5--20 $\mu$m. Longward of 20 $\mu$m thermal emission from dust grains dominates, though note there are some atomic lines from the gas. The total infrared luminosity plus the total ultraviolet light traces the total star formation, in principle. However, evolved stars older than a few hundred million years can heat the dust as well. This heating typically occurs more diffusely throughout the galaxy and is less concentrated in star forming regions; it can contribute up to half the infrared signal for low specific star formation rate systems. A number of calibrations have been performed that are useful if one only has infrared observations or only PAH measurements (e.g. for higher redshift systems).
\subsection{Radio continuum}
The radio continuum of galaxies correlates with other measures of star formation. There are two components: a thermal free-free component, which at cm wavelengths is in the optically thin limit and therefore relatively flat, and a synchrotron component with a steeper spectrum. The free-free component should correlate with the ionizing luminosity that heats the plasma. The synchrotron component correlates with other star formation indicators for sufficiently luminous galaxies, but the physical reasons behind this correlation are unknown. The synchrotron component dominates, so high enough signal-to-noise ratio multifrequency data is necessary to measure the free-free emission. Most radio measurements of star formation are based on 20 cm (1.4 GHz) continuum measurements of the synchrotron.
\subsection{Kennicutt-Schmidt Law}
The Kennicutt-Schmidt law relates the surface density of atomic and molecular gas to the surface density of star formation, averaged over the galactic disk. Above $\Sigma_{\rm gas} = 10$ $M_\odot$ pc$^{-2}$, galaxies obey $\Sigma_{\rm SF} \propto \Sigma_{\rm gas}^n$ with $n\sim 1.4$, with a scatter of about 0.3 dex. At lower densities, there is less star formation and more scatter than the power law predicts. Note that star formation surface density is linearly related to the total dense molecular gas (as traced by HCN for example). When comparing the star formation locally within galaxies, similar trends are seen, with a similar threshold density. The physical causes of the Kennicutt-Schmidt law are unclear.
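\par To give the relation above an explicit normalization (an illustrative, approximate anchor only; the fitted coefficient varies between studies and depends on the adopted IMF and gas tracers), the disk-averaged fits correspond to roughly
\begin{equation}
\Sigma_{\rm SF} \approx 2.5\times 10^{-4} \left(\frac{\Sigma_{\rm gas}}{1\ M_\odot\ {\rm pc}^{-2}}\right)^{1.4} M_\odot\ {\rm yr}^{-1}\ {\rm kpc}^{-2},
\end{equation}
so a disk at the threshold surface density of 10 $M_\odot$ pc$^{-2}$ forms stars at a few times $10^{-3}$ $M_\odot$ yr$^{-1}$ kpc$^{-2}$.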
The scaling can be derived if the star formation rate is driven by large scale instabilities in the gas, leading the rate to scale inversely with the free fall time $t_{\rm ff}$ in the disk, with the depletion time $M_{\rm gas}/\dot M_\ast \sim 100 t_{\rm ff}$. However, there are many intermediate processes between the large scale gravitational collapse of the gas and the production of stars. The Kennicutt-Schmidt law could be explained by an appropriate variation of the fraction of molecular gas as a function of density and a constant conversion rate of molecular gas into stars. The cause of the threshold at 10 $M_\odot$ pc$^{-2}$ is also not clear. If star formation is initiated by large scale instabilities, then the {\it Toomre instability criterion} must be satisfied (\citealt{toomre64a}). A thin gas disk is unstable to large scale perturbations when: \begin{equation} Q_{\rm gas} = \frac{\sigma_g \kappa}{\pi G \Sigma_{\rm gas}} < 1 \end{equation} where $\kappa$ is the epicyclic frequency of the disk, and $\sigma_g$ is the velocity dispersion of the gas. For higher $Q$, the Coriolis forces in the disk prevent large scale collapse even above the Jeans mass. If stars dominate the gravitational field (as for a large spiral at $z\sim 0$), then the analogous criterion is: \begin{equation} Q_{\rm \ast} = \frac{\sigma_{\ast,R} \kappa}{3.36 G \Sigma_{\rm \ast}} < 1 \end{equation} Under most conditions found in the local universe, both $Q_{\rm gas}$ and $Q_{\rm \ast}$ are of order unity, though greater than unity, so clearly this simplified picture is not complete. Observationally, the $Q$ value in disks tends to correlate with many other parameters and it is therefore difficult to disentangle whether the observed threshold is due to the Toomre criterion or a different one (\citealt{leroy08a}). ``Global'' considerations like these probably only govern whether or not molecular clouds form. Within molecular clouds, fragmentation must occur, followed by the collapse of individual cores to form individual stars. This process is complex and involves turbulence, magnetic fields (which supply substantial pressure), as well as cooling physics, all of which are important well below any resolvable scale within a galaxy simulation. \section{Key References} \begin{itemize} \item {\it Star Formation in the Milky Way and Nearby Galaxies, \href{https://ui.adsabs.harvard.edu/abs/2012ARA%26A..50..531K/abstract}{\citet{kennicutt12a}}} \end{itemize} \section{Order-of-magnitude Exercises} \begin{enumerate} \item Assuming that the observables are unchanged, what is the difference in inferred star formation rate between assuming a Salpeter function all the way to the hydrogen burning limit, and one that turns over to a constant below 0.5 $M_\odot$? \item For a Milky Way-mass galaxy, what is the typical depletion time for its gas? Is it lower or higher for lower mass galaxies? Given how star formation rates change with redshift, what should we expect about the gas content of higher redshift galaxies? \end{enumerate} \section{Analytic Exercises} \begin{enumerate} \item The Kennicutt-Schmidt Law says that $\Sigma_{\rm SF} \propto \Sigma_{\rm gas}^{3/2}$. Assuming that gas disks have a uniform thickness, show that this relationship would result if the star formation rate density at all points in the disk were proportional to the gas density divided by the gravitational free-fall time.
\item The Jeans instability can be derived for a self-gravitating gas by linearizing the fluid equations together with the Poisson equation: \begin{eqnarray} \dot\rho &=& - \nabla\cdot\left(\rho\vec{v}\right) \cr \dot{\vec{v}} &=& - \left(\vec{v}\cdot\nabla\right)\vec{v} - \frac{1}{\rho}\nabla P - \nabla\Phi \cr \nabla^2 \Phi &=& 4 \pi G \rho \end{eqnarray} Perturb a static, uniform background ($\rho = \rho_0 + \rho_1$, $P = P_0 + P_1$, $\vec{v} = \vec{v}_1$), keep only terms of first order in the perturbations, and show that plane-wave perturbations grow when their wavelength exceeds the Jeans length $\lambda_J = c_s \sqrt{\pi/(G \rho_0)}$, where $c_s$ is the sound speed. \end{enumerate} \section{Numerics and Data Exercises} \begin{enumerate} \item SP models and UV star formation rate \item Comparing SFRs for a single galaxy from UV, H$\alpha$, IR. \item Balmer decrement \item Strongest star formers \item XUV disks \end{enumerate} \bibliographystyle{apj} \bibliography{exex}
\subsection{CPS} \label{section:AAMByExample:CPS} \input{sections/03AAMByExample/00CPS/main.tex} \subsection{Concrete Interpreter} \label{section:AAMByExample:Concrete} \input{sections/03AAMByExample/01Concrete/main.tex} \subsection{State Space Abstraction} \label{section:AAMByExample:AbstractStateSpace} \input{sections/03AAMByExample/02AbstractStateSpace/main.tex} \subsection{Abstract Semantics} \label{section:AAMByExample:AbstractSemantics} \input{sections/03AAMByExample/03AbstractSemantics/main.tex} \subsection{Recovering the Concrete Interpreter} \label{section:AAMByExample:RecoveringConcrete} \input{sections/03AAMByExample/04RecoveringConcrete/main.tex} \subsection{Recovering 0CFA} \label{section:AAMByExample:Recovering0CFA} \input{sections/03AAMByExample/05Recovering0CFA/main.tex} \subsection{Recovering kCFA} \label{section:AAMByExample:RecoveringKCFA} \input{sections/03AAMByExample/06RecoveringKCFA/main.tex} \subsection{Optimizations} \label{section:AAMByExample:Optimizations} \input{sections/03AAMByExample/07Optimizations/main.tex}
\section{Prob 2. e)} Fig. \ref{fig:fig5} contains a log-log plot of $N(x)=n(x)4\pi x^2$ and a histogram of the average number of satellites in each of 20 logarithmically-spaced bins between $x=10^{-4}$ and $x_{max}$. \lstinputlisting{log_hist.py} \begin{figure}[ht!] \centering \includegraphics[width=0.9\linewidth]{./plots/log_hist.png} \caption{Log-log plot of $N(x)$, $N(x) = n(x)4\pi x^2$, and a histogram of the average number of satellites in each bin.} \label{fig:fig5} \end{figure}
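For reference, the binning step described above can be sketched as follows. This is only a minimal illustration (assuming NumPy is available; \texttt{x\_max} and \texttt{satellites\_per\_halo} are placeholder names rather than names from the actual script), and the implementation actually used for the figure is the \texttt{log\_hist.py} listing above.
\begin{lstlisting}
import numpy as np

# 21 logarithmically-spaced edges define the 20 bins between x = 1e-4 and x_max.
edges = np.logspace(np.log10(1e-4), np.log10(x_max), 21)

# satellites_per_halo: one array of satellite radii x per generated halo.
mean_counts = np.zeros(20)
for x_values in satellites_per_halo:
    counts, _ = np.histogram(x_values, bins=edges)
    mean_counts += counts
mean_counts /= len(satellites_per_halo)  # average number of satellites per bin
\end{lstlisting}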
\documentclass{article}
\usepackage{amsmath}
\begin{document}
\section{Algorithms}
Euclid's algorithm (Algorithm E): Given two positive integers $n$ and $m$, find their GCD. \begin{enumerate} \item If $m < n$, switch $m \longleftrightarrow n$. \item Divide $m$ by $n$ and let $r$ be the remainder. \item If $r = 0$ then the algorithm terminates and $n$ from Step 2 is the answer. \item If $r \neq 0$, set $m \leftarrow n$ and $n \leftarrow r$, then go to Step 2. \end{enumerate} A sketch of a formal definition of an algorithm: A \emph{computational method} is a quadruple $(Q,I,\Omega,f)$, where \begin{itemize} \item $Q$ is the set of \emph{states} of the computation, containing $I$ and $\Omega$ \item $I\subset Q$ is the \emph{input} \item $\Omega \subset Q$ is the \emph{output} \item $f : Q \rightarrow Q$ is the \emph{computational rule}, which satisfies $f(\omega) = \omega\ \forall \omega \in \Omega$. \end{itemize} Each $x\in I$ defines a \emph{computational sequence} $x_0, x_1, \dots$, where $x_0=x$ and $x_{k+1} = f(x_k)$. The sequence \emph{terminates} in $k$ steps if $k$ is the smallest integer such that $x_k \in \Omega$. An \emph{algorithm} is a computational method that terminates in some finite number of steps for all $x$ in $I$. As an example, we present Euclid's algorithm in this formalization: let $Q$ be the set of all singletons $(n)$, all ordered pairs $(m,n)$, and all ordered quadruples $(m,n,r,1),(m,n,r,2),(m,n,p,3)$ where $m,n,p$ are positive integers and $r$ is a nonnegative integer. Let $I$ be the set of ordered pairs $(m,n)$ and $\Omega$ the set of singletons $(n)$. Define $f$ by \begin{align*} f((m,n)) = (m,n,0,1);\ f((n)) = (n); \\ f((m,n,r,1)) = (m,n, m\ \% \ n, 2); \\ f((m,n,r,2)) = (n)\ \mathrm{if}\ r=0,\ (m,n,r,3)\ \mathrm{otherwise}; \\ f((m,n,p,3)) = (n,p,p,1) \end{align*} Note that this definition of an algorithm doesn't include any notion of effectiveness, i.e., the ability for the algorithm to be performed using constituent elementary operations (compass-and-straightedge constructions, MIX instructions, etc.). We introduce a ``programming'' notion of effectiveness as follows: let $A$ be a finite set of letters, and let $A^*$ be the set of all strings on $A$. Let $N$ be a nonnegative integer and $Q$ be the set of all $(\sigma,j)$ with $\sigma \in A^*$ and $j$ an integer such that $0\leq j \leq N$. Let $I \subset Q$ be the subset of $Q$ where $j=0$ and $\Omega$ be the subset of $Q$ where $j=N$. If $\theta,\sigma \in A^*$, we say that $\theta$ occurs in $\sigma$ if there exist $\alpha,\omega\in A^* $ such that $\sigma = \alpha\theta\omega$ (under string concatenation). Finally, let $f$ be a function defined by the strings $\theta_j$ and $\phi_j$, and the integers $a_j$ and $b_j$, for each $0\leq j < N$: \begin{flalign*} f((\sigma,j)) = (\sigma,a_j) && \text{if}\ \theta_j\ \text{does not occur in}\ \sigma \\ f((\sigma,j)) = (\alpha\phi_j\omega,b_j) && \text{if}\ \alpha\ \text{is the shortest possible string such that}\ \sigma=\alpha\theta_j\omega \\ f((\sigma,N)) = (\sigma,N) \end{flalign*} \section{Algorithms - solutions to exercises} \ \indent \textbf{1.1.1} $t \leftarrow a$, $a \leftarrow b$, $b \leftarrow c$, $c \leftarrow d$, $d\leftarrow t$. \ \textbf{1.1.2} We have $m \leftarrow n$ and $n \leftarrow r$. Since $r < n$, after the assignment $n < m$. \ \textbf{1.1.3} \textbf{Algorithm F}. Given two positive integers $m$ and $n$, find the greatest common divisor. \ \textbf{F1} Divide $m$ by $n$. \ \textbf{F2} Set $m$ equal to the remainder. \ \textbf{F3} If $m=0$ then the answer is $n$. \ \textbf{F4} Otherwise divide $n$ by $m$.
\ \textbf{F5} Set $n$ equal to the remainder. \ \textbf{F6} If $n=0$ then the answer is $m$. \ \textbf{F7} Go to \textbf{F1}. \ \textbf{1.1.4} $6099 \% 2166 = 1767 \Rightarrow 2166 \% 1767 = 399$ $\Rightarrow 1767 \% 399 = 171 \Rightarrow 399 \% 171 = 57 \Rightarrow 171 \% 57 = 0$. So the GCD is 57. \ \textbf{1.1.5} Not finite, not definite, not effective. \textbf{1.1.6} $n=1$: $1 \% 5 = 1 \Rightarrow 5 \% 1 = 0$, 2 steps $n=2$: $2 \% 5 = 2 \Rightarrow 5 \% 2 = 1 \Rightarrow 2 \% 1 = 0$, 3 steps $n=3$: $3 \% 5 = 3 \Rightarrow 5 \% 3 = 2 \Rightarrow 3 \% 2 = 1 \Rightarrow 2 \% 1 = 0$, 4 steps $n=4$: $4 \% 5 = 4 \Rightarrow 5 \% 4 = 1 \Rightarrow 4 \% 1 = 0$, 3 steps $n=5$: $5 \% 5 = 0$, 1 step So $T_5 = 2.6$. \ \textbf{1.1.7} $U_m$ is well-defined: if $n > m$, the first step of the Euclidean algorithm simply swaps $n$ and $m$ (since $m\ \%\ n = m$) and $U_m = T_m + 1$. If $n < m$ then there are only finitely many cases. \textbf{1.1.8} \textbf{Note: I was a bit confused by this presentation and checked the answers.} Find the GCD of $n$ and $m$, given an alphabet containing $a$ and $b$ and an input $a^m b^n$. Our intent is to translate Euclid's algorithm, using the hint provided: \begin{enumerate} \item Set $r \leftarrow |m - n|$ and $n \leftarrow \mathrm{min}(m,n)$. \item If $r = 0$ then $m = n$ and $n$ is the GCD of $m$ and $n$, so the algorithm terminates. \item Otherwise set $m \leftarrow n$ and $n \leftarrow r$ and go to Step 1. \end{enumerate} We first prove that this does compute the GCD. If $g$ is the GCD of $m$ and $n$, then either $g = n$ or $g < n$. The first case is correctly handled by Step 2. Otherwise, in Step 3, $g$ clearly divides $\min(m,n)$, and by hypothesis $m\neq n$. Suppose $m>n$; then $m = ag$ and $n = bg$ for some $0 < b < a$ with $a$ and $b$ sharing no common factor, so $m - n = g(a - b)$. Since $a - b$ and $b$ also share no common factor, $g$ is the GCD of $n$ and $r = m - n$ as well. To translate this algorithm into the formalism, we will need an alphabet character, $c$, to represent the remainder. Phrasing the algorithm in words: \begin{enumerate} \item Start with $a^m b^n$. \item Remove $n$ $b$s from the right and $n$ $a$s from the left, then append $n$ $c$s to the left. \item There are $|m-n|$ $a$s left over (i.e., $r$ from above) and $n$ $c$s to the left of the $a$s. \item If $m=n$ then there are no $a$s left, and the number of $c$s (i.e., $n$) is the GCD. \item Otherwise swap the $a$s with $b$s and the $c$s with $a$s, then repeat. \end{enumerate} This faithfully reproduces the algorithm above. Therefore, in the formalism, it remains to choose the strings $\theta_j, \phi_j$ and integers $a_j, b_j$ that carry out these steps. \textbf{1.1.9} \section{Mathematical Preliminaries - Induction} Algorithmic proof procedure: \textbf{Algorithm I - Construct a proof.} Given a positive integer $n$ and a proposition $P(n)$, this algorithm will output a proof that $P(n)$ is true (if it succeeds). \textbf{I1} [Prove $P(1)$] Set $k \leftarrow 1$ and use another algorithm to output a proof of $P(1)$. \textbf{I2} [$k=n$?] If $k=n$, terminate; the required proof was found in the previous step. \textbf{I3} [$k<n$] Otherwise $k<n$. Use another algorithm to output a proof of the following statement: ``If $P(1)$, $P(2)$, $\dots$, $P(k)$ are true, then $P(k+1)$ is true.'' Then output the statement ``We have already proved $P(1),\dots,P(k)$, hence $P(k+1)$ is true.'' Combine these statements. \textbf{I4} Set $k \leftarrow k + 1$. Go to step \textbf{I2}. Here is an inductive proof of a fact about the Fibonacci sequence. Let $F_0 = 0,F_1=1$ and $F_n = F_{n-1} + F_{n-2}$ for $n\geq 2$. Define $\phi = (1 + \sqrt{5})/2$.
Then $F_n \leq \phi^{n-1}$ for all positive $n$. We proceed according to the algorithm above. This is clearly true for $n=1$, since $F_1 = 1 = \phi^0$, so we have obtained a proof of $P(1)$. For $P(2)$, $F_2 = 1$ and $\phi > 1.6$, so we have a (computational) proof of $P(2)$. Now assume our target is $k+1$ with $k>1$ and we have $k$ proofs $P(1),\dots,P(k)$. Since $F_{k+1} = F_k + F_{k-1}$, and by hypothesis $F_k \leq \phi^{k-1}$ and $F_{k-1} \leq \phi^{k-2}$, \begin{equation} F_{k+1} \leq \phi^{k-1} + \phi^{k-2} = \phi^{k-2}(1+\phi). \end{equation} $\phi$ is actually the positive solution to $1 + \phi = \phi^2$. So plugging this in gives $F_{k+1} \leq \phi^k$, as desired. Note that our proof would have failed if we didn't have direct proofs of both $P(1)$ and $P(2)$: the inductive step for $P(2)$ (the case $k=1$) would require $F_0 \leq \phi^{-1}$, i.e.\ a statement $P(0)$ that is not part of the theorem, so we could not have applied the method at the $F_{k-1} \leq \phi^{k-2}$ step. \section{Mathematical Induction - Exercises} \ \indent \textbf{1.2.1} Use the base case $n=0$, then prove that $P(0),P(1),\dots,P(n)$ together imply $P(n+1)$. \textbf{1.2.2} The proof uses this formula: \begin{equation*} a^{(n+1)-1} = \frac{a^{n-1}\cdot a^{n-1}}{a^{n-2}} \end{equation*} but this formula cannot be applied for $n=1$, since it relies on the value of $a^{n-2}$, i.e.\ on a case that was never established. So the argument used in the base case cannot be combined with this formula in the inductive step. \textbf{1.2.3} Since there are $n-1$ terms in the expansion, for $n=1$ the left-hand side is an empty sum equal to $0$, while the right-hand side is $3/2 - 1/1 = 1/2$; so ``clearly $3/2 - 1/n = 1/2$'' is not actually a valid reading of the formula, and the claimed base case fails. \textbf{1.2.4} \end{document}
\chapter{Econometrics using stated preference}
\chapter{Conclusion} In this manuscript, I have addressed the following questions and answered them based on the results of a suite of micro-benchmarks I created. Those micro-benchmarks aim to be representative of the current load faced by INGInious when dealing with students' submissions and to bring to light how different configurations could affect the responsiveness of the platform. \begin{enumerate} \item Compared to other available solutions, how good is the current configuration chosen by INGInious to face the responsiveness challenge of the platform? How much better could it be? How easy would it be to improve it? Do some solutions involve tradeoffs in terms of maturity, support or maintainability? \item Could there be a solution tailor-made for the specific case of INGInious? What would it be? What would it cost to use it? \item What would be the cost of providing stronger/safer isolation to the containers used by INGInious? Which opportunities could it bring? \end{enumerate} To answer the first question, I have examined the current configuration that INGInious has chosen to face the responsiveness challenge of the platform. I have identified different changes that could be made to it and shown whether or not they would be worth it. The most interesting change would be to switch the container runtime currently in use, \texttt{runc}, to \texttt{crun}, which performs up to twice as fast in some tests. For the second question, I have presented what would be the best solution for INGInious's case and measured its performance gain, in my micro-benchmarks, over the best existing alternative, showing that the difference we can still gain is significant: it performs up to three times as fast as INGInious's current configuration. Finally, for the last question, I have discussed some of the existing safer isolation solutions: virtualization and rootless containerization. I presented the new opportunities they create for INGInious, such as creating tasks where the student would have full root permissions inside a container. Here is a summary of some of the configurations I tried, and what they are worth in terms of performance, isolation, ease of use and support (through documentation, community and forums). \begin{center} \begin{tabular}{|ccc|c|c|c|c|} \hline \textbf{Manager} & \textbf{Runtime} & \textbf{Rootless} & \textbf{Isol.}\footnotemark & \textbf{Resp.}\footnotemark & \textbf{Usab.}\footnotemark & \textbf{Support} \\ \hline \hline Docker & runc & No & + & +++ & +++ & +++ \\ Docker & crun & No & + & ++++ & +++ & +++\\ Docker & kata-runtime\footnotemark & No & +++ & ++ & ++ & +++\\ Docker & kata-fc\footnotemark & No & +++ & + & ++ & +++\\ Podman & runc & No & + & ++ & +++ & +++\\ Podman & crun & No & + & +++ & +++ & +++\\ Podman & crun & Yes & ++ & ++ & +++ & +++\\ LXD & LXC & No & + & +++ & +++ & ++\\ \hline \end{tabular} \end{center} \footnotetext[1]{Isolation} \footnotetext[2]{Responsiveness} \footnotetext[3]{Usability, ease of use} \footnotetext[4]{Kata Containers with Qemu hypervisor} \footnotetext[5]{Kata Containers with Firecracker hypervisor} As there is no bad solution, there is no bad score either, but the goal is to have as many ``+'' as possible. \subsubsection{Personal enrichment} A master thesis is much more than reading a bunch of papers, writing nice code and plotting some graphs. It has been a real challenge to discover this whole new world that is containerization, and I have learned many more things than what my results show.
It was my first time dealing with such a big project, and I was the only one directly contributing to it. It required more planning than I usually need. It was also the first time that I was not relying on a course's support to learn new things. I had to find other trustworthy sources and build my knowledge of the subject myself. This project has also been a great opportunity for me to enjoy the greatness of open source. All the tools I have handled are open source, and I could get support from the community when I required it, which was a big help and made me feel less alone in this. \subsubsection{Follow-up questions} Even a year of work is not enough to cover everything on the subject I presented here: partly because the containerization world is continuously growing and new things to consider appear regularly, and partly because I started this year with no experience or even basic knowledge of the subject, so I did not know from the start what I was going to do, and going further in my work kept opening new doors to look behind. Here, then, is a small enumeration of things that, if I had one more year, I would consider looking into: \begin{enumerate} \item As the different tools I compared are continuously evolving, and new opportunities appear, the number of configurations to consider grows very large. It might be interesting to create a tool that could easily integrate new upcoming solutions into its benchmark. It would allow us to get periodically updated results as the tools evolve. \item LXD can also be used to manage virtual machines; I do not know how this works, what it relies on, or whether its performance is anywhere near that of containers, but it could be worth taking a look at. \item LXD also has its own ``rootless'' container feature, unprivileged containers; I did not have the chance to take a look at it, but it could be interesting as well. \item My knowledge of virtualization technologies is weaker than my knowledge of containerization, and there is one result that I cannot explain: the really bad behavior of storage drivers other than devicemapper with Kata Containers. \end{enumerate}
\documentclass[{{cookiecutter.project_slug}}.tex]{subfiles} \begin{document} \chapter{Conclusion} \end{document}
\section{Leadership} \resumeSubHeadingListStart %s \resumeSubHeadingListEnd \vspace{-16pt}
\documentclass[12pt,letterpaper]{article}
% Use packages
\usepackage{multirow} \usepackage[utf8]{inputenc} \usepackage{amsmath} \usepackage{amsfonts} \usepackage{amssymb} \usepackage{hyperref} \usepackage{tabularx} \usepackage{booktabs} \usepackage[normalem]{ulem} \usepackage{mdframed} \usepackage{color} \usepackage{float} \usepackage{indentfirst} \usepackage{graphicx} \newmdenv[linecolor=black]{reqbox} \newcounter{acnum} \newcommand{\actheacnum}{AC\theacnum} \newcommand{\acref}[1]{AC\ref{#1}} \newcounter{ucnum} \newcommand{\uctheucnum}{UC\theucnum} \newcommand{\uref}[1]{UC\ref{#1}} \newcounter{mnum} \newcommand{\mthemnum}{M\themnum} \newcommand{\mref}[1]{M\ref{#1}}
% Make title
\title{SE 3XA3 Module Guide: Revision 1} \author{Team 03, Pongthusiastics \\ Adwity Sharma - sharma78 \\ Arfa Butt - buttaa3 \\ Jie Luo - luoj3 } \date{\today} \begin{document} \maketitle \thispagestyle{empty} \newpage \tableofcontents \listoftables \listoffigures \begin{table}[h] \caption{\bf Revision History} \begin{tabularx}{\textwidth}{p{3.5cm}p{2cm}X} \toprule {\bf Date} & {\bf Version} & {\bf Notes}\\ \midrule November 9, 2016 & 1.0 & Created Module Guide \\ November 11, 2016 & 2.0 & Divided sections between group members \\ November 13, 2016 & 3.0 & Created format for Module Guide and added sections 2 and 4 \\ November 13, 2016 & 4.0 & Sections 1, 3 and 6 added \\ November 14, 2016 & 5.0 & Final version with all sections added\\ \textcolor{blue}{December 3, 2016} & \textcolor{blue}{6.0} & \textcolor{blue}{Section 1 to 4 revised for Revision 1}\\ \textcolor{blue}{December 8, 2016} & \textcolor{blue}{7.0} & \textcolor{blue}{Section 5 to 7 revised for Revision 1}\\ \bottomrule \end{tabularx} \end{table} \clearpage \section{Introduction} \label{intro} \subsection{\textcolor{blue}{Project Introduction}} \textcolor{blue}{This project is the redevelopment of a Pong game found on GitHub. The new game FaultInOurPong, developed by the Pongthusiastics team, would not only fix bugs currently discovered in the previous project, but also add more entertaining features in order to maximize the satisfaction of potential players. Apart from the executable, several formal documents and test cases that are crucial in the software development and management process are provided for the public and internal developers.}\\ \subsection{Document Overview} \indent This document presents the Module Guide for the implementation of the “Fault in Our Pong” project. This document is intended to facilitate the design and maintenance of the project. \textcolor{blue}{The main purpose of the Module Guide (MG) is to give an overview of each module in a project after the system decomposition. The Module Guide is formulated after the completion of the Software Requirement Specification (SRS). The Software Requirement Specification describes all the functional and non-functional requirements after project research and interviews with stakeholders.}\\ \textcolor{blue}{The completion of the Module Guide will facilitate the production of the Module Interface Specification (MIS). The Module Interface Specification exposes the secrets in each module, and it describes the detailed construction of the modules in words. Compared to the Module Guide as the black box of modules in a system, the Module Interface Specification serves as a white box for users and developers to understand how the modules are composed.
}\\ The major purpose of this document is to provide detailed information for the concerned parties about how and why a certain implementation has been carried out. The potential readers of the document are as follows: \begin{itemize} \item New project members: if new members are added to the project, this document, along with the MIS document, will help them understand how and why the functionalities have been implemented. It will also help them understand the features that must be preserved. \item Designers: this document provides the designers with a means of communication about the module specifications. It also helps determine whether the requirements have been met, and it can show the flexibility and feasibility of various modules. \item Maintainers: it is important for the people responsible for maintaining and updating the modules to understand the hierarchical structure of the modules and the way the implementation has been carried out; this document provides that information. \end{itemize} \subsection{Design Decisions} The design of the FaultInOurPong project follows these rules: \begin{enumerate} \item MVC model: the MVC model has been implemented rigorously in the project. The design has been separated into model, view and \textcolor{blue}{controller frameworks}. The model is responsible for managing the data, logic and rules of the application. The view is responsible for the output representation of the information. The controller is responsible for handling commands from users and manipulating the model. \item Each data structure is implemented in only one model. \textcolor{blue}{It is then exported to other modules for interaction.} \item The implementations that are likely to change are stored in separate modules. \item \textcolor{blue}{The concepts of separation of concerns (SC), information hiding (IH), and abstraction are used to further organize the code.} \end{enumerate} \subsection{Document Structure} The rest of the document is arranged as follows: Section \ref{SecChange} provides details about anticipated and unlikely changes of the project. Anticipated changes are listed in Section \ref{SecAchange}, and unlikely changes are listed in Section \ref{SecUchange}. Section \ref{SecMH} contains the breakdown of the module hierarchy, based on the anticipated changes. Section \ref{SecConnection} shows the connections between the software requirements and the modules. Section \ref{SecMD} shows a detailed breakdown of the module description. Section \ref{SecTM} includes the traceability matrix. Section \ref{SecUse} describes the use hierarchy between various modules.
\subsection{Acronyms and Definitions} \begin{table}[H] \centering \caption{\bf Acronyms} \label{TableAcronym} \bigskip \def\arraystretch{1.5} \begin{tabularx}{\textwidth}{p{3.7cm}X} \toprule \textbf{Acronym} & \textbf{Definition} \\ \midrule AC & Anticipated Change\\ DAG & Directed Acyclic Graph\\ FR & Functional Requirement\\ IH & Information Hiding\\ MG & Module Guide\\ MIS & Module Interface Specification\\ NFR & Non-Functional Requirement\\ SRS & Software Requirements Specification\\ UC & Unlikely Change\\ \bottomrule \end{tabularx} \end{table} \begin{table}[H] \centering \caption{\bf Definitions} \label{TableDefinitions} \bigskip \def\arraystretch{1.5} \begin{tabularx}{\textwidth}{p{3.7cm}X} \toprule \textbf{Term} & \textbf{Definition}\\ \midrule %%%
\textbf{Welcome page} & The first window shown on the screen when the program starts\\ \bottomrule \end{tabularx} \end{table} \section{Anticipated and Unlikely Changes} \label{SecChange} \textcolor{blue}{All the anticipated changes are listed in the first section (\ref{SecAchange}), and the unlikely changes are listed in the second section (\ref{SecUchange}).} \subsection{Anticipated Changes} \label{SecAchange} \begin{description} \item[\refstepcounter{acnum} \actheacnum \label{acHardware}:] The specific hardware on which the game is running. \item[\refstepcounter{acnum} \actheacnum \label{acInput}:] The format of the input data. (left and right keys can be changed to different keys inside the GameController class without it affecting the rest of the project) \item[\refstepcounter{acnum} \actheacnum \label{acConstraint}:] The constraints on the input parameters. \item[\refstepcounter{acnum} \actheacnum \label{acFeatures}:] Game features. (Number of people added on the highscores list, number of lives given to the user) \item[\refstepcounter{acnum} \actheacnum \label{acMode}:] Additional features. (Advanced single player mode with obstacles added, different speeds of the ball) \item[\refstepcounter{acnum} \actheacnum \label{acMag}:] Magnitude of game controls and media (size of the buttons, ball etc.). \end{description} \subsection{Unlikely Changes} \label{SecUchange} \begin{description} \item[\refstepcounter{ucnum} \uctheucnum \label{ucIO}:] Input and output devices. (Input: mouse clicks and keyboard presses, Output: screen/console) \item[\refstepcounter{ucnum} \uctheucnum \label{ucInput}:] There will always be a source of input data external to the software. \item[\refstepcounter{ucnum} \uctheucnum \label{ucMech}:] Game mechanics. (Formulas to calculate when the ball should change direction) \item[\refstepcounter{ucnum} \uctheucnum \label{ucEnv}:] Execution environment. (Must be Java-based) \end{description} %%%
\section{Module Hierarchy} \label{SecMH} This section provides an overview of the module design. Modules are summarized in a hierarchy decomposed by secrets in Table \ref{TblMH}. The modules listed below, which are leaves in the hierarchy tree, are the modules that will actually be implemented. \begin{table}[h!]
\section{Module Hierarchy} \label{SecMH}
This section provides an overview of the module design. Modules are summarized in a hierarchy decomposed by secrets in Table \ref{TblMH}. The modules listed below, which are leaves in the hierarchy tree, are the modules that will actually be implemented.

\begin{table}[h!]
\centering
\begin{tabular}{p{0.5\textwidth} p{0.5\textwidth}}
\toprule
\textbf{Level 1} & \textbf{Level 2}\\
\midrule
{Hardware-Hiding Module} & PongGame \refstepcounter{mnum} \mthemnum \label{mHH} \\
\midrule
\multirow{10}{0.3\textwidth}{Behaviour-Hiding Module} & Ball \refstepcounter{mnum} \mthemnum \label{mBall} \\
& GameModel \refstepcounter{mnum} \mthemnum \label{mGM}\\
& Paddle \refstepcounter{mnum} \mthemnum \label{mPad}\\
& Player \refstepcounter{mnum} \mthemnum \label{mPlayer}\\
& GameView \refstepcounter{mnum} \mthemnum \label{mV}\\
& HighScore \refstepcounter{mnum} \mthemnum \label{mScore}\\
& Mode \refstepcounter{mnum} \mthemnum \label{mMode}\\
& PongGameDisplay \refstepcounter{mnum} \mthemnum \label{mDisplay}\\
& Tutorial \refstepcounter{mnum} \mthemnum \label{mTut}\\
& Welcome \refstepcounter{mnum} \mthemnum \label{mWel}\\
\midrule
{Software Decision Module} & GameController \refstepcounter{mnum} \mthemnum \label{mCon}\\
\bottomrule
\end{tabular}
\caption{Module Hierarchy}
\label{TblMH}
\end{table}

\section{Connection Between Requirements and Design} \label{SecConnection}
The design of the system is intended to satisfy the requirements developed in the SRS. In this stage, the system is decomposed into modules. The connection between requirements and modules is listed in Table \ref{TblRT}.

\section{Module Decomposition} \label{SecMD}

\subsection{Hardware-Hiding Module}
\begin{description}
\item[Name: ] PongGame M\ref{mHH}
\item[Secrets: ] \textcolor{blue}{This module starts the game running in the Java environment.}
\item[Services: ] \textcolor{blue}{This module invokes the underlying system to display the game windows.}
\item[Implemented By: ] Windows
\end{description}

\subsection{Behaviour-Hiding Module}

\subsubsection{Ball M\ref{mBall}}
\begin{description}
\item[Secrets: ] \textcolor{blue}{This data structure stores all the information of a ball.}
\item[Services: ] \textcolor{blue}{This module contains all the operations for a ball, including its position and size (see the illustrative sketch after the GameView module below).}
\item[Implemented By: ] Ball.java
\end{description}

\subsubsection{GameModel M\ref{mGM}}
\begin{description}
\item[Secrets: ] \textcolor{blue}{This module is the interface of the model framework in the MVC design pattern.}
\item[Services: ] \textcolor{blue}{It coordinates the other data models so that they can interact with the controller framework.}
\item[Implemented By: ] GameModel.java
\end{description}

\subsubsection{Paddle M\ref{mPad}}
\begin{description}
\item[Secrets: ] \textcolor{blue}{This data structure stores all the information of a paddle.}
\item[Services: ] \textcolor{blue}{This module contains all the operations for a paddle, including its position, height, and width.}
\item[Implemented By: ] Paddle.java
\end{description}

\subsubsection{Player M\ref{mPlayer}}
\begin{description}
\item[Secrets: ] \textcolor{blue}{This data structure stores all the information of a player.}
\item[Services: ] \textcolor{blue}{This module contains all the operations for a player, including the player's score and methods to increase or decrease it.}
\item[Implemented By: ] Player.java
\end{description}

\subsubsection{GameView M\ref{mV}}
\begin{description}
\item[Secrets: ] \textcolor{blue}{This module acts as an interface for all other view modules.}
\item[Services: ] \textcolor{blue}{This module cooperates with the other view modules so that they can interact with the controller framework.}
\item[Implemented By: ] GameView.java
\end{description}
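To make the Secrets/Services split of these data modules concrete, the following is a minimal sketch of what a module such as Ball could look like. The field and method names are assumptions for the sketch and do not necessarily match the actual contents of Ball.java.

\begin{verbatim}
// Illustrative sketch only -- field and method names are assumptions,
// not the actual contents of Ball.java.
class Ball {
    // Secret: how the ball's state is represented (plain integer coordinates here).
    private int x, y;
    private int diameter;

    Ball(int x, int y, int diameter) {
        this.x = x;
        this.y = y;
        this.diameter = diameter;
    }

    // Services: operations on the ball's position and size.
    int getX()        { return x; }
    int getY()        { return y; }
    int getDiameter() { return diameter; }
    void moveTo(int newX, int newY) { x = newX; y = newY; }
}
\end{verbatim}

Because only Ball.java knows how the ball's state is stored, the representation can change (for example, to floating-point coordinates) without affecting the modules that use it.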
\subsubsection{HighScore M\ref{mScore}}
\begin{description}
\item[Secrets: ] \textcolor{blue}{This module displays the players' high scores, which are read from a text file.}
\item[Services: ] \textcolor{blue}{It sets up a window/frame for displaying the scores of the top 20 players.}
\item[Implemented By: ] HighScore.java
\end{description}

\subsubsection{Mode M\ref{mMode}}
\begin{description}
\item[Secrets: ] \textcolor{blue}{This module displays the different game modes to the player.}
\item[Services: ] \textcolor{blue}{It sets up a window/frame with buttons for the user to choose between the different game modes.}
\item[Implemented By: ] Mode.java
\end{description}

\subsubsection{PongGameDisplay M\ref{mDisplay}}
\begin{description}
\item[Secrets: ] \textcolor{blue}{This module displays the actual game panel.}
\item[Services: ] \textcolor{blue}{It sets up the game panel by drawing objects such as the paddles, the ball, and the current scores on the screen.}
\item[Implemented By: ] PongGameDisplay.java
\end{description}

\subsubsection{Tutorial M\ref{mTut}}
\begin{description}
\item[Secrets: ] \textcolor{blue}{This module displays the game instructions.}
\item[Services: ] \textcolor{blue}{It sets up the window for the tutorial page by displaying a picture of the instructions.}
\item[Implemented By: ] Tutorial.java
\end{description}

\subsubsection{Welcome M\ref{mWel}}
\begin{description}
\item[Secrets: ] \textcolor{blue}{This module displays the first window when the program starts.}
\item[Services: ] \textcolor{blue}{It creates buttons for the different options so that a user can choose one and start the game.}
\item[Implemented By: ] Welcome.java
\end{description}

\subsection{Software Decision Module}

\subsubsection{GameController M\ref{mCon}}
\begin{description}
\item[Secrets: ] \textcolor{blue}{This module contains part of the logic of the game.}
\item[Services: ] \textcolor{blue}{It performs calculations to determine whether a player has won or lost (a minimal sketch at the end of this section illustrates this); it also takes in hardware/environment inputs and passes them to the model and view frameworks.}
\item[Implemented By: ] GameController.java
\end{description}
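The following minimal sketch illustrates the kind of win/lose calculation the controller is responsible for. The winning-score threshold and the method names are assumptions for the sketch and do not necessarily match the actual contents of GameController.java.

\begin{verbatim}
// Illustrative sketch only -- the threshold and names are assumptions.
class Player {                        // minimal stand-in for the Player model
    private int score;
    int getScore()       { return score; }
    void increaseScore() { score++; }
}

class GameController {
    private static final int WINNING_SCORE = 10;   // assumed threshold

    // Returns the winner once either player reaches the winning score,
    // or null if the game should continue.
    Player checkForWinner(Player left, Player right) {
        if (left.getScore() >= WINNING_SCORE)  return left;
        if (right.getScore() >= WINNING_SCORE) return right;
        return null;
    }
}
\end{verbatim}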
\section{Traceability Matrix} \label{SecTM}

\begin{table}[h!]
\centering
\begin{tabular}{p{0.2\textwidth} p{0.6\textwidth}}
\toprule
\textbf{Requirements} & \textbf{Modules}\\
\midrule
{R1} & \mref{mHH}, \mref{mWel}, \mref{mV}, \mref{mCon}\\
{R2} & \mref{mHH}, \mref{mWel}, \mref{mV}, \mref{mCon}\\
{R3} & \mref{mPlayer}, \mref{mV}, \mref{mCon}\\
{R4} & \mref{mHH}, \mref{mScore}, \mref{mWel}, \mref{mCon}\\
{R5} & \mref{mHH}, \mref{mV}, \mref{mTut}, \mref{mCon} \\
{R6} & \mref{mMode}, \mref{mCon} \\
{R7} & \mref{mMode}, \mref{mCon} \\
{R8} & \mref{mGM}, \mref{mPlayer}\\
{R9} & \mref{mGM}, \mref{mPlayer}\\
{R10} & \mref{mPad}, \mref{mDisplay}, \mref{mCon}\\
{R11} & \mref{mPlayer}, \mref{mCon}\\
{R12} & \mref{mV}, \mref{mCon}\\
{R13} & \mref{mV}, \mref{mCon}\\
{R14} & \mref{mV}, \mref{mCon}\\
{R15} & \mref{mV}, \mref{mCon}\\
{R16} & \mref{mWel}\\
{R17} & \mref{mScore}, \mref{mCon}\\
{R18} & \mref{mV}, \mref{mCon}\\
\bottomrule
\end{tabular}
\caption{Trace Between Requirements and Modules}
\label{TblRT}

\bigskip
\centering
\begin{tabular}{p{0.2\textwidth} p{0.6\textwidth}}
\toprule
\textbf{AC} & \textbf{Modules}\\
\midrule
\acref{acHardware} & \mref{mHH} \\
\acref{acInput} & \mref{mCon} \\
\acref{acConstraint} & \mref{mCon}\\
\acref{acFeatures} & \mref{mPlayer}, \mref{mScore}\\
\acref{acMode} & \mref{mBall}, \mref{mV}, \mref{mMode}, \mref{mCon} \\
\acref{acMag} & \mref{mBall}, \mref{mPad}, \mref{mWel} \\
\bottomrule
\end{tabular}
\caption{Trace Between Anticipated Changes and Modules}
\label{TblACT}
\end{table}

\clearpage

\section{Use Hierarchy Between Modules} \label{SecUse}
\textcolor{blue}{The use hierarchy between the modules is depicted in Figure~\ref{FigUH}. The modules listed in this document form a directed acyclic graph (DAG).}

\begin{figure}[H]
\centering
\caption{\bf Use Hierarchy}
\label{FigUH}
\bigskip
\includegraphics[width=0.95\textwidth]{user.png}
\end{figure}

\end{document}
{ "alphanum_fraction": 0.73900701, "avg_line_length": 56.7796052632, "ext": "tex", "hexsha": "576b9846bd4b6acf70a69108771e3f015b26013f", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "e60288d4fceaf686212d66ef41167ce4cce8f642", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "ArfaAmer/FaultInOurPong", "max_forks_repo_path": "Doc/Design/MG/MG - Revision 1.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "e60288d4fceaf686212d66ef41167ce4cce8f642", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "ArfaAmer/FaultInOurPong", "max_issues_repo_path": "Doc/Design/MG/MG - Revision 1.tex", "max_line_length": 592, "max_stars_count": null, "max_stars_repo_head_hexsha": "e60288d4fceaf686212d66ef41167ce4cce8f642", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "ArfaAmer/FaultInOurPong", "max_stars_repo_path": "Doc/Design/MG/MG - Revision 1.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 4930, "size": 17261 }