\documentclass{article}
\usepackage{amsmath}
\usepackage{graphicx} % needed for \includegraphics
\usepackage{booktabs} % needed for \toprule, \midrule, \bottomrule
\usepackage{siunitx}
\usepackage{pgfplots}
\usepgfplotslibrary{units} % provides the "x unit"/"y unit" axis options
\pgfplotsset{compat=newest} % Allows to place the legend below plot
\begin{document}
\begin{equation*}
f(x) = x^2
\end{equation*}
\begin{align*}
f(x) &= x^2\\
g(x) &= \frac{1}{x}\\
F(x) &= \int^a_b \frac{1}{3}x^3
\end{align*}
\section{Section}
Hello World!
\subsection{Subsection}
Structuring a document is easy!
\subsubsection{Subsubsection}
More text.
\paragraph{Paragraph}
Some more text.
\subparagraph{Subparagraph}
Even more text.
\section{Another section}
\begin{figure}
\includegraphics[width=\linewidth]{boat.jpg}
\caption{A boat.}
\label{fig:boat1}
\end{figure}
Figure \ref{fig:boat1} shows a boat.
\newpage
\bibliography{lesson7a1}
\bibliographystyle{ieeetr}
Random citation \autocite[1]{DUMMY:1} embedded in text.
\newpage
\printbibliography
This is some example text\footnote{\label{myfootnote}Hello footnote}.
\begin{table}[h!]
\centering
\caption{Caption for the table.}
\label{tab:table1}
\begin{tabular}{ccc}
\toprule
Some & actual & content\\
\midrule
prettifies & the & content\\
as & well & as\\
using & the & booktabs package\\
\bottomrule
\end{tabular}
\end{table}
\begin{figure}[h!]
\begin{center}
\begin{tikzpicture}
\begin{axis}[
width=\linewidth, % Scale the plot to \linewidth
grid=major, % Display a grid
grid style={dashed,gray!30}, % Set the style
xlabel=X Axis $U$, % Set the labels
ylabel=Y Axis $I$,
x unit=\si{\volt}, % Set the respective units
y unit=\si{\ampere},
legend style={at={(0.5,-0.2)},anchor=north}, % Put the legend below the plot
x tick label style={rotate=90,anchor=east} % Display labels sideways
]
\addplot
% add a plot from table; you select the columns by using the actual name in
% the .csv file (on top)
table[x=column 1,y=column 2,col sep=comma] {table.csv};
\legend{Plot}
\end{axis}
\end{tikzpicture}
\caption{My first autogenerated plot.}
\end{center}
\end{figure}
\end{document}
{ "alphanum_fraction": 0.6458137347, "avg_line_length": 20.8431372549, "ext": "tex", "hexsha": "6a2a342b4cfead59b6c77e2ee6a733553138ad3d", "lang": "TeX", "max_forks_count": 37, "max_forks_repo_forks_event_max_datetime": "2019-10-21T10:21:15.000Z", "max_forks_repo_forks_event_min_datetime": "2016-08-12T19:10:25.000Z", "max_forks_repo_head_hexsha": "d29ff99615def3b18c88a8b4b81c0fb32e6f2c7f", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "jlord/flatwhite-syntax", "max_forks_repo_path": "code-samples/latex-sample.tex", "max_issues_count": 33, "max_issues_repo_head_hexsha": "d29ff99615def3b18c88a8b4b81c0fb32e6f2c7f", "max_issues_repo_issues_event_max_datetime": "2021-04-08T18:38:43.000Z", "max_issues_repo_issues_event_min_datetime": "2016-08-12T16:11:03.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "jlord/flatwhite-syntax", "max_issues_repo_path": "code-samples/latex-sample.tex", "max_line_length": 86, "max_stars_count": 125, "max_stars_repo_head_hexsha": "479743c6b3caba2852f9e9607f0b006fbe1b0324", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "Willamin/flatdark-syntax", "max_stars_repo_path": "code-samples/latex-sample.tex", "max_stars_repo_stars_event_max_datetime": "2022-03-15T13:56:08.000Z", "max_stars_repo_stars_event_min_datetime": "2016-08-12T16:11:17.000Z", "num_tokens": 663, "size": 2126 }
\chapter{Applications in the Network} \label{chap:applications}
{ "alphanum_fraction": 0.8, "avg_line_length": 16.25, "ext": "tex", "hexsha": "3cf7f47e5ca47e455c3ab825978c33a553a67ba2", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "9d79fc009b29bfec577449c88dfffc9a208f75b0", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "daidahao/Building-Secure-Networks-Report", "max_forks_repo_path": "Chapter/Applications.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "9d79fc009b29bfec577449c88dfffc9a208f75b0", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "daidahao/Building-Secure-Networks-Report", "max_issues_repo_path": "Chapter/Applications.tex", "max_line_length": 37, "max_stars_count": null, "max_stars_repo_head_hexsha": "9d79fc009b29bfec577449c88dfffc9a208f75b0", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "daidahao/Building-Secure-Networks-Report", "max_stars_repo_path": "Chapter/Applications.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 15, "size": 65 }
\documentclass{article}
\usepackage{tocloft}
\include{common_symbols_and_format}
\renewcommand{\cfttoctitlefont}{\Large\bfseries}
\begin{document}
\logo
\rulename{Test Rule}
\ruledescription{This rule uses differentials between four moving averages to determine trade positioning. The parameters accepted are the integer length of each short average (2 parameters: one for price, one for research), the additional number of days for the longer averages (2 parameters: again, one for price, one for research) and 4 coefficients for each average's weighting contribution. The total sum is divided by the current price to calculate a position size.}
\ruleparameters
{Short price average length}{2}{Number of days in the short price average.}{$\averagelengthshort^{\price}$}
{Long price average length}{5}{Number of additional days in the longer price average (added to the number in the short price average).}{$\averagelengthlong^{\price}$}
{Short research average length}{2}{Number of days in the short research average.}{$\averagelengthshort^{\research}$}
{Long research average length}{5}{Number of additional days in the longer research average (added to the number in the short research average).}{$\averagelengthlong^{\research}$}
{Amplitude of short price average}{1.0}{Weighting coefficient for the short term average of price.}{$\amplitudecoefficientone^{\price}$}
{Amplitude of long price average}{1.0}{Weighting coefficient for the long term average of price.}{$\amplitudecoefficienttwo^{\price}$}
{Amplitude of short research average}{1.0}{Weighting coefficient for the short term average of research.}{$\amplitudecoefficientone^{\research}$}
{Amplitude of long research average}{1.0}{Weighting coefficient for the long term average of research.}{$\amplitudecoefficienttwo^{\research}$}
\stoptable
\section{Equation}
\begin{equation}
\bigcontribution(\currenttime, \averagelength, \amplitudecoefficient, \genericfunction) = \frac{\amplitudecoefficient}{\averagelength} \sum_{\dummyiterator=0}^{\averagelength-1} \genericfunction(\currenttime - \dummyiterator)
\label{eq1}
\end{equation}
\begin{equation}
\position(\currenttime) = \frac{\bigcontribution(\currenttime, \averagelengthshort^{\price}, \amplitudecoefficientone^{\price}, \price)+\bigcontribution(\currenttime, (\averagelengthshort^{\price} + \averagelengthlong^{\price}), \amplitudecoefficienttwo^{\price},\price)+\bigcontribution(\currenttime, \averagelengthshort^{\research}, \amplitudecoefficientone^{\research}, \research)+\bigcontribution(\currenttime, (\averagelengthshort^{\research} + \averagelengthlong^{\research}), \amplitudecoefficienttwo^{\research}, \research)}{\price(\currenttime)}
\label{eq2}
\end{equation}
\hspace{200mm}
\noindent where $\position(\currenttime)$ is the portfolio allocation at time $\currenttime$, $\price = \price(\currenttime)$ is the value of the price series and $\research = \research(\currenttime)$ is the value of the research series.
\hspace{200mm}
\hspace{200mm}
\keyterms
\furtherlinks
\end{document}
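A minimal Python sketch of the position calculation defined by the two equations above, assuming two aligned price and research series (lists or arrays) and the default parameter values from the table. This is only an illustration of the formulas, not the library's actual implementation, and the function and parameter names are hypothetical.

# Illustrative sketch of Eq. (1) and Eq. (2) of the rule description above.
def contribution(series, t, length, amplitude):
    # Eq. (1): amplitude-weighted mean of the last `length` values up to time t
    window = series[t - length + 1 : t + 1]
    return amplitude * sum(window) / length

def position(price, research, t, ls_p=2, ll_p=5, ls_r=2, ll_r=5,
             a1_p=1.0, a2_p=1.0, a1_r=1.0, a2_r=1.0):
    # Eq. (2): sum of the four contributions divided by the current price
    total = (contribution(price, t, ls_p, a1_p)
             + contribution(price, t, ls_p + ll_p, a2_p)
             + contribution(research, t, ls_r, a1_r)
             + contribution(research, t, ls_r + ll_r, a2_r))
    return total / price[t]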
{ "alphanum_fraction": 0.7807133421, "avg_line_length": 68.8181818182, "ext": "tex", "hexsha": "3883520e47e9c232e67e18fef3b217088dba90de", "lang": "TeX", "max_forks_count": 28, "max_forks_repo_forks_event_max_datetime": "2021-11-10T18:21:14.000Z", "max_forks_repo_forks_event_min_datetime": "2021-03-26T14:26:04.000Z", "max_forks_repo_head_hexsha": "48231c2c026b4163291e299cd938969401ca6a4a", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "pawkw/infertrade", "max_forks_repo_path": "docs/strategies/tex/Example_TestRule.tex", "max_issues_count": 137, "max_issues_repo_head_hexsha": "48231c2c026b4163291e299cd938969401ca6a4a", "max_issues_repo_issues_event_max_datetime": "2022-01-28T19:36:30.000Z", "max_issues_repo_issues_event_min_datetime": "2021-03-25T10:59:46.000Z", "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "pawkw/infertrade", "max_issues_repo_path": "docs/strategies/tex/Example_TestRule.tex", "max_line_length": 557, "max_stars_count": 34, "max_stars_repo_head_hexsha": "48231c2c026b4163291e299cd938969401ca6a4a", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "pawkw/infertrade", "max_stars_repo_path": "docs/strategies/tex/Example_TestRule.tex", "max_stars_repo_stars_event_max_datetime": "2021-11-06T23:03:01.000Z", "max_stars_repo_stars_event_min_datetime": "2021-03-25T13:32:54.000Z", "num_tokens": 797, "size": 3028 }
\chapter{Implementation}\label{ch:implementation}

We have implemented the dual ascent routing module for Floodlight's latest master version\footnote{Note that the master version of Floodlight requires JDK 8.} available on Github\footnote{\url{https://github.com/floodlight/floodlight}}. More precisely, we started with the version at commit~\href{https://github.com/floodlight/floodlight/tree/d737cb05656a6038f4e2277ffb4503d45b7b29cb}{\code{d737cb0}}. To learn how to get the code and execute it, check \appendixref{appendix:tutorial}.

The module is implemented in two classes. Both classes are placed in the \code{net.floodlightcontroller.topology} package:
\begin{itemize}
\item \code{DualAscentTopologyManager}
\item \code{DualAscentTopologyInstance}
\end{itemize}

The \code{DualAscentTopologyManager} class is just the adaptation of the \code{TopologyManager} class for our module. It does not contain any additional code other than a substitution of all references to \code{TopologyInstance} with \code{DualAscentTopologyInstance}.

The \code{DualAscentTopologyInstance} class contains code taken from the \code{TopologyInstance} class, except for the \code{dijkstra} method, which has been replaced with the \code{dualAscent} method that is the core of our implementation.

The \code{dualAscent} method first performs some initialization of the needed data structures (we have also added the \code{Graph} class, used to represent a graph composed of nodes and directed links). An important part of the initialization process is choosing the topology root of the dual ascent algorithm, as shown in \lstref{lst:isdstrooted}.

\lstinputlisting[language=java, style=javacode, label={lst:isdstrooted}, caption={Initialization of the root of the topology}, firstline=14, lastline=14]{dualAscent.java}

Then, the most relevant part, which implements the dual ascent algorithm, is shown in \lstref{lst:dualascentmain}.

\lstinputlisting[language=java, style=javacode, label={lst:dualascentmain}, caption={Main cycle of the dual ascent algorithm}, firstline=31, lastline=38]{dualAscent.java}

The \code{findRootComp} method searches for a root component in the auxiliary graph. If no root component is found, it returns \code{null}. The \code{findMinArc} method returns the arc with minimum cost that is in the complete graph of the topology but not in the auxiliary graph. If it returns \code{null}, meaning that there is no link satisfying the requirements, the current root component is added to a set in order to instruct the \code{findRootComp} method not to return the same root component again, avoiding an infinite cycle. The \code{editCosts} method updates the table of reduced costs, which is used by the \code{findMinArc} method whenever a minimum cost arc has to be retrieved from the complete graph of the topology. Finally, the \code{addArc} method adds \code{minLink} to the list of links in the auxiliary graph and updates the \code{connections} matrix, which maintains the list of nodes that are connected (directly or indirectly) in the auxiliary graph. This matrix is inspected by the \code{areConnected} method, which is mainly used by the \code{findRootComp} method to identify root components.

When there are no new root components, the loop ends. Then, all nodes that are not connected (directly or indirectly) with the root node are removed from the auxiliary graph, as shown in \lstref{lst:dualascentnodescleanup}, since they are not needed.
\lstinputlisting[language=java, style=javacode, label={lst:dualascentnodescleanup}, caption={Nodes disconnected from the root node are removed from the auxiliary graph}, firstline=40, lastline=45]{dualAscent.java}

After that, the code shown in \lstref{lst:dualascentinvertlinks} handles the case where the root node is the destination of the \code{dualAscent} method. In this case the root is required to be the node that all other nodes converge to; since the original dual ascent computes a path from the root to all other target nodes, here all paths must start from the target nodes and end at the root node. This operation is possible thanks to the fact that in the complete graph there are always links in the opposite direction.

\lstinputlisting[language=java, style=javacode, label={lst:dualascentinvertlinks}, caption={If the root node is the destination requested by Floodlight, the links are inverted}, firstline=47, lastline=67]{dualAscent.java}

Finally, the broadcast tree is built. This is done using the \code{buildBroadcastTree} method, which applies Dijkstra's algorithm to the auxiliary graph (this is necessary to remove cycles from the final broadcast tree).
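For readers who want a compact view of the control flow described above, the sketch below (written in Python purely for brevity; the actual module is written in Java) mirrors the roles of \code{findRootComp}, \code{findMinArc} and \code{addArc}. It is a simplified toy: it treats components in an undirected sense and omits the reduced-cost bookkeeping performed by \code{editCosts}.

\begin{lstlisting}[language=Python, caption={Illustrative sketch of the dual ascent main cycle (not part of the module)}]
# Toy sketch of the control flow only; not the module's Java implementation.
def dual_ascent_sketch(nodes, arcs, root):
    """nodes: iterable of node ids; arcs: dict {(u, v): cost}; root: a node id."""
    nodes = list(nodes)
    aux = set()                        # arcs added to the auxiliary graph
    comp = {n: n for n in nodes}       # crude connected-component labels

    def find(n):                       # stand-in for the connections matrix
        while comp[n] != n:
            n = comp[n]
        return n

    dead = set()                       # components with no usable incoming arc
    while True:
        # findRootComp: a component that does not (yet) contain the root
        pending = {find(n) for n in nodes} - {find(root)} - dead
        if not pending:                # no new root component: the loop ends
            break
        c = next(iter(pending))
        # findMinArc: cheapest arc entering the component from outside it
        entering = {a: w for a, w in arcs.items()
                    if find(a[1]) == c and find(a[0]) != c}
        if not entering:
            dead.add(c)                # never return this component again
            continue
        (u, v), _ = min(entering.items(), key=lambda kv: kv[1])
        aux.add((u, v))                # addArc: merge the two components
        ru = find(u)
        comp[c] = ru
        dead.discard(ru)               # the merged component may gain arcs again
    return aux

# Example: dual_ascent_sketch(["r", "a", "b"],
#                             {("r", "a"): 1, ("a", "b"): 2, ("b", "a"): 1}, "r")
\end{lstlisting}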
{ "alphanum_fraction": 0.8059765208, "avg_line_length": 49.8404255319, "ext": "tex", "hexsha": "7dc7b6112e83b88f335f7462e52d921751a21fc8", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "ea7e3b5f81e252705f11577977a88b357a4efd55", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "SpeedJack/anaws", "max_forks_repo_path": "doc/chapters/implementation.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "ea7e3b5f81e252705f11577977a88b357a4efd55", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "SpeedJack/anaws", "max_issues_repo_path": "doc/chapters/implementation.tex", "max_line_length": 117, "max_stars_count": null, "max_stars_repo_head_hexsha": "ea7e3b5f81e252705f11577977a88b357a4efd55", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "SpeedJack/anaws", "max_stars_repo_path": "doc/chapters/implementation.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1161, "size": 4685 }
\documentclass{article}
\usepackage{subfig}
\usepackage{multirow}
\usepackage{tabularx}
\usepackage{placeins}   % in-place positioning of figures/tables
\usepackage{graphicx}
\usepackage{caption}
\usepackage{float}
\usepackage{array}
% if you need to pass options to natbib, use, e.g.:
% \PassOptionsToPackage{numbers, compress}{natbib}
% before loading neurips_2020

% ready for submission
% \usepackage{neurips_2020}

% to compile a preprint version, e.g., for submission to arXiv, add the
% [preprint] option:
% \usepackage[preprint]{neurips_2020}

% to compile a camera-ready version, add the [final] option, e.g.:
\usepackage[final]{neurips_2020}

% to avoid loading the natbib package, add option nonatbib:
% \usepackage[nonatbib]{neurips_2020}

\usepackage{listings}
\usepackage[utf8]{inputenc} % allow utf-8 input
\usepackage[T1]{fontenc}    % use 8-bit T1 fonts
\usepackage{hyperref}       % hyperlinks
\usepackage{url}            % simple URL typesetting
\usepackage{booktabs}       % professional-quality tables
\usepackage{amsfonts}       % blackboard math symbols
\usepackage{nicefrac}       % compact symbols for 1/2, etc.
\usepackage{microtype}      % microtypography
\usepackage{xcolor}
\usepackage{adjustbox}

\renewcommand{\arraystretch}{1.5}

\definecolor{codegreen}{rgb}{0,0.6,0}
\definecolor{codegray}{rgb}{0.5,0.5,0.5}
\definecolor{codepurple}{rgb}{0.58,0,0.82}
\definecolor{backcolour}{rgb}{0.95,0.95,0.92}
\lstdefinestyle{mystyle}{
  backgroundcolor=\color{backcolour},
  commentstyle=\color{codegreen},
  keywordstyle=\color{magenta},
  numberstyle=\tiny\color{codegray},
  stringstyle=\color{codepurple},
  basicstyle=\ttfamily\footnotesize,
  breakatwhitespace=false,
  breaklines=true,
  captionpos=b,
  keepspaces=true,
  numbers=left,
  numbersep=5pt,
  showspaces=false,
  showstringspaces=false,
  showtabs=false,
  tabsize=2
}
\lstset{style=mystyle}

\title{YouTube Educational Content Analysis\\\\ \large{Group 17}}

% The \author macro works with any number of authors. There are two commands
% used to separate the names and addresses of multiple authors: \And and \AND.
%
% Using \And between authors leaves it to LaTeX to determine where to break the
% lines. Using \AND forces a line break at that point. So, if LaTeX puts 3 of 4
% authors names on the first line, and the last on the second line, try using
% \AND instead of \And before the third author name.

\author{%
  Ayush Kumar\\
  % Indian Institute of Technology\\
  % Kanpur \\
  Roll No: 170195\\
  \texttt{[email protected]} \\
  % examples of more authors
  \And
  Harsh Agarwal \\
  % Indian Institute of Technology\\
  % Kanpur \\
  Roll No: 170287\\
  \texttt{[email protected]} \\
  \AND
  Keshav Bansal\\
  % Indian Institute of Technology\\
  % Kanpur \\
  Roll No: 170335\\
  \texttt{[email protected]} \\
  \And
  Snehal Raj\\
  % Indian Institute of Technology\\
  % Kanpur \\
  Roll No: 170705\\
  \texttt{[email protected]} \\
  \And
  Umang Malik\\
  % Indian Institute of Technology\\
  % Kanpur \\
  Roll No: 170765\\
  \texttt{[email protected]} \\
}

\begin{document}

\maketitle

\begin{abstract}
YouTube has served as a free education-sharing platform for over a decade. Colleges around the world have set up channels and organizations to publish content for free, and independent creators have also gained large audiences on the platform. In this project, we analyze educational videos on YouTube to draw insights that can improve the quality of content currently being delivered by content creators and instructors on the platform.
\end{abstract}

\section{Problem Statement}

Educational videos can enhance learning and integrate easily into standard instructional methods. Studies have shown that the use of short video clips allows for more efficient processing and memory recall. The visual and auditory nature of videos appeals to a broad audience and allows each user to process information in a way that’s natural to them. YouTube permits worldwide access to high-quality educational videos; however, not many studies have described the reach of educational videos on YouTube or what topics are preferred. The aim of this project is to contribute towards a better understanding of the content that is being shared in the educational videos published on YouTube. The main goals of this project are as follows:
\begin{itemize}
\item Providing a comprehensive dataset of educational video statistics on YouTube, including college- and branch-specific content of NPTEL.
\item Analysing which factors affect the popularity of an educational video on YouTube by studying the correlation between various evaluation metrics.
\item Analysing educational content in India vs other countries based on topics covered, video length, and views-to-likes ratio.
\item Using the playlist of topics to identify peaks and troughs and suggesting topics which need improvement in future iterations.
\end{itemize}

\section{Introduction and Motivation}

The evolution of the web and the emergence of online modes of education have enabled new levels of interaction and communication for learning. This has also led to an increase in the responsibilities of teachers, students and educational institutions. Universities are now facing the need to adapt and enter the online space, evolving into a University 2.0. Taking advantage of students' strong interactions in online environments such as YouTube or Facebook, universities are trying to move closer to them, establishing their official presence in the same online places. Despite this, many higher education institutions are cautious about the extent of their presence on these web platforms.

Through this project, we aim to analyse the educational content put forth by top creators in India and worldwide. The content examined includes videos from top universities like the IITs (NPTEL) and MIT (MIT OpenCourseWare) and independent creators like Khan Academy and Unacademy. This project will help us identify ways in which these content creators can increase the reachability and likeability of their content, thereby making education more accessible to the masses as well as making it a profitable venture for the creator. Further, we aim to analyse trends which help us distinguish the educational demand in India from that of other countries, thereby assisting creators in knowing the requirements of Indian students. Since many colleges have shifted to the online mode of delivery due to COVID, analysis of existing educational videos becomes even more critical. This analysis will also help in the upcoming semesters, which are expected to be online as well.

\section{Datasets Required}

For this project, we used the YouTube Data API to obtain several statistics for some of the most popular educational content distributors on YouTube. The various aspects and limitations of the API are described in a later section.
First, we describe the list of channels that we focus our analysis on, and then we describe the data collected for each of them.
% \newpage

\subsection{Channels}
\begin{enumerate}
\item \textbf{NPTEL}: NPTEL, founded by the Indian Institutes of Technology and the Indian Institute of Science, serves as the primary channel for hosting recorded lectures and tutorials from these premier institutes for undergraduate and master's students. The channel covers all introductory engineering courses along with other humanities and life sciences courses as well.\\
\item \textbf{MIT OpenCourseWare}: Managed by the Massachusetts Institute of Technology, this channel publishes study material from its undergraduate and graduate-level courses. It is the American counterpart of NPTEL.\\
\item \textbf{Khan Academy}: This channel offers free online courses for intermediate and high school students. Unlike the others, it does not cater to the undergraduate or graduate audience.\\
\iffalse
\item \textbf{Coursera}: Coursera, started by Stanford professors, is well renowned as the hotspot of all online learning. It contains a mix of high school, undergraduate and graduate-level courses. Other than this, it also offers specializations, degrees, professional and master track courses on several topics assembled from different institutes worldwide.\\
\item \textbf{Udemy}: Udemy is another American MOOC (Massive Open Online Course) provider similar to Coursera.\\
\fi
\item \textbf{Unacademy JEE}: Unacademy is another Indian channel targeting high school students preparing for several entrance examinations in Engineering, Medical and other streams.\\
\item \textbf{Study IQ Education}: This is another Indian channel providing short online classes covering general topics for students preparing for the UPSC entrance examinations.\\
\item \textbf{Physics Wallah}: This is an Indian channel started by Alakh Pandey which provides free online Physics courses meant explicitly for high school students in India preparing for the Joint Entrance Examinations.\\
\end{enumerate}

A comparison of the number of playlists and the number of videos uploaded by each channel is provided in figure \ref{fig:channel_info}\footnote{Data obtained as of November 2020}.

\begin{figure}[!htp]
\centering
\includegraphics[scale = 0.50]{images/videos_playlist_count.png}
\caption{Comparison of number of videos and playlists uploaded by different channels}
\label{fig:channel_info}
\end{figure}
\FloatBarrier
\newpage

\section{Data Retrieval: YouTube Data API}

For each of the channels stated above, we collect data for all videos of the channel on YouTube. This includes:
\begin{itemize}
\item Title
\item Likes/Dislikes
\item Comments
\item Views
\item Video Description
\item Tags
\item Upload date
\item Video Length
\end{itemize}
Below is a short description of the APIs used to populate the data.
\begin{enumerate}
\item \textbf{Channels API\footnote{\url{https://developers.google.com/youtube/v3/docs/channels}}:} We use this API to find all `uploads' of the channel.
\item \textbf{Playlists API\footnote{\url{https://developers.google.com/youtube/v3/docs/playlists}}:} Using this API we get the ids of all the playlists which are part of a particular channel. These playlists mostly represent course playlists in NPTEL/MIT OCW.
\item \textbf{PlaylistItems API\footnote{\url{https://developers.google.com/youtube/v3/docs/playlistItems}}:} Using this API, we get the \textit{videoId} of every video which is part of a playlist.
\item \textbf{Videos API\footnote{\url{https://developers.google.com/youtube/v3/docs/videos}}:} Once we have the \textit{videoId}s for all the videos (uploads by the channel or members of a playlist), we use this API to get the data for each video, which includes the title, likes, views, etc. This API does not provide comments.
\item \textbf{CommentThreads API\footnote{\url{https://developers.google.com/youtube/v3/docs/commentThreads}}:} Using this API we get the comments for a particular video on YouTube.
\end{enumerate}

\subsection{Data Indexing}

The data after extraction is stored (or indexed) in multiple JSON files. We use these JSON files to apply mining techniques and obtain useful insights from the extracted data. This section explains the structure of the JSON files.
\begin{itemize}
\item \texttt{video\_ids.json}: This file contains the video IDs of all videos present inside the six YouTube channels listed above.
\item \texttt{all\_playlist.json}: This file contains, for each channel, its playlists and the video ids of the videos that are part of each playlist.
\item \texttt{all\_videos.json}: This file contains a map from video IDs to the data for the corresponding video. The data includes title, like/dislike counts, views, video description, etc. Note that this data does not contain comments.
\item \texttt{comments.json}: This file contains the comment text and the likes on each comment, for all videos. We ignored videos where the comment count was less than 5, and we scraped a maximum of 50 comments per video.
\end{itemize}
Aside from the above files, which we scraped using the YouTube Data API, we keep some processed files in the \texttt{/data} folder (for example, the outputs of sentiment analysis, clustering, etc.).
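To make the retrieval step concrete, the sketch below shows how the PlaylistItems and Videos APIs can be called with the official \texttt{google-api-python-client}; the API key and playlist id are placeholders, and quota handling, comment paging and error handling are omitted.

\begin{lstlisting}[language=Python]
# Minimal sketch of the retrieval step (simplified; API key is a placeholder).
from googleapiclient.discovery import build

API_KEY = "YOUR_API_KEY"
youtube = build("youtube", "v3", developerKey=API_KEY)

def playlist_video_ids(playlist_id):
    """Collect all videoIds of a playlist via the PlaylistItems API."""
    ids, token = [], None
    while True:
        resp = youtube.playlistItems().list(
            part="contentDetails", playlistId=playlist_id,
            maxResults=50, pageToken=token).execute()
        ids += [it["contentDetails"]["videoId"] for it in resp["items"]]
        token = resp.get("nextPageToken")
        if not token:
            return ids

def video_stats(video_ids):
    """Fetch title and statistics for up to 50 ids per call (Videos API)."""
    out = {}
    for i in range(0, len(video_ids), 50):
        resp = youtube.videos().list(
            part="snippet,statistics,contentDetails",
            id=",".join(video_ids[i:i + 50])).execute()
        for item in resp["items"]:
            out[item["id"]] = item
    return out

# Example:
#   ids = playlist_video_ids("PL_PLACEHOLDER")   # hypothetical playlist id
#   stats = video_stats(ids)
\end{lstlisting}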
\section{Methodology}

\subsection{Video Length}
We carried out an analysis of how parameters such as the number of views, likes and comments vary with the length of a video (in seconds). This analysis is useful for educational content creators when deciding the optimal length of their videos so that they gain maximum reach and/or are monetarily profitable. To understand this variation, we plotted bar plots and box plots with the number of likes/views/comments on the Y-axis and video length on the X-axis. For this we used the data contained in the file \texttt{all\_videos.json}, which contains all these parameters for each of the videos used by us. The results are shown in section \ref{video_length}.

\subsection{Correlation between Frequency of Likes, Views and Comments}
The number of views, likes or comments gathered by a YouTube video is indicative of the video's reachability and its appeal to the public. It is important to understand the correlation between these attributes in order to understand the YouTube algorithm better. It is also not straightforward to identify the trend, since we quite often see that highly viewed videos may also gather a lot of dislikes. We use the Spearman rank correlation method to find the pairwise degree of association between the three variables. The results are described in Section \ref{correlation}.

\subsection{College Ranking Analysis}
We also performed a college-specific analysis by comparing the videos that each college has uploaded to the NPTELHRD channel. However, the videos on the channel are not separated by university directly, so we used the metadata of each YouTube video on the channel and separated them college-wise. We built a named entity recognition (NER) model to assign college tags to the videos. This NER model uses part-of-speech (POS) tagging to identify nouns in the video description, and we then perform similarity matching of these nouns against college names. The similarity matching accounts for videos uploaded with a spelling mistake in the name of the college; it took care of such spelling mistakes and other noise present due to human error. The extracted videos for each college are further tagged with branch names, using the same NER model but this time matching against branch names (a minimal sketch of this tagging step is shown below). After video filtering, several analyses were performed on the metadata from these videos.
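The sketch below is a simplified stand-in for this tagging step, using Python's standard \texttt{difflib} for the similarity matching; the full pipeline additionally uses POS tagging to restrict candidates to nouns, which is omitted here, and the name lists are examples only.

\begin{lstlisting}[language=Python]
# Simplified stand-in for the college/branch tagging described above:
# fuzzy-match word windows from the description against known names.
import difflib

COLLEGES = ["IIT Kanpur", "IIT Madras", "IIT Bombay", "IIT Delhi", "IIT Kharagpur"]
BRANCHES = ["Computer Science", "Electrical", "Mechanical", "Chemical"]

def tag(description, names, cutoff=0.8):
    """Return the best matching name for any 1- to 3-word window, or None."""
    tokens = description.split()
    candidates = [" ".join(tokens[i:i + n])
                  for n in (1, 2, 3) for i in range(len(tokens) - n + 1)]
    best, best_score = None, cutoff
    for cand in candidates:
        for name in names:
            score = difflib.SequenceMatcher(None, cand.lower(), name.lower()).ratio()
            if score > best_score:
                best, best_score = name, score
    return best

desc = "Lecture Series on Data Structures by IIT Kharagpur, Dept. of Computer Science"
print(tag(desc, COLLEGES), tag(desc, BRANCHES))  # IIT Kharagpur Computer Science
\end{lstlisting}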
The results and discussion for this are shown in section \ref{collge_rank}.

\subsection{Playlist Retention}
To find how the views decline as a course (playlist) progresses, we start from the 3rd video of the course, because the first two videos have lots of non-serious views. We then find the percentage of views (compared to the third video) as the course progresses. This signifies the retention of students in a course. Then, we take the median of these percentages at each position in a playlist to get the general trend of playlist progression. This exercise was done for multiple channels to get an idea of viewer retention across them.

We also tried to find the optimal length of a playlist using the length of the playlist, the views and likes of each video in a playlist, and the relative changes in these numbers. However, we could not find any optimal length. Most playlists lose viewers exponentially, and shorter playlists proved to be better, as expected.

\subsection{Ranking of Topics}
\label{method_ranking_topics}
Using the playlist data we obtained for the channels \textbf{NPTELHRD} and \textbf{MIT OpenCourseWare}, we create a ranking of the most popular playlist courses. This involved two steps:
\begin{enumerate}
\item Clustering of topics: playlist titles were clustered into topics (where multiple similar playlist titles were grouped into one single topic) using similarity matching and POS-tagging. In general, one clustered topic contains playlist courses created for different iterations of the same course or other courses with similar content.\\
\item Ranking based on views: we then used max aggregation to get the average views for each topic by aggregating the average views of the individual playlist courses for that topic.\\
\end{enumerate}
The results and discussion for this are shown in section \ref{topics_ranking}.

\subsection{Views Peak Analysis on Playlists}
To find peaks in the view count of a playlist, we compute the relative change in view count for each video as the playlist progresses. We then compare this with videos in other playlists at the same position as the current video. Then, we calculate the z-score of this relative change for each video to find anomalies in the playlist. Finally, we sort all videos in all playlists by their z-scores to find the biggest anomalies across all playlists.

\subsection{Classifying Comments based on Sentiment Polarity}
We use Python's TextBlob \cite{textblob} API to perform sentiment analysis on comments. This assigns a polarity score in the range $[-1,1]$ to each comment. Using the assigned polarity, we classified the comments into three classes, namely positive, neutral and negative. The thresholds for the classes were decided as follows:
\begin{itemize}
\item \texttt{polarity $>$ 0.1}: Positive
\item \texttt{$-$0.1 $\leq$ polarity $\leq$ 0.1}: Neutral
\item \texttt{polarity $<$ $-$0.1}: Negative
\end{itemize}
We analyzed the number of comments of each type (positive, negative and neutral) for the channels NPTELHRD and MIT OpenCourseWare. We chose these channels because they contained a large enough number of comments to come to a statistically significant conclusion.

\section{Results and Discussion}

\subsection{Video Length}
\label{video_length}
\subsubsection{Views}
To get a better idea of the correlation between the views on a video and its duration, we plotted the view count against the video length for all the videos we fetched (see figures \ref{fig:views_video_length_1} and \ref{fig:views_video_length_2}). A minimal sketch of the duration-binned aggregation behind these plots is shown below.
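The sketch assumes a pandas DataFrame built from \texttt{all\_videos.json} with hypothetical \texttt{duration\_s}, \texttt{viewCount}, \texttt{likeCount} and \texttt{commentCount} columns; only the binning is illustrated, not the exact plotting code we used.

\begin{lstlisting}[language=Python]
# Illustrative duration binning for the bar/box plots (column names assumed).
import pandas as pd

def binned_stats(df, metric="viewCount", bin_minutes=5, max_minutes=60):
    """Median of `metric` per video-length bin."""
    edges = range(0, (max_minutes + bin_minutes) * 60, bin_minutes * 60)
    bins = pd.cut(df["duration_s"], bins=list(edges))
    return df.groupby(bins)[metric].median()

# Example usage:
#   df = pd.read_json("all_videos.json", orient="index")
#   binned_stats(df, "viewCount").plot(kind="bar")
\end{lstlisting}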
\begin{figure}[!htpb]
\centering
\subfloat[\centering Bar plot of views and video length]{{\includegraphics[width=0.49\textwidth]{images/global_optimal_viewCount_bar.png}} \label{fig:views_video_length_1}}
\hspace{-2em}%
\qquad
\subfloat[\centering Box plot of views and video length]{{\includegraphics[width=0.49\textwidth]{images/global_optimal_viewCount.png}}\label{fig:views_video_length_2}}%
\caption{Variation of views with video length}%
\end{figure}
\FloatBarrier

\noindent We also used box plots to depict groups of numerical data through their quartiles graphically.

\noindent We observed that although shorter videos fared badly in terms of the views they were able to attract, the views increased rapidly with an increase in length. We observed peaks around the \textbf{10 minute} mark and the \textbf{25 minute} mark. We also observed that videos longer than \textbf{10 minutes} gathered considerably more views than shorter videos. We attribute the preference for longer videos to the inherent abstruseness of educational content. Moreover, in contrast to comedy/gaming/entertainment content, we did not observe any negative trend with respect to increasing video length.

\subsubsection{Likes and Comments}
We also correlated analytical metrics such as the likes on a video and the number of comments on a video with the video length (see figures \ref{fig:likes_video_length_1}, \ref{fig:likes_video_length_2} and \ref{fig:comment_video_length}). We observe a peak for educational videos with length around \textbf{10 minutes}, but the like and comment counts gradually decrease with increasing video length. We observed a steep decrease in these metrics at around the \textbf{15 minute}, \textbf{20 minute} and \textbf{25 minute} marks. This shows that even though videos longer than 10 minutes help increase the number of clicks on a video, longer videos attract fewer likes and comments, which to some extent indicates a dip in user satisfaction.
\begin{figure}[!htpb]
\centering
\subfloat[\centering Bar plot of likes vs video length]{\includegraphics[width=0.49\textwidth]{images/global_optimal_likeCount_bar.png}\label{fig:likes_video_length_1}}
\hspace{-2em}%
\qquad
\subfloat[\centering Box plot of likes vs video length]{\includegraphics[width=0.49\textwidth]{images/global_optimal_likeCount.png}\label{fig:likes_video_length_2}} \\%
\caption{Variation of count of likes with video length}
\end{figure}
\FloatBarrier

\begin{figure}[!htpb]
\centering
\subfloat[\centering Bar plot of comments vs video length]{\includegraphics[width=0.49\textwidth]{images/global_optimal_commentCount_bar.png}\label{fig:comment_video_length_1}}
\hspace{-2em}%
\qquad
\subfloat[\centering Box plot of comments vs video length]{\includegraphics[width=0.49\textwidth]{images/global_optimal_commentCount.png}\label{fig:comment_video_length_2}}\\%
\caption{Variation of count of comments with video length}
\label{fig:comment_video_length}%
\end{figure}
\FloatBarrier

\subsection{Correlation between Frequency of Likes, Views and Comments}
\label{correlation}
\begin{figure}[!htpb]
\centering
\includegraphics[width=9cm]{images/correlation_heatmap.png}%
\caption{Spearman correlation heatmap between likeCount, viewCount, and commentCount}
\label{fig:spearman}%
\end{figure}
We see a strong positive correlation between all pairs of variables (Figure \ref{fig:spearman}). This is slightly different from other content types such as comedy/gaming/entertainment, where highly disliked videos also gather a large number of views.

\subsection{College Ranking Analysis}
\label{collge_rank}
To analyze the college-wise distribution of educational content, we filtered videos from the NPTELHRD channel for the top 5 universities according to the QS World University Rankings 2021: IIT Kanpur, IIT Madras, IIT Bombay, IIT Delhi, and IIT Kharagpur. We then accumulated the branch-wise data distribution for these colleges, taking four branches into consideration, namely Computer Science \& Engineering (CSE), Electrical Engineering (EE), Mechanical Engineering (ME) and Chemical Engineering (CHE) (Figure \ref{fig:branch_video_counts}). For each college we collected the number of videos and three evaluation metrics, namely average likes, average views, and the like/dislike ratio.
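A minimal sketch of this per-college aggregation, together with the Spearman computation behind Figure \ref{fig:spearman}, is shown below. It assumes a pandas DataFrame with one row per video and hypothetical \texttt{college}, \texttt{branch}, \texttt{viewCount}, \texttt{likeCount}, \texttt{dislikeCount} and \texttt{commentCount} columns.

\begin{lstlisting}[language=Python]
# Column names are assumptions; only the aggregation logic is illustrated.
import pandas as pd

def college_branch_summary(df):
    """Video count, average views/likes and like ratio per (college, branch)."""
    grouped = df.groupby(["college", "branch"])
    summary = grouped.agg(videos=("viewCount", "size"),
                          avg_views=("viewCount", "mean"),
                          avg_likes=("likeCount", "mean"),
                          likes=("likeCount", "sum"),
                          dislikes=("dislikeCount", "sum"))
    summary["like_ratio"] = summary["likes"] / (summary["likes"] + summary["dislikes"])
    return summary.drop(columns=["likes", "dislikes"])

def spearman_heatmap_data(df):
    """Pairwise Spearman rank correlations, as plotted in the heatmap."""
    return df[["likeCount", "viewCount", "commentCount"]].corr(method="spearman")
\end{lstlisting}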
\begin{figure}[!htpb]
\centering
\includegraphics[width=15cm]{images/video_counts.png}%
\caption{Branch-wise NPTEL videos for each college}
\label{fig:branch_video_counts}%
\end{figure}

\subsubsection{Comparison with QS rankings}
We tried to derive correlations between the data we collected and the QS World University Rankings (see table \ref{qs_ranking}). We found that colleges with better ranks had greater average views on their videos. However, when it came to actually interacting with the content in terms of likes and comments, we observed that other universities fared better. For example, IIT Bombay has the highest average views for videos related to the CSE department and IIT Delhi has the highest average views for EE department videos; however, when we looked at the like/dislike ratio for both the CSE and EE departments, we found IIT Kanpur to perform better.

\begin{table}[!htpb]
\parbox{.44\linewidth}{
\centering
\begin{tabular}{|c|c|}
\hline
University & QS University Rankings 2021 \\ \hline
IIT Bombay & Ranked 172 \\
IIT Delhi & Ranked 193 \\
IIT Madras & Ranked 275 \\
IIT Kharagpur & Ranked 314 \\
IIT Kanpur & Ranked 350 \\
\hline
\end{tabular}\vspace{3mm}
\caption{QS World rankings}
\label{qs_ranking}
}
\hfill
\parbox{.44\linewidth}{
\begin{tabular}{|c|c|}
\hline
University & Overall average views \\ \hline
\textbf{IIT Delhi} & \textbf{44962} \\
IIT Kharagpur & 32303 \\
IIT Bombay & 28365 \\
IIT Madras & 28184 \\
IIT Kanpur & 17723 \\
\hline
\end{tabular}\vspace{3mm}
\caption{Overall average views}
}
\end{table}
\begin{table}[!htpb]
\parbox{.40\linewidth}{
\centering
\begin{tabular}{ |p{1.5em}|p{2em}|p{2em}|p{2em}|p{2em}|p{2.7em}| }
\hline
& IITB & IITD & IITK & IITM & IITKGP \\\hline
CSE & \textbf{58259} & 44451 & 7114 & 42674 & 38803\\
EE & 25270 & \textbf{52339} & 20808 & 41959 & 35999\\
ME & 30581 & \textbf{32261} & 23718 & 11474 & 31227\\
CHE & 4378 & 1345 & \textbf{10061} & 6379 & 6292\\
\hline
\end{tabular}\vspace{3mm}
\caption{Average views by college and branch}
}
\hfill
\parbox{.40\linewidth}{
\centering
\hspace{-3em}
\begin{tabular}{|p{1.5em}|p{1.7em}|p{1.7em}|p{1.7em}|p{1.7em}|p{2.7em}|}
\hline
& IITB & IITD & IITK & IITM & IITKGP \\\hline
CSE & 93.9 & 94.3 & \textbf{94.8} & 93.8 & 93.7\\
EE & 94.4 & 95.3 & \textbf{96.0} & 95.3 & 93.7\\
ME & 96 & 94 & 96.2 & \textbf{96.4} & 95.7\\
CHE & \textbf{96.3} & 90.5 & 96.2 & 96 & 95.9\\
\hline
\end{tabular}\vspace{3mm}
\caption{Like percentage by college and branch}
}
\end{table}

\newpage
\subsection{Playlist Retention}
We performed a viewer retention analysis using the data obtained from the YouTube API. We observed a decline in views as a playlist progresses. We also compared retention between channels with high school/JEE content (Khan Academy, Unacademy) and channels with college-level content (NPTEL/MIT OCW).

\begin{figure}[!htpb]
\centering
\hspace*{-2.5em}\subfloat[\centering NPTEL and MIT OCW ]{\includegraphics[width=0.59\textwidth]{images/retention_nptel_vs_mit.png} \label{fig:retention_nptel_mit}}
\hspace*{-4em}
\qquad
\subfloat[\centering College and JEE channels]{\includegraphics[width=0.59\textwidth]{images/retention_college_vs_jee.png} \label{fig:retention_college_jee}}\\
\caption{Decline of views comparison (from the 3rd video)}
\label{fig:retention}%
\end{figure}
\FloatBarrier

The above graphs provide interesting insights about retention across channels. We assume that a channel's content has better retention capacity if the decrease in views with increasing video number is less steep. In the first graph, we observe that the viewership of MIT's videos decreases more rapidly than the viewership of NPTEL's videos; thus, we observe better retention in NPTEL's case. One possible reason for the increased retention in the case of NPTEL is that many NPTEL courses involve a certification, which incentivizes watching the videos.
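The retention curves above follow the procedure from the methodology section; a minimal sketch of that computation, assuming each playlist is given as an ordered list of view counts, is shown below.

\begin{lstlisting}[language=Python]
# Sketch of the retention computation: percentage of views relative to the
# 3rd video, then the median across playlists at each position.
from statistics import median

def retention_curve(playlists, max_len=40):
    """playlists: list of lists of view counts, ordered by position in playlist."""
    curve = []
    for pos in range(2, max_len):                      # start from the 3rd video
        pct = [100.0 * views[pos] / views[2]
               for views in playlists
               if len(views) > pos and views[2] > 0]
        if not pct:
            break
        curve.append(median(pct))
    return curve                                       # curve[0] is always 100.0

# Example:
#   nptel_curve = retention_curve(nptel_playlists)
#   mit_curve = retention_curve(mit_playlists)
\end{lstlisting}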
The second graph above shows that the curve for channels with JEE content always remains above the curve for college content. This means that the videos/lectures uploaded for university students see a steeper decrease in viewership as the playlist progresses. This is a fascinating result. We believe that this is because of the following:
\begin{itemize}
\item The target audience for channels with JEE content is the general YouTube audience, whereas the target audience for university-based channels is university students. We hypothesize that educational organizations like Khan Academy, Unacademy, etc.\ have focused more on keeping their viewers engaged and consequently on ease of understanding, whereas university-based channels have focused more on delivering lectures and teaching the contents of the course.
\item We also attribute the steeper decrease in viewership for the college-based content to the audience's attention span and diligence. It is possible that viewers watch the content more seriously on JEE-related channels than on college-content channels: high school students are more serious while studying for JEE and watch these videos more thoroughly, whereas in college a lot of students develop a more care-free attitude and are thus less motivated.
\end{itemize}

\subsection{Ranking of Topics}
\label{topics_ranking}
\noindent The topics generated after clustering (as explained in section \ref{method_ranking_topics}) are then ranked based on their average viewership.
Below are some ranking tables and statistical graphs that we obtained:

\begin{table}[!htpb]
\centering
\begin{tabular}{|c|c|c|c|c|}
\hline
\textbf{Rank} & \textbf{Topic} & \textbf{Department} & \textbf{Avg Views} & \textbf{\#Videos}\\
\hline
1 & Fundamentals of Operations Research & Mechanical & 223591 & 22\\
2 & Basic Electrical Technology & Electrical & 200650 & 39\\
3 & Design of Reinforced Concrete Structures & Civil & 200482 & 70\\
4 & Basic Electrical Circuits & Electrical & 169476 & 101\\
5 & Data Structures and Algorithms & Computer Science & 169452 & 36\\
6 & Thermodynamics & Mechanical & 149631 & 72\\
7 & Programming and Data Structure & Computer Science & 112309 & 32\\
8 & Advanced Operations Research & Mechanical & 108464 & 39\\
9 & Convective Heat and Mass Transfer & Mechanical & 106719 & 122\\
10 & Biochemistry I & Biotechnology & 105999 & 28\\
\hline
\end{tabular}\vspace{3mm}
\caption{Top 10 popular NPTEL topics}
\label{tab:nptel_topics}
\end{table}
\FloatBarrier

\begin{table}[!htpb]
\centering
\begin{tabular}{|c|c|c|c|c|}
\hline
\textbf{Rank} & \textbf{Topic} & \textbf{Department} & \textbf{Avg Views} & \textbf{\#Videos}\\
\hline
\multirow{2}{*}{1} & Introduction to Computer Science \& & \multirow{2}{*}{Computer Science} & \multirow{2}{*}{384292} & \multirow{2}{*}{100}\\
& Programming in Python & & &\\
2 & Linear Algebra & Mathematics & 303443 & 110\\
3 & Introduction to Probability & Mathematics & 266535 & 313\\
4 & Listening, Speaking & English & 254899 & 4\\
\multirow{2}{*}{5} & Topics in Mathematics \& & \multirow{2}{*}{Mathematics} & \multirow{2}{*}{234844} & \multirow{2}{*}{24}\\
& Applications in Finance & & &\\
6 & Quantum Physics III & Physics & 212147 & 266\\
7 & Artificial Intelligence & Computer Science & 205565 & 30\\
\multirow{2}{*}{8} & Introduction to Computational Thinking & \multirow{2}{*}{Computer Science} & \multirow{2}{*}{190149} & \multirow{2}{*}{15}\\
& \& Data Science & & &\\
9 & Single Variable Calculus & Mathematics & 181583 & 35\\
10 & H/W Help for Multivariable Calculus & Mathematics & 177010 & 192\\
\hline
\end{tabular}\vspace{3mm}
\caption{Top 10 popular MITOCW topics}
\label{tab:mitocw_topics}
\end{table}
\FloatBarrier

\begin{figure}[!htpb]
\centering
\hspace*{-2.5em}\subfloat[\centering Views distribution for NPTEL topics]{\includegraphics[width=0.59\textwidth]{images/nptel_views_distr.png} \label{fig:nptel_views_distr}}
\hspace*{-4em}
\qquad
\subfloat[\centering Views distribution for MITOCW topics]{\includegraphics[width=0.59\textwidth]{images/mitocw_views_distr.png} \label{fig:mitocw_views_distr}}\\
\caption{Views distribution comparison}
\label{fig:views_distr}%
\end{figure}
\FloatBarrier

\begin{figure}[!htpb]
\centering
\includegraphics[scale=0.5]{images/fall_of_views.png}
\caption{Comparison of the fall of popularity among top topics}
\label{fig:fall_of_views}
\end{figure}
\FloatBarrier

Several important inferences can be drawn from the above tables, graphs and bar plots:
\begin{itemize}
\item Among the top 10 topics for NPTEL, the vast majority of views ($\sim$62.5\%) comes from only the first 3--5 topics, while for MITOCW this percentage is 58.6\%.\\
\item We also observe that the fall of popularity among the top 10 topics is steeper for MITOCW than for NPTEL. In other words, only the first few topics of MITOCW are very popular.\\
\item We see a difference of focus/perspective between the two channels.
While NPTEL's popular courses are dominated by Computer Science, Mechanical, and Electrical, covering all major domains of engineering, MITOCW's top courses mainly consist of Mathematics and Computer Science courses.\\
    \item In the top 100 list of topics obtained for MITOCW, we found at least 5 different topics relating to Finance/Economics (\textbf{Topics in Mathematics w Applications in Finance}, \textbf{Finance Theory I}, \textbf{Principles of Microeconomics}, \textbf{Development Economics: Macroeconomics}, \textbf{Poker Theory and Analysis}), while for NPTEL there were almost none. This strongly suggests that NPTEL should publish more playlists focused on the economics and finance domain.\\
\end{itemize}
We also created a mapping of similar/common topics between the playlists of the two channels. This is tabulated below.
\begin{table}[!htpb]
    \centering
    \begin{tabular}{|c|c|c|c|}
    \hline
    \textbf{Rank} & \textbf{NPTEL Playlist Topic} & \textbf{Rank} & \textbf{MITOCW Playlist Topic}\\
    \hline
    5 & Data Structures and Algorithms & 13 & Introduction to Algorithms\\
    79 & Linear Algebra & 2 & Linear Algebra\\
    32 & Artificial Intelligence & 7 & Artificial Intelligence\\
    4 & Basic Electrical Circuits & 18 & Circuits and Electronics\\
    6 & Thermodynamics & 23 & Thermodynamics and Kinetics\\
    14 & Quantum Physics & 6 & Quantum Physics III\\
    17 & Advanced Digital Signal Processing & 42 & Digital Signal Processing\\
    \hline
    \end{tabular}\vspace{3mm}
    \caption{Mapping of similar topics from NPTEL and MITOCW}
    \label{tab:topic_mapping}
\end{table}
\FloatBarrier
\noindent We find a couple of outliers in the mapping of topics shown above. In particular, Linear Algebra, being a basic foundation stone of various fields in Mathematics, Physics, and Computer Science, is highly popular among MITOCW's playlists, but the same course by NPTEL has not seen comparable popularity. The same goes for the Artificial Intelligence course. This suggests that the existing NPTEL playlists for Linear Algebra and Artificial Intelligence should be revised to better match the requirements of the current generation of research and development.\\

\subsection{Analysis of peaks of views in a playlist}
We performed peak analysis on playlists using the views gained by each video, framing the task as anomaly detection: using the z-score as the metric, we look for videos whose view counts diverge from the values predicted for them within the playlist. Below are a few graphs that we obtained.
\begin{figure}[!htpb]
    \centering
    \subfloat[\centering Famous lecturers ]{\includegraphics[width=0.51\textwidth ,height=5.5cm]{images/peak_graph2.png} \label{fig:img1}}
    \hspace{-3.5em}%
    \qquad
    \subfloat[\centering Interesting title ]{\includegraphics[width=0.51\textwidth,height=5.5cm]{images/peak_graph3.png} \label{fig:img2} }\\%
    \caption{Peak analysis graphs - 1}
\end{figure}
\begin{figure}[!htpb]
    \centering
    \subfloat[Popular course topic: AJM]{\label{fig:a}\includegraphics[width=0.50\textwidth,height=5.5cm]{images/peak_graph1.png} \label{fig:img3}}
    \hspace{-2.5em}%
    \qquad
    \subfloat[Popular course topic: Mechanical Vibration]{\includegraphics[width=0.50\textwidth,height=5.5cm]{images/peak_graph4.png} \label{fig:img4}}\\%
    \caption{Peak analysis graphs - 2}
\end{figure}
\FloatBarrier
After observing peaks in the playlists, we closely looked at the playlist content and the content of the peak videos to investigate the reasons behind the peaks. Below are a few interesting insights that we obtained about the peak videos in the playlists.
\begin{itemize}
    \item \textbf{Famous guest lecturers:} We observed a sharp increase in view count for videos featuring special lecturers or famous guests. For an Aerospace course playlist on the MIT OCW channel, a sharp peak is observed for the video featuring \textbf{Lt Col Randy ``Laz'' Gordon} as a guest lecturer. Another instance is a lecture by \textbf{Abhijit Banerjee} \textbf{(see figure \ref{fig:img1}).}
    \item \textbf{Important and difficult-to-understand topics:} In our analysis, we observed that sharp peaks appeared for videos covering dense, hard-to-grasp content. Such videos may require repeated re-watches, which contributes to the peaks that we observed. For example, \textbf{Mechanical Vibration} (course: Engineering Dynamics) and \textbf{Abrasive Jet Machining} (course: Advanced Manufacturing Processes) are important and challenging topics for which students refer to online resources \textbf{(see figures \ref{fig:img3} and \ref{fig:img4}).} Not only do these peaks contribute to interesting findings from course data, but they also give instructors insight into which topics students find tough to grasp, helping them focus more on those topics.
    \item \textbf{Interesting titles:} We also found some peaks occurring because of interesting or tempting video titles. One such video, titled ``\textit{Puzzle 8: You Won't Want to Play Sudoku Again}'', attracts viewers and hence generates more views compared to other videos in the playlist \textbf{(see figure \ref{fig:img2}).} Thus, including some videos with such titles might contribute to greater popularity of course playlists.
    \item \textbf{Common topics:} Videos on familiar topics and refresher videos in courses also gain large viewership compared to other videos. As these videos are fundamental in nature, they can cater to a wider variety of expertise levels and backgrounds. They are also some of the most used reference videos and thus attract more views than other videos of the course \textbf{(for example, probability refreshers in the middle of a course).}
\end{itemize}

\subsection{Classifying comments based on sentiment polarity}
The following table provides a few examples of assigning a polarity to each comment:
\begin{table}[!htbp]
\begin{adjustbox}{width=\columnwidth,center}
\begin{tabular}{|l|c|}
\hline
\multicolumn{1}{|c|}{\textbf{Comment Text}} & \textbf{Polarity Assigned} \\ \hline
The best part of NPTEL is we will be certified from one best Institute (IITs). Very Informative. & 0.73 \\ \hline
Is NPTEL courses only for engineering students \& engineering faculties? & 0 \\ \hline
the voice breaks. annoying & -0.8 \\ \hline
\end{tabular}\vspace{3mm}
\end{adjustbox}
\end{table}

Looking at the data, we found that the neutral comments talked more about course logistics than about the content, while positive and negative comments said more about the course content. We also found that neutral comments were far more numerous than negative comments. This observation tells us that more people were concerned about course logistics (video quality, availability of slides, etc.) than about the videos' content. It further motivates spending more on increasing video quality rather than on creating better content.
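The polarity scores above lie in the range $[-1, 1]$, with positive, zero, and negative scores corresponding to positive, neutral, and negative comments. A minimal sketch of assigning such scores with an off-the-shelf tool such as TextBlob (shown purely for illustration; it is not necessarily the exact tool behind the table above) looks like:
\begin{verbatim}
from textblob import TextBlob

def polarity(comment_text):
    # score in [-1, 1]: > 0 positive, == 0 neutral, < 0 negative
    return TextBlob(comment_text).sentiment.polarity

for text in ["Very informative.", "the voice breaks. annoying"]:
    print(text, "->", polarity(text))
\end{verbatim}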
\begin{figure}[!htpb]
    \centering
    \includegraphics[width=8cm]{images/polarity_counts.png}%
    \caption{Frequency of Positive, Neutral and Negative comments}
    % \caption{Box plot of view and video length}%
    \label{fig:polarity_counts}%
\end{figure}

\section{Conclusion}
The analysis of how the number of likes/views/comments varies with video length tells us that very short educational videos are not favored much by users. Also, while longer videos can still get a substantial number of clicks, to obtain a high number of likes and comments it is important to keep the video length around the 10--15 minute mark.

Comparing different IITs, we find that IIT-D videos tend to be more popular, whereas IIT-KGP has published many videos across various topics. IIT Kanpur videos have good ratings, especially those by the CSE and EE departments.

Almost 80\% of students who reach the $3^{rd}$ video in a course drop off before they reach the $20^{th}$ video. The overall retention of higher-education channels (NPTEL, MIT) is much lower than that of high-school channels. This can be attributed to the better quality videos on high-school channels and the higher seriousness of students preparing for JEE. Also, the retention of NPTEL is better than that of MIT, possibly due to the certification involved.

Popular courses on NPTEL span more branches of engineering (CSE, EE, ME, CHE, etc.), whereas those of MIT consist mostly of Mathematics and Computer Science. However, MIT has popular courses in non-engineering fields like Economics and Finance. Courses on Linear Algebra and Artificial Intelligence are highly popular on MIT but not on NPTEL, suggesting that the IITs should revise these courses on NPTEL.

Anomalies in playlists can be used to find topics for which students are referring to online resources. These topics are important but challenging to grasp, and course instructors must teach them with more attention. One example is Abrasive Jet Machining from the course Advanced Manufacturing Processes.

In the comments section, more people are concerned about course logistics than about course content. This indicates that NPTEL should focus on teaching and video quality rather than on improving the contents of the course.

% The analysis has put forward some interesting insights. We found video length of around 10 min is optimal for maximum engagement with the audience. Strong correlation exists between likes, views and comments. QS rankings are more closely related to popularity rather than quality of content.

\section{Future Directions}
The YouTube Analytics API offers detailed information about videos that is only available to the owners of the channel uploading them (NPTEL, MIT OCW, etc. in this case). Aside from the analysis simply based on the number of views, we can use the Analytics API to see how individual sections of the videos are being watched, the average duration watched, how many people are re-watching the videos, etc., to gain more insight into the topics. We can also use the demographics and location information of viewers to get interesting results.

We can also compare educational videos with other categories (gaming, music, entertainment) to draw interesting comparisons and parallels. This comparison might give us insights into how to increase the quality and boost the popularity of educational videos.

\nocite{*}
\bibliographystyle{acm}
\bibliography{ref}

\end{document}
{ "alphanum_fraction": 0.7421988033, "avg_line_length": 62.2747497219, "ext": "tex", "hexsha": "802b0645c51f73062829fd9bd0238a35a0506b35", "lang": "TeX", "max_forks_count": 2, "max_forks_repo_forks_event_max_datetime": "2021-01-23T15:15:35.000Z", "max_forks_repo_forks_event_min_datetime": "2020-12-06T11:34:28.000Z", "max_forks_repo_head_hexsha": "24e10383a6cecf473420eb0483cf96e98dd60692", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "keshav22bansal/youtube_video_analysis", "max_forks_repo_path": "report/main.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "24e10383a6cecf473420eb0483cf96e98dd60692", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "keshav22bansal/youtube_video_analysis", "max_issues_repo_path": "report/main.tex", "max_line_length": 581, "max_stars_count": null, "max_stars_repo_head_hexsha": "24e10383a6cecf473420eb0483cf96e98dd60692", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "keshav22bansal/youtube_video_analysis", "max_stars_repo_path": "report/main.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 14850, "size": 55985 }
%%===================================================================== %% Conclusions %%===================================================================== \chapter{Conclusions} \label{chapter:conclusions} A current trend in database systems is that the history of the database contents must be accessible, in addition to the current state of the data set. The traditional single-version \Btree\ index can be straightforwardly extended so that multiversion data may be indexed by it, but the versioned extension is not efficient, as was shown in \chapref{chapter:mv-index}. The problem with this approach is the range-query action, which is inefficient because the data items with the same key but with different versions are clustered close to each other, while the data items with the same version but with different keys are not. % the single-key actions (key query, key insertion, and key deletion) on % a set of multiversion data can be implemented efficiently with a % single-version \Btree\ index Efficient indexing of the data set evolution therefore requires a multiversion index structure. When querying for any fixed version~$v$, an optimal multiversion index structure should be as efficient as a single-version index structure that only indexes the data items that are alive at version~$v$. We have defined this as the requirement for optimality of multiversion indexes (\defref{def:optimal-mv} in \secref{sec:mv-index:optimality}). This guarantees that range queries remain efficient even if the database accumulates a long history of updates. When data items are logically deleted, the range queries that target the latest committed version should become more efficient as fewer data items are alive. Range queries are an important operation in a general-purpose database system because index scans and joins are based on them. We have reviewed three of the most efficient multiversion index structures in \chapref{chapter:tsbmvbt}. These are the \TSBtree\ of Lomet and Salzberg~\cite{lomet:1989:tsb,lomet:1990:tsb-performance}, the multiversion \Btree\ (MVBT) of Becker et~al.~\cite{becker:1993:optimal,becker:1996:mvbt}, and the multiversion access structure (MVAS) of Varman and Verma~\cite{varman:1997:multiversion}. From these structures, only the MVBT is considered optimal by our definition of optimality. The problem with MVBT and MVAS is that they follow a single-update model, in which the update cannot be rolled back, and therefore these indexes cannot be used as general database indexes in a transactional multi-user environment. On the other hand, the \TSBtree\ does not have this restriction, but it does not guarantee any optimal bounds for the range-query performance, either. In particular, in the presence of logical deletions, the performance of the \TSBtree\ degrades because the leaf pages of the index structure are not merged. As an initial step, we have introduced transactions to the MVBT by redesigning it, as described in \chapref{chapter:tmvbt}. The redesigned transactional MVBT (TMVBT) index retains the optimal bounds of the MVBT and allows one updating multi-action transaction to operate concurrently with multiple read-only transactions. The TMVBT index is an efficient index structure that is usable on its own in situations where there is only a single source of updates, such as in data stream management systems. 
In \chapref{chapter:cmvbt} we presented the design of our concurrent multiversion \Btree\ (CMVBT) index which uses a separate main-memory-resident versioned \Btree~(VBT) index to store the pending updates created by active transactions, and a TMVBT index for indexing the data items inserted by committed transactions. Once an active transaction~$T$ has committed, a system maintenance transaction is run to apply the updates of~$T$ from the VBT index into the main TMVBT index. We say that a version~$v$ is stable, if all the updates of the transaction~$T$ that created the version~$v$ have been applied to the TMVBT index. The CMVBT index is thus optimal when querying for the data items of stable versions, and guarantees that the performance of the queries never degrades, even in the presence of deletions. The separate VBT index is kept small by constantly moving the updates of committed transactions into the TMVBT index. The VBT can be kept entirely in main memory during general transaction processing, and it does not incur any additional \abbr{I/O} operations. Our CMVBT algorithms are designed to work in a multi-user environment with multiple concurrent updating transactions. We allow transactions to roll back; either entirely, or up to a preset savepoint. Standard concurrency-control algorithms can be used to maintain logical data consistency. The snapshot isolation algorithms~\cite{berenson:1995:sql-critique} are especially well suited for use with our multiversion index structure, and they guarantee snapshot isolation for all transactions. Our algorithms are made recoverable by the ARIES recovery algorithm~\cite{mohan:1992:aries,mohan:1992:aries-im}, and we apply structure-modification operations on a level-by-level basis, performing each SMO as an atomic action that transforms a balanced index into another balanced index~\cite{jaluta:2003:recoverable,jaluta:2005:blink,jaluta:2006:page-server}. Because the commit-time version of a data item is not known when the transaction that created it is active, the data-item updates must initially be tagged with transaction identifiers, which are later changed to commit-time versions. The CMVBT index organization allows the data-item versions to be efficiently changed from transaction identifiers into commit-time versions when the maintenance transaction moves the updates from the VBT into the TMVBT index. This is a non-trivial issue that often requires special book-keeping arrangements in other multiversion index structures that support commit-time versioning. We have experimentally analyzed the performance of the CMVBT index in \chapref{chapter:performance} and compared it to the performance of the \TSBtree. The results we obtained from our experiments agree with what we expected from our analytical results. The CMVBT index structure performs on par with the \TSBtree\ index in standard transaction processing, but is more efficient for key-range queries. The efficiency of the key-range queries is especially apparent if the history of the database contains deletions. We have furthermore compared the combined CMVBT index to the TMVBT, and conclude that the separate VBT index does not affect the overall performance significantly. For completeness, we have also demonstrated that the performance of range queries degrades rapidly if the multiversion data items are indexed by a single-version \Btree\ index. There is a downside to the optimal performance of the TMVBT index; namely, that the index structure requires more space than the \TSBtree. 
While the asymptotic space complexity of all the compared index structures is the same, the size of the TMVBT (in itself, and as part of the CMVBT index) was \SIrange{10}{60}{\percent} greater than the size of the \TSBtree\ in our tests, depending on the \TSBtree\ splitting policy and on the number of deletions in the database history. Our conclusion is thus that the CMVBT structure is a good choice for a general-purpose multiversion index when performance is more important than storage space, especially so when it is expected that the history will also contain key deletions. The \TSBtree\ is a better choice when storage space is limited, particularly if historical data is to be stored on a tertiary storage, because the \TSBtree\ allows historical pages to be moved during time-splits.
{ "alphanum_fraction": 0.797542044, "avg_line_length": 53.6805555556, "ext": "tex", "hexsha": "ea01ed3094a18462dd41ba45fa84f9e930f0d699", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "f35bec52a6cb17a8a2ad778730f5254d2dcfbd7d", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "thaapasa/dissertation", "max_forks_repo_path": "08-conclusions.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "f35bec52a6cb17a8a2ad778730f5254d2dcfbd7d", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "thaapasa/dissertation", "max_issues_repo_path": "08-conclusions.tex", "max_line_length": 79, "max_stars_count": null, "max_stars_repo_head_hexsha": "f35bec52a6cb17a8a2ad778730f5254d2dcfbd7d", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "thaapasa/dissertation", "max_stars_repo_path": "08-conclusions.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1747, "size": 7730 }
\subsection{Variance of bootstrap estimators}
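Given $B$ bootstrap replicates $\hat{\theta}^{*}_{1},\dots,\hat{\theta}^{*}_{B}$ of an estimator $\hat{\theta}$, the variance of $\hat{\theta}$ is typically estimated by the sample variance of the replicates:
\[
\widehat{\operatorname{Var}}_{\mathrm{boot}}(\hat{\theta})
= \frac{1}{B-1}\sum_{b=1}^{B}\left(\hat{\theta}^{*}_{b}-\bar{\theta}^{*}\right)^{2},
\qquad
\bar{\theta}^{*}=\frac{1}{B}\sum_{b=1}^{B}\hat{\theta}^{*}_{b}.
\]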
{ "alphanum_fraction": 0.8125, "avg_line_length": 12, "ext": "tex", "hexsha": "1640d367be46f33b9b72be2959fe2c2c00c1a03a", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "adamdboult/nodeHomePage", "max_forks_repo_path": "src/pug/theory/statistics/bootstrap/01-02-bootstrap_variance.tex", "max_issues_count": 6, "max_issues_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93", "max_issues_repo_issues_event_max_datetime": "2022-01-01T22:16:09.000Z", "max_issues_repo_issues_event_min_datetime": "2021-03-03T12:36:56.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "adamdboult/nodeHomePage", "max_issues_repo_path": "src/pug/theory/statistics/bootstrap/01-02-bootstrap_variance.tex", "max_line_length": 45, "max_stars_count": null, "max_stars_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "adamdboult/nodeHomePage", "max_stars_repo_path": "src/pug/theory/statistics/bootstrap/01-02-bootstrap_variance.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 11, "size": 48 }
% !TeX root = ../sustechthesis-example.tex \chapter{English Example} If your supervisor is a foreign resident, or if your supervisor or defense committee specifically allows writing in English, the thesis may be written in English as the primary language. Please check with your supervisor or department secretary to confirm if you can write in English. \section{Reference guide} Writing in English still requires the Chinese reference standard GB/T 7714-2015.
{ "alphanum_fraction": 0.8025751073, "avg_line_length": 42.3636363636, "ext": "tex", "hexsha": "e00a79dd55640908263bc17bcffba37ae7aa9dff", "lang": "TeX", "max_forks_count": 13, "max_forks_repo_forks_event_max_datetime": "2022-03-25T07:04:16.000Z", "max_forks_repo_forks_event_min_datetime": "2021-04-14T08:29:58.000Z", "max_forks_repo_head_hexsha": "931fdac68be311152922dee689c631cebb31d468", "max_forks_repo_licenses": [ "LPPL-1.3c" ], "max_forks_repo_name": "Spj-Zhao/sustech-master-thesis", "max_forks_repo_path": "data/chap05.tex", "max_issues_count": 30, "max_issues_repo_head_hexsha": "931fdac68be311152922dee689c631cebb31d468", "max_issues_repo_issues_event_max_datetime": "2022-03-31T07:07:06.000Z", "max_issues_repo_issues_event_min_datetime": "2021-04-24T10:30:17.000Z", "max_issues_repo_licenses": [ "LPPL-1.3c" ], "max_issues_repo_name": "Spj-Zhao/sustech-master-thesis", "max_issues_repo_path": "data/chap05.tex", "max_line_length": 284, "max_stars_count": 38, "max_stars_repo_head_hexsha": "984d64d1b84ff17222ddc8f7420b47e4a713e6d2", "max_stars_repo_licenses": [ "LPPL-1.3c" ], "max_stars_repo_name": "SUSTech-CRA/sustech-master-thesis", "max_stars_repo_path": "data/chap05.tex", "max_stars_repo_stars_event_max_datetime": "2022-03-31T10:29:51.000Z", "max_stars_repo_stars_event_min_datetime": "2021-03-28T17:05:28.000Z", "num_tokens": 93, "size": 466 }
\subsection{k-means}
\begin{figure}
    \begin{minipage}{0.5\textwidth}
        \centering
        \includegraphics[width=.9\linewidth]{elbowds1.png}
        \caption{Timing curve vs silhouette coeff. dataset 1}\label{Fig:K-means vs silhouette coeff. dataset 1}
    \end{minipage}\hfill
    \begin{minipage}{0.5\textwidth}
        \centering
        \includegraphics[width=.9\linewidth]{elbowds2.png}
        \caption{Timing curve vs silhouette coeff. dataset 2}\label{Fig:K-means vs silhouette coeff. dataset 2}
    \end{minipage}
\end{figure}
The two clustering techniques I explored for these datasets are K-Means clustering and Gaussian Mixture Models (Expectation Maximization).
In order to choose an appropriate $k$ for k-means clustering, I used the elbow method\cite{developers_2020}. Unless otherwise specified, the distance measure I used was Euclidean distance.
Comparing all cluster cohesion metrics (mean distortion, silhouette coefficient, and Calinski-Harabasz score), here are the elbow results:
\begin{center}
    \begin{tabular}{|c| c | c | c |}
        \hline
        & Metric & Cluster Count & Value \\
        \hline \hline
        Dataset 1 & Avg Silhouette Coefficient & 4 & 0.558 \\
        \hline
        Dataset 2 & Avg Silhouette Coefficient & 3 & 0.687 \\
        \hline
    \end{tabular}
\end{center}
Looking at figure~\ref{Fig:K-means vs silhouette coeff. dataset 1} and figure~\ref{Fig:K-means vs silhouette coeff. dataset 2}, you can see similar numbers in terms of the best ``k''. I chose the silhouette coefficient over the other cluster cohesion metrics because it favors well-separated clusters (coefficients closer to 1), which I anticipate would improve generalization and overall accuracy when the clusters are used as inputs to a learner in a downstream machine learning process. Dataset 2 appears to have better separation between its 3 clusters than dataset 1's 4 clusters. Further silhouette analysis on DS1 reveals that the clusters, when projected onto the first two features, do not have a round ``cluster'' shape and are instead oval. Dataset 2 had similar characteristics to dataset 1, except that cluster two is actually interspersed between cluster zero and cluster one; however, this held only for the first two features, and other features could have had better separation and shape.

Both clustering methods, on both datasets, appeared to find two very strongly defined clusters, and then up to two additional weakly defined clusters. This more or less aligns with the fact that this is a binary classification problem and that samples could be placed into one of two clusters. The clusters were better defined and better separated on the second dataset, which also makes sense given that all supervised learning methods performed better in terms of accuracy on this dataset relative to the first. On closer inspection, the third and fourth clusters appear to be randomly dispersed throughout the dataset, capturing outliers; these clusters likely act as noise capturers.
\begin{center}
    \begin{tabular}{|c| c | c | c |}
        \hline
        & Normalized Mutual Information & Homogeneity Score & Completeness Score \\
        \hline \hline
        Dataset 1 & 0.001 & 0.002 & 0.001 \\
        \hline
        Dataset 2 & 0.189 & 0.236 & 0.158 \\
        \hline
    \end{tabular}
\end{center}
For each metric in the table above, values closer to 1 are better than values closer to 0. It seems that the performance of K-means is slightly better on dataset 2 than on dataset 1. To improve performance, we would likely need to include a dimensionality-reduction technique, or modify the dataset so that the clusters are more hyper-spherical and better separated from each other in the feature space.
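For reference, the silhouette-based choice of $k$ can be reproduced with a short scikit-learn snippet along the following lines; the synthetic blobs here are only a stand-in for the two (preprocessed) datasets used in this assignment, so the snippet illustrates the method rather than the exact pipeline.
\begin{verbatim}
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
from sklearn.metrics import silhouette_score

# Stand-in data; replace with the actual feature matrix.
X, _ = make_blobs(n_samples=500, centers=4, random_state=0)

scores = {}
for k in range(2, 11):
    labels = KMeans(n_clusters=k, n_init=10, random_state=0).fit_predict(X)
    scores[k] = silhouette_score(X, labels)

best_k = max(scores, key=scores.get)
print(best_k, round(scores[best_k], 3))
\end{verbatim}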
\subsection{Expectation Maximization}\label{subsec:expectation-maximization}
\begin{figure}
    \begin{minipage}{0.5\textwidth}
        \centering
        \includegraphics[width=.9\linewidth]{gmmcomponentsds1.png}
        \caption{n\_components vs AIC/BIC DS1}\label{Fig:GMM DS1}
    \end{minipage}\hfill
    \begin{minipage}{0.5\textwidth}
        \centering
        \includegraphics[width=.9\linewidth]{gmmcomponentsds2.png}
        \caption{n\_components vs AIC/BIC DS2}\label{Fig:GMM DS2}
    \end{minipage}
\end{figure}
For expectation maximization, I used the scikit-learn Gaussian mixture model implementation. Based on my exploration, I do not believe that the data are distributed hyper-spherically; therefore, a GMM should be able to group samples better, given its ability to accommodate non-spherical cluster boundaries. In order to select the best value for the number of components in the model, I plotted the Bayesian Information Criterion (BIC) and the Akaike Information Criterion (AIC) as functions of the number of components; the point where they begin to diverge is likely the best place to stop in terms of n\_components. In this case, looking at figures~\ref{Fig:GMM DS1} and~\ref{Fig:GMM DS2}, the optimal number of components appears to be the same as the optimal $k$ found previously (4 and 3, respectively). After fitting models with these values and comparing the results, I obtained:
\begin{center}
    \begin{tabular}{|c| c | c | c |}
        \hline
        & Normalized Mutual Information & Homogeneity Score & Completeness Score \\
        \hline \hline
        Dataset 1 & 0.002 & 0.002 & 0.002 \\
        \hline
        Dataset 2 & 0.133 & 0.174 & 0.108 \\
        \hline
    \end{tabular}
\end{center}
These clusters more or less make sense in much the same way that the K-means clusters did. For dataset 1, GMM outperformed K-means in terms of the mutual information, homogeneity, and completeness between the cluster assignments and the actual labels, whereas for dataset 2 K-means outperformed GMM, which indicates that the data were slightly more spherical than I anticipated. However, neither of these clustering techniques did very well, since all the values were close to 0, indicating that the cluster assignments were nearly independent of the true labels and that the clusters themselves were neither very homogeneous nor complete.
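The component-count selection described above can be sketched similarly; again the data is a stand-in, and only the use of the \texttt{bic}/\texttt{aic} methods of \texttt{GaussianMixture} matters here.
\begin{verbatim}
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.mixture import GaussianMixture

X, _ = make_blobs(n_samples=500, centers=3, random_state=0)

n_range = range(1, 11)
bic, aic = [], []
for n in n_range:
    gmm = GaussianMixture(n_components=n, random_state=0).fit(X)
    bic.append(gmm.bic(X))
    aic.append(gmm.aic(X))

best_n = list(n_range)[int(np.argmin(bic))]
print(best_n)
\end{verbatim}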
{ "alphanum_fraction": 0.7160115979, "avg_line_length": 62.08, "ext": "tex", "hexsha": "a5e0e5ad009270f0002036e5b190d3fb7ad26204", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "e37e7b9259237adffbeb36ccc8dd17f67892286a", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "zparnold/cs7641", "max_forks_repo_path": "assignment3/clustering-discussion.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "e37e7b9259237adffbeb36ccc8dd17f67892286a", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "zparnold/cs7641", "max_issues_repo_path": "assignment3/clustering-discussion.tex", "max_line_length": 219, "max_stars_count": null, "max_stars_repo_head_hexsha": "e37e7b9259237adffbeb36ccc8dd17f67892286a", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "zparnold/cs7641", "max_stars_repo_path": "assignment3/clustering-discussion.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1495, "size": 6208 }
\subsection{Restatement of Problem}

With basic knowledge of the earth's carbon cycle and of fungi, the vital agents in decomposing wood fibers, we analyze the relationship between the two traits mentioned above and the rate of decomposition. Our team is assigned to solve the problems below.

\begin{itemize}
    \item[$\circledcirc$] \textbf{Problem 1:} Build the \textbf{decomposition model} of ground litter and woody fibers by calculating the \textbf{fungal activity factor}. By considering the interactions of fungi, fungal growth rate, and moisture tolerance, the decomposition model is then optimized.
    \item[$\circledcirc$] \textbf{Problem 2:} Describe the \textbf{interactions} between different species of fungi, and characterize the \textbf{trends} of short- and long-term interactions.
    \item[$\circledcirc$] \textbf{Problem 3:} Analyze the different \textbf{influences} on each isolate and on fungal combinations \textbf{caused by different environments}, including arid, semi-arid, temperate, arboreal, and tropical rain forests.
    \item[$\circledcirc$] \textbf{Problem 4:} Assess the impact of fungal community diversity on the efficiency of the earth's carbon cycle, and explore the importance and role of fungal communities' \textbf{biodiversity} in the presence of different degrees of variability in the local environment.
\end{itemize}
{ "alphanum_fraction": 0.8005974608, "avg_line_length": 167.375, "ext": "tex", "hexsha": "1cbce2edaced5ec0cac2e8b28343cf915274b10b", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "3eaf143f4319fae681d98134bfc7e699833d8273", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "syy11cn/2021-mcm-meritorious-article", "max_forks_repo_path": "1/2.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "3eaf143f4319fae681d98134bfc7e699833d8273", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "syy11cn/2021-mcm-meritorious-article", "max_issues_repo_path": "1/2.tex", "max_line_length": 297, "max_stars_count": 3, "max_stars_repo_head_hexsha": "3eaf143f4319fae681d98134bfc7e699833d8273", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "syy11cn/2021-mcm-meritorious-article", "max_stars_repo_path": "1/2.tex", "max_stars_repo_stars_event_max_datetime": "2021-11-11T10:37:34.000Z", "max_stars_repo_stars_event_min_datetime": "2021-11-07T14:38:30.000Z", "num_tokens": 318, "size": 1339 }
\documentclass[submission, Phys, hidelnks]{SciPost} \usepackage{physics} \usepackage{graphicx} \usepackage[ruled, vlined]{algorithm2e} \usepackage{amsmath, bm} \usepackage{dsfont} \usepackage{listings} \usepackage{color} \usepackage{xcolor} \DeclareFixedFont{\ttb}{T1}{txtt}{bx}{n}{10} \DeclareFixedFont{\ttm}{T1}{txtt}{m}{n}{10} \definecolor{deepblue}{rgb}{0,0,0.7} \definecolor{deepred}{rgb}{0.6,0,0} \definecolor{deepgreen}{rgb}{0,0.5,0} \newcommand\pythonstyle{\lstset{ language=Python, keepspaces=true, basicstyle=\ttfamily\small, tabsize=3, keywordstyle=\color{keywordcolour}, frame=single, keywordstyle=\color{deepblue}, emph={SigmaX, SigmaZ, __init__, RenyiEntropy, swap}, emphstyle=\color{deepred}, stringstyle=\color{deepgreen}, showstringspaces=false, commentstyle=\color{deepgreen}\ttfamily }} \lstnewenvironment{python}[1][]{ \pythonstyle{} \lstset{#1} } {} \newcommand\pythoninline[1]{{\pythonstyle\lstinline!#1!}} \newcommand{\red}[1]{{\color{red} #1}} \newcommand{\x}{\bm{\mathrm{x}}} \SetKwComment{Comment}{$\triangleright$\ }{} \begin{document} \begin{center}{\Large \textbf{ QuCumber: wavefunction reconstruction with neural networks }}\end{center} \begin{center} Matthew~J.~S.~Beach\textsuperscript{1,2}, Isaac~De~Vlugt\textsuperscript{2}, Anna~Golubeva\textsuperscript{1,2}, Patrick~Huembeli\textsuperscript{1,3}, Bohdan~Kulchytskyy\textsuperscript{1,2}, Xiuzhe~Luo\textsuperscript{2}, Roger~G.~Melko\textsuperscript{1,2*}, Ejaaz~Merali\textsuperscript{2}, Giacomo~Torlai\textsuperscript{1,2,4} \end{center} \begin{center} ${\bf 1}$ Perimeter Institute for Theoretical Physics, Waterloo, Ontario N2L 2Y5, Canada \\ \mbox{${\bf 2}$ Department of Physics and Astronomy, University of Waterloo, Ontario N2L 3G1, Canada} \\ ${\bf 3}$ ICFO-Institut de Ciencies Fotoniques, Barcelona Institute of Science and Technology, 08860 Castelldefels (Barcelona), Spain \\ ${\bf 4}$ Center for Computational Quantum Physics, Flatiron Institute, 162 5th Avenue, New York, NY 10010, USA\\ * \href{mailto:[email protected]}{[email protected]} \end{center} \begin{center} \today \end{center} \section*{Abstract} {\bf As we enter a new era of quantum technology, it is increasingly important to develop methods to aid in the accurate preparation of quantum states for a variety of materials, matter, and devices. Computational techniques can be used to reconstruct a state from data, however the growing number of qubits demands ongoing algorithmic advances in order to keep pace with experiments. In this paper, we present an open-source software package called QuCumber that uses machine learning to reconstruct a quantum state consistent with a set of projective measurements. QuCumber uses a restricted Boltzmann machine to efficiently represent the quantum wavefunction for a large number of qubits. New measurements can be generated from the machine to obtain physical observables not easily accessible from the original data. } \noindent\rule{\textwidth}{1pt} \tableofcontents\thispagestyle{fancy} \noindent\rule{\textwidth}{1pt} \vspace{-1cm} \section{Introduction} Current advances in fabricating quantum technologies, as well as in reliable control of synthetic quantum matter, are leading to a new era of quantum hardware where highly pure quantum states are routinely prepared in laboratories. 
With the growing number of controlled quantum degrees of freedom, such as superconducting qubits, trapped ions, and ultracold atoms~\cite{kandala_hardware-efficient_2017,moll_quantum_2018,bernien_probing_2017,zhang_observation_2017}, reliable and scalable classical algorithms are required for the analysis and verification of experimentally prepared quantum states. Efficient algorithms can aid in extracting physical observables otherwise inaccessible from experimental measurements, as well as in identifying sources of noise to provide direct feedback for improving experimental hardware. However, traditional approaches for reconstructing unknown quantum states from a set of measurements, such as quantum state tomography, often suffer the exponential overhead that is typical of quantum many-body systems. Recently, an alternative path to quantum state reconstruction was put forward, based on modern machine learning (ML) techniques~\cite{torlai_learning_2016,torlai_neural-network_2018,torlai_latent_2018,carrasquilla_reconstructing_2018,lennon_efficiently_2018,kim_mixed_2018}. The most common approach relies on a powerful generative model called a \textit{restricted Boltzmann machine} (RBM)~\cite{smolensky_information_1986}, a stochastic neural network with two layers of binary units. A visible layer $\bm{v}$ describes the physical degrees of freedom, while a hidden layer $\bm{h}$ is used to capture high-order correlations between the visible units. Given a set of neural network parameters $\bm{\lambda}$, the RBM defines a probabilistic model described by the parametric distribution $p_{\bm{\lambda}}(\bm{v})$. RBMs have been widely used in the ML community for the pre-training of deep neural networks~\cite{hinton_training_2002}, for compressing high-dimensional data into lower-dimensional representations~\cite{hinton_reducing_2006}, and more~\cite{lecun_deep_2015}. More recently, RBMs have been adopted by the physics community in the context of representing both classical and quantum many-body states~\cite{CarleoTroyer2017Science,carleo_constructing_2018}. They are currently being investigated for their representational power~\cite{gao_efficient_2017,choo_symmetries_2018,glasser_neural-network_2018}, their relationship with tensor networks and the renormalization group~\cite{mehta_exact_2014,koch-janusz_mutual_2018,iso_scale-invariant_2018,lenggenhager_optimal_2018,chen_equivalence_2018}, and in other contexts in quantum many-body physics~\cite{PhysRevB.96.205152,weinstein_neural_2018,RBM_stabilizer}. In this post, we present QuCumber: a \textit{quantum calculator used for many-body eigenstate reconstruction}. QuCumber is an open-source Python package that implements neural-network quantum state reconstruction of many-body wavefunctions from projective measurement data. Examples of data to which QuCumber could be applied might be magnetic spin projections, orbital occupation number, polarization of photons, or the logical state of qubits. Given a training set of such measurements, QuCumber discovers the most likely compatible quantum state by finding the optimal set of parameters $\bm{\lambda}$ of an RBM.\@ A properly trained RBM is an approximation of the unknown quantum state underlying the data. It can be used to calculate various physical observables of interest, including measurements that may not be possible in the original experiment. This post is organized as follows. 
In Section~\ref{sec:positive}, we introduce the reconstruction technique for the case where all coefficients of the wavefunction are real and positive. We discuss the required format for input data, as well as training of the RBM and the reconstruction of typical observables. In Section~\ref{sec:complex}, we consider the more general case of a complex-valued wavefunction. We illustrate a general strategy to extract the phase structure from data by performing appropriate unitary rotations on the state before measurements. We then demonstrate a practical reconstruction of an entangled state of two qubits. Note, the detailed theory underlying the reconstruction methods used by QuCumber can be found in the original references \cite{torlai_learning_2016,torlai_neural-network_2018} and a recent review \cite{RBMreview}. A list of useful terms and equations can be found at the end of this post in the Glossary. \section{Positive wavefunctions}\label{sec:positive} We begin by presenting the application of QuCumber to reconstruct many-body quantum states described by wavefunctions $\ket{\Psi}$ with positive coefficients $\Psi(\x)=\braket{\x}{\Psi} \ge 0$, where $\ket{\x}=\ket{ \mathrm{x}_1,\dots,\mathrm{x}_N }$ is a reference basis for the Hilbert space of $N$ quantum degrees of freedom. The neural-network quantum state reconstruction requires raw data $\mathcal{D}=(\x_1,\x_2,\dots)$ generated through projective measurements of the state $\ket{ \Psi}$ in the reference basis. These measurements adhere to the probability distribution given by the Born rule, $P(\x)=|\Psi(\x)|^2$. Since the wavefunction is strictly positive, the quantum state is completely characterized by the measurement distribution, i.e.~$\Psi(\x)=\sqrt{P(\x)}$. The positivity of the wavefunction allows a simple and natural connection between quantum states and classical probabilistic models. QuCumber employs the probability distribution $p_{\bm{\lambda}}(\x)$ of an RBM (see Eq.~\ref{Eq:marginal_distribution} of the Glossary) to approximate the distribution $P(\x)$ underlying the measurement data. Using contrastive divergence (CD)~\cite{hinton_training_2002}, QuCumber trains the RBM to discover an optimal set of parameters ${\bm \lambda}$ that minimize the Kullback-Leibler (KL) divergence between the two distributions (see Eq.~\ref{Eq.KLdiv}). Upon successful training ($p_{\bm{\lambda}}(\x)\sim P(\x)$), we obtain an approximate representation of the target quantum state, \begin{equation}\label{wfpd} \psi_{\bm{\lambda}}(\x) \equiv \sqrt{p_{\bm{\lambda}}(\x)} \simeq\Psi(\x)\:. \end{equation} Note, the precise mathematical form of the marginal distribution $p_{\bm{\lambda}}(\x)$ defined in terms of an effective energy over the parameters of the RBM is defined in the Glossary. In the following, we demonstrate the application of QuCumber for the reconstruction of the ground-state wavefunction of the one-dimensional transverse-field Ising model (TFIM). The Hamiltonian is \begin{equation} \hat{H} = -J\sum_i \hat{\sigma}^z_i \hat{\sigma}^z_{i+1} - h \sum_i\hat{\sigma}^x_i\:, \label{TFIM} \end{equation} where $\hat{\sigma}^{x/z}_i$ are spin-1/2 Pauli operators acting on site $i$, and we assume open boundary conditions. For this example, we consider a chain with $N=10$ spins at the quantum critical point $J=h=1$. \subsection{Setup}\label{subsec:example} Given the small size of the system, the ground state $\ket{ \Psi}$ can be found with exact diagonalization. 
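As an aside, given the exact amplitudes, projective measurement data of this form can be generated with a few lines of \texttt{numpy}; the following sketch is purely illustrative and not part of QuCumber (the helper name and the bit-ordering convention are our own choices).
\begin{python}
import numpy as np

def sample_measurements(psi, num_samples, num_spins):
    # psi: array of 2**num_spins real amplitudes, e.g. from exact diagonalization
    probs = np.abs(psi) ** 2
    probs /= probs.sum()  # guard against rounding errors
    indices = np.random.choice(len(psi), size=num_samples, p=probs)
    # unpack each basis-state index into num_spins binary values (0 or 1)
    return np.array(
        [[(idx >> k) & 1 for k in range(num_spins)] for idx in indices]
    )
\end{python}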
The training dataset $\mathcal{D}$ is generated by sampling the distribution $P(\bm{\sigma}^z)=|\Psi(\bm{\sigma}^z)|^2$, obtaining a sequence of $N_S=10^5$ independent spin projections in the reference basis $\x = \bm{\sigma}^z$.\footnote{ The training dataset can be downloaded from \href{https://github.com/PIQuIL/QuCumber/blob/master/examples/Tutorial1_TrainPosRealWaveFunction/tfim1d_data.txt}{\texttt{https://github.com/PIQuIL/QuCumber/blob/master/\\examples/Tutorial1\_TrainPosRealWaveFunction/tfim1d\_data}} } Each data point in $\mathcal{D}$ consists of an array $\bm{\sigma}^z_j=(\sigma^z_1,\dots,\sigma^z_N)$ with shape \verb|(N,)| and should be passed to QuCumber as a numpy array or torch tensor. For example, $\bm{\sigma}^z_j=$ \verb|np.array([1,0,1,1,0,1,0,0,0,1])|, where we use $\sigma_j^z=0,1$ to represent a spin-down and spin-up state respectively. Therefore, the entire input data set is contained in an array with shape \verb|(N_S, N)|. Aside from the training data, QuCumber also allows us to import an exact wavefunction. This can be useful for monitoring the quality of the reconstruction during training. In our example, we evaluate the fidelity between the reconstructed state $\psi_{\bm{\lambda}}(\x)$ and the exact wavefunction $\Psi(\x)$. The training dataset, \verb|train_data|, and the exact ground state, \verb|true_psi|, are loaded with the data loading utility as follows: \begin{python} import qucumber.utils.data as data train_path = "tfim1d_data.txt" psi_path = "tfim1d_psi.txt" train_data, true_psi = data.load_data(train_path, psi_path) \end{python} If \verb|psi_path| is not provided, QuCumber will load only the training data. Next, we initialize an RBM quantum state $\psi_{\bm{\lambda}}(\bf{x})$ with random weights and zero biases using the constructor \verb|PositiveWaveFunction|: \begin{python} from qucumber.nn_states import PositiveWaveFunction state = PositiveWaveFunction(num_visible=10, num_hidden=10) \end{python} The number of visible units (\verb|num_visible|) must be equal to the number of physical spins $N$, while the number of hidden units (\verb|num_hidden|) can be adjusted to systematically increase the representational power of the RBM.\@ The quality of the reconstruction will depend on the structure underlying the specific quantum state and the ratio of visible to hidden units, $\alpha = \verb|num_hidden|/\verb|num_visible|$. In practice, we find that $\alpha = 1$ often leads to good approximations of positive wavefunctions~\cite{torlai_neural-network_2018}. However, in the general case, the value of $\alpha$ required for a given wavefunction should be explored and adjusted by the user. \subsection{Training} Once an appropriate representation of the quantum state has been defined, QuCumber trains the RBM through the function \verb|PositiveWaveFunction.fit|. Several input parameters need to be provided aside from the training dataset (\verb|train_data|). These include the number of training iterations (\verb|epochs|), the number of samples used for the positive/negative phase of CD (\verb|pos_batch_size|/\verb|neg_batch_size|), the learning rate (\verb|lr|) and the number of sampling steps in the negative phase of CD (\verb|k|). The last argument (\verb|callbacks|) allows the user to pass a set of additional functions to be evaluated during training. As an example of a callback, we show the \verb|MetricEvaluator|, which evaluates a function \verb|log_every| epochs during training. 
Given the small system size and the knowledge of the true ground state, we can evaluate the fidelity between the RBM state and the true ground-state wavefunction (\verb|true_psi|). Similarly, we can calculate the KL divergence between the RBM distribution $p_{\bm{\lambda}}(\x)$ and the data distribution $P(\x)$, which should approach zero for a properly trained RBM.\@ For the current example, we monitor the fidelity and KL divergence (defined in \verb|qucumber.utils.training_statistics|):
\begin{python}
from qucumber.callbacks import MetricEvaluator
import qucumber.utils.training_statistics as ts

log_every = 10
space = state.generate_hilbert_space(10)
callbacks = [
    MetricEvaluator(
        log_every,
        {"Fidelity": ts.fidelity, "KL": ts.KL},
        target_psi=true_psi,
        space=space,
        verbose=True
    )
]
\end{python}
With \verb|verbose=True|, the program will print the epoch number and all callbacks every \verb|log_every| epochs. Definitions of the fidelity and the KL divergence can be found in the Glossary. Note that the KL divergence is only tractable for small systems. The \verb|MetricEvaluator| will compute the KL exactly when provided with a list of all states in the Hilbert space. For convenience, these can be generated with \\ \verb|space = state.generate_hilbert_space(10)|.

Now that the metrics to monitor during training have been chosen, we can invoke the optimization with the \verb|fit| function of \verb|PositiveWaveFunction|.
\begin{python}
state.fit(
    train_data,
    epochs=500,
    pos_batch_size=100,
    neg_batch_size=100,
    lr=0.01,
    k=5,
    callbacks=callbacks,
)
\end{python}
Figure~\ref{fig:KL} shows the convergence of the fidelity and KL divergence during training. The convergence time will, in general, depend on the choice of hyperparameters. Finally, the network parameters $\bm{\lambda}$, together with the callbacks, can be saved to (or loaded from) a file:
\begin{python}
state.save(
    "filename.pt",
    metadata={
        "fidelity": callbacks[0].Fidelity,
        "KL": callbacks[0].KL
    },
)
state.load("filename.pt")
\end{python}
With this we have demonstrated the most basic aspects of QuCumber regarding training a model and verifying its accuracy. We note that in this example the evaluation utilized the knowledge of the exact ground state and the calculation of the KL divergence, which we reemphasize is tractable only for small system sizes. However, we point out that QuCumber is capable of carrying out the reconstruction of much larger systems. In such cases, users must rely on other estimators to evaluate the training, such as expectation values of physical observables (magnetization, energy, etc.).

\begin{figure}[hbt]
    \centering{}
    \includegraphics[width=\columnwidth, trim={0 15 0 0}, clip]{fid_KL.pdf}
    \caption{
        The fidelity (left) and the KL divergence (right) during training for the reconstruction of the ground state of the one-dimensional TFIM.\@
    }\label{fig:KL}
\end{figure}

\subsection{Reconstruction of physical observables}\label{Sec:Sampling_a-Trained_RBM}
In this section, we discuss how to calculate the average value of a generic physical observable $\hat{\mathcal{O}}$ from a trained RBM.\@ We start with the case of observables that are diagonal in the reference basis where the RBM was trained. We then discuss the more general cases of off-diagonal observables and entanglement entropies.
\subsubsection{Diagonal observables} We begin by considering an observable with only diagonal matrix elements, $\bra{ \bm{\sigma} } \hat{\mathcal{O}} \ket{ \bm{\sigma}^{\prime} }=\mathcal{O}_{\bm{\sigma}}\delta_{\bm{\sigma\sigma}^\prime}$ where for convenience we denote the reference basis ${\bf x}=\bm{\sigma}^z$ as $\bm{\sigma}$ unless otherwise stated. The expectation value of $\hat{\mathcal{O}}$ is given by \begin{equation} \ev*{\hat{\mathcal{O}}} = \frac{1}{\sum_{\bm{\sigma}} |\psi_{\bm{\lambda}}(\bm{\sigma})|^2} \sum_{\bm{\sigma}} \mathcal{O}_{\bm{\sigma}}|\psi_{\bm{\lambda}}(\bm{\sigma})|^2\:. \end{equation} The expectation value can be approximated by a Monte Carlo estimator, \begin{equation} \ev*{\hat{\mathcal{O}}} \approx \frac{1}{N_{\rm MC}} \sum_{k=1}^{N_{\rm MC}} \mathcal{O}_{\bm{\sigma}_k}\:, \end{equation} where the spin configurations $\bm{\sigma}_k$ are sampled from the RBM distribution $p_{\bm{\lambda}}(\bm{\sigma})$. This process is particularly efficient given the bipartite structure of the network which allows the use of block Gibbs sampling. A simple example for the TFIM is the average longitudinal magnetization per spin, $\ev{\hat\sigma^z} = \sum_j\ev*{\hat{\sigma}^z_j}/N$, which can be calculated directly on the spin configuration sampled by the RBM (i.e.,~the state of the visible layer). The visible samples are obtained with the \verb|sample| function of the RBM state object: \begin{python} samples = state.sample(num_samples=1000, k=10) \end{python} which takes the total number of samples (\verb|num_samples|) and the number of block Gibbs steps (\verb|k|) as input. Once these samples are obtained, the magnetization can be calculated simply as \begin{python} magnetization = samples.mul(2.0).sub(1.0).mean() \end{python} where we converted the binary samples of the RBM back into $\pm 1$ spins before taking the mean. \subsubsection{Off-diagonal observables} We turn now to the case of off-diagonal observables, where the expectation value assumes the following form \begin{equation} \ev*{\hat{\mathcal{O}}} = \frac{1}{\sum_{\bm{\sigma}} |\psi_{\bm{\lambda}}(\bm{\sigma})|^2} \sum_{\bm{\sigma\sigma}^\prime} \psi_{\bm{\lambda}}^*(\bm{\sigma}) \psi_{\bm{\lambda}}(\bm{\sigma}^\prime)\mathcal{O}_{\bm{\sigma\sigma}^\prime}\:. \end{equation} This expression can once again be approximated with a Monte Carlo estimator \begin{equation} \ev*{\hat{\mathcal{O}}} \approx \frac{1}{N_{\rm MC}} \sum_{k=1}^{N_{\rm MC}} \mathcal{O}^{[L]}_{\bm{\sigma}_k} \end{equation} of the so-called \emph{local estimator} of the observable: \begin{equation} \mathcal{O}^{[L]}_{\bm{\sigma}_k}=\sum_{\bm{\sigma}^\prime}\frac{\psi_{\bm{\lambda}}(\bm{\sigma}^\prime)}{\psi_{\bm{\lambda}}(\bm{\sigma}_k)} \mathcal{O}_{\bm{\sigma}_k\bm{\sigma}^\prime}\:. \end{equation} As long as the matrix representation $\mathcal{O}_{\bm{\sigma\sigma}^\prime}$ is sufficiently sparse in the reference basis, the summation can be evaluated efficiently. \begin{figure}[t] \centering{} \includegraphics[width=\columnwidth, trim={0 15 0 0}, clip]{obs.pdf} \caption{ Reconstruction of the magnetic observables for the TFIM chain with $N=10$ spins. We show the average longitudinal (left) and transverse (right) magnetization per site obtained by sampling from a trained RBM.\@ The dashed line denotes the results from exact diagonalization. 
}\label{tfim_magn}
\end{figure}

%\subsubsection{Transverse magnetization}\label{subsubsec:sigmax}
As an example, we consider the specific case of the transverse magnetization for the $j$-th spin, $\ev*{\hat{\sigma}^x_j}$, with matrix elements
\begin{equation}
    \mel{\bm{\sigma}}{\hat{\sigma}^x_j}{\bm{\sigma}^{\prime}}=\delta_{\sigma_j^\prime,1-\sigma_j}\prod_{i\ne j}\delta_{\sigma_i^\prime,\sigma_i}\:.
\end{equation}
Therefore, the expectation value reduces to the Monte Carlo average of the local observable
\begin{equation}
    {(\sigma^x_j)}^{[L]}=\frac{\psi_{\bm{\lambda}}(\sigma_1,\dots,1-\sigma_j,\dots,\sigma_N)}
    {\psi_{\bm{\lambda}}(\sigma_1,\dots,\sigma_j,\dots,\sigma_N)} \:,
\end{equation}
evaluated on spin configurations $\bm{\sigma}_k$ sampled from the RBM distribution $p_{\bm{\lambda}}(\bm{\sigma})$.

QuCumber provides an interface for sampling off-diagonal observables in the \verb|ObservableBase| class. Thorough examples are available in the tutorial section in the \href{https://qucumber.readthedocs.io/en/stable/}{documentation}.\!\!
\footnote{The observables tutorial is available at
\href{https://qucumber.readthedocs.io/en/stable/\_examples/Tutorial3\_DataGeneration\_CalculateObservables/tutorial\_sampling\_observables.html
}{\texttt{https://qucumber.readthedocs.io/en/stable/\_examples\\/Tutorial3\_DataGeneration\_CalculateObservables/tutorial\_sampling\_observables.html}}
}
As an example, $\sigma^x$ can be written as an observable class with
\begin{python}
class SigmaX(ObservableBase):
    def apply(self, nn_state, samples):
        psi = nn_state.psi(samples)
        psi_ratio_sum = torch.zeros_like(psi)
        for i in range(samples.shape[-1]):  # sum over spin sites
            flip_spin(i, samples)  # flip the spin at site i
            # add ratio psi_(-i) / psi to the running sum
            psi_flip = nn_state.psi(samples)
            psi_ratio = cplx.elementwise_division(psi_flip, psi)
            psi_ratio_sum.add_(psi_ratio)
            flip_spin(i, samples)  # flip it back
        # take real part and divide by number of spins
        return psi_ratio_sum[0].div_(samples.shape[-1])
\end{python}
The value of the observable can be estimated from a set of samples with:
\begin{python}
SigmaX().statistics_from_samples(state, samples)
\end{python}
which produces a dictionary containing the mean, variance, and standard error of the observable. Similarly, the user can define other observables like the energy.

The reconstruction of two magnetic observables for the TFIM is shown in Fig.~\ref{tfim_magn}, where a different RBM was trained for each value of the transverse field $h$. In the left plot, we show the average longitudinal magnetization per site, which can be calculated directly from the configurations sampled by the RBM.\@ In the right plot, we show the off-diagonal observable of transverse magnetization. In both cases, QuCumber successfully discovers an optimal set of parameters $\bm{\lambda}$ that accurately approximate the ground-state wavefunction underlying the data.

\subsubsection{Entanglement entropy}\label{sec:swap}
A quantity of significant interest in quantum many-body systems is the degree of entanglement between a subregion $A$ and its complement $\bar{A}$. Numerically, measurement of bipartite entanglement entropy is commonly accessed through the computation of the second R\'enyi entropy $S_2 = - \ln {\rm Tr}(\rho_A^2)$.
When one has access to a pure state wavefunction $\psi_{\bm{\lambda}}(\x)$, R\'enyi entropies can be calculated as an expectation value of the ``Swap'' operator~\cite{Swap},
\begin{equation}\label{Eq:renyi_entropy}
    S_2 = - \ln \left\langle{ \widehat{\textrm{Swap}}_A }\right\rangle.
\end{equation}
It is essentially an off-diagonal observable that acts on an extended product space consisting of two independent copies of the wavefunction, $\psi_{\bm{\lambda}}(\x) \otimes \psi_{\bm{\lambda}}(\x)$, referred to as ``replicas''. As the name suggests, the action of the Swap operator is to swap the spin configurations in region $A$ between the replicas,
\begin{equation}
    \widehat{\textrm{Swap}}_A |\bm{\sigma}_A, \bm{\sigma}_{\bar A}\rangle_1 \otimes |\bm{\sigma}^{\prime}_A, \bm{\sigma}^{\prime}_{\bar A}\rangle_2 =
    |\bm{\sigma}^{\prime}_A, \bm\sigma_{\bar A}\rangle_1 \otimes |\bm\sigma_A, \bm\sigma^{\prime}_{\bar A}\rangle_2 .
\end{equation}
Here the subscript of the ket indicates the replica index, while the two labels inside a ket, such as ${\bm \sigma}_A, {\bm \sigma}_{\bar A}$, describe the spin configurations within the subregion and its complement.

In QuCumber, the Swap operator is implemented as a routine within the \verb|entanglement| observable unit,
\begin{python}
def swap(s1, s2, A):
    _s = s1[:, A].clone()
    s1[:, A] = s2[:, A]
    s2[:, A] = _s
    return s1, s2
\end{python}
where \verb|s1| and \verb|s2| are batches of samples produced from each replica, and \verb|A| is a list containing the indices of the sites in subregion $A$. While ideally those samples should be entirely independent, in order to save computational costs, QuCumber just splits a given batch into two equal parts and treats them as if they were independent samples. This is implemented within the \verb|SWAP| observable,
\begin{python}
class SWAP(ObservableBase):
    def __init__(self, A):
        self.A = A

    def apply(self, nn_state, samples):
        _ns = samples.shape[0] // 2
        samples1 = samples[:_ns, :]
        samples2 = samples[_ns : _ns * 2, :]
        psi_ket1 = nn_state.psi(samples1)
        psi_ket2 = nn_state.psi(samples2)
        psi_ket = cplx.elementwise_mult(psi_ket1, psi_ket2)
        psi_ket_star = cplx.conjugate(psi_ket)
        samples1_, samples2_ = swap(samples1, samples2, self.A)
        psi_bra1 = nn_state.psi(samples1_)
        psi_bra2 = nn_state.psi(samples2_)
        psi_bra = cplx.elementwise_mult(psi_bra1, psi_bra2)
        psi_bra_star = cplx.conjugate(psi_bra)
        return cplx.real(
            cplx.elementwise_division(psi_bra_star, psi_ket_star)
        )
\end{python}
Note the similarity in the implementation to that for the transverse magnetization observable from the last section, once the amplitude of a sample is substituted with the product of amplitudes drawn from each replica. Using this observable, we can estimate the R\'enyi entropy of the region containing the first 5 sites in the chain using Eq.~\ref{Eq:renyi_entropy},
\begin{python}
A = [0, 1, 2, 3, 4]
swap_ = SWAP(A)
swap_stats = swap_.statistics_from_samples(nn_state, new_samples)
S_2 = -np.log(swap_stats["mean"])
\end{python}
We apply this measurement procedure to a TFIM chain with results shown in Fig.~\ref{ee_learn}. As was the case with the magnetization observables, the trained RBM gives a good approximation to the second R\'enyi entropy for different subregion $A$ sizes. Being a basis-independent observable, this constitutes a useful test on the ability of QuCumber to capture the full wavefunction from the information contained in a single-basis dataset for TFIM.
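Since $S_2$ depends nonlinearly on $\langle\widehat{\textrm{Swap}}_A\rangle$, a first-order error propagation can be used to attach an uncertainty to this estimate. The brief sketch below assumes that the standard error reported by \verb|statistics_from_samples| is exposed under the key \verb|"std_error"|.
\begin{python}
import numpy as np

swap_mean = swap_stats["mean"]
swap_err = swap_stats["std_error"]  # assumed key for the standard error

S_2 = -np.log(swap_mean)
S_2_err = swap_err / abs(swap_mean)  # |dS_2 / d<Swap>| = 1 / <Swap>
\end{python}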
\begin{figure}[htb] \centering \includegraphics[]{ee} \caption{ The second R\'enyi entropy for the TFIM chain with $N=10$ spins. The number of sites in the entangled bipartition $A$ is indicated by the horizontal axis. The markers indicate values obtained through the ``Swap'' operator applied to the samples from a trained RBM.\@ The dashed line denotes the result from exact diagonalization. }\label{ee_learn} \end{figure} \section{Complex wavefunctions}\label{sec:complex} For positive wavefunctions, the probability distribution underlying the outcomes of projective measurements in the reference basis contains all possible information about the unknown quantum state. However, in the more general case of a wavefunction with a non-trivial sign or phase structure, this is not the case. In this section, we consider a target quantum state where the wavefunction coefficients in the reference basis can be complex-valued, $\Psi(\bm{\sigma})=\Phi(\bm{\sigma})e^{i\theta(\bm{\sigma})}$. We continue to choose the reference basis as $\bm{\sigma} = \bm{\sigma}^z$. We first need to generalize the RBM representation of the quantum state to capture a generic complex wavefunction. To this end, we introduce an additional RBM with marginalized distribution $p_{\bm{\mu}}(\bm{\sigma})$ parameterized by a new set of network weights and biases $\bm{\mu}$. We use this to define the quantum state as: \begin{equation} \psi_{\bm{\lambda} \bm{\mu}} (\bm{\sigma})= \sqrt{p_{\bm{\lambda}} (\bm{\sigma})} e^{i \phi_{\bm{\mu}} (\bm{\sigma})/2} \end{equation} where $\phi_{\bm{\mu}}(\bm{\sigma}) = \log (p_{\bm{\mu}} (\bm{\sigma}))$~\cite{torlai_neural-network_2018}. In this case, the reconstruction requires a different type of measurement setting. It is easy to see that projective measurements in the reference basis do not convey any information on the phases $\theta(\bm{\sigma})$, since $P(\bm{\sigma})=|\Psi(\bm{\sigma})|^2=\Phi^2(\bm{\sigma})$. The general strategy to learn a phase structure is to apply a unitary transformation $\bm{\mathcal{U}}$ to the state $\ket{\Psi}$ before the measurements, such that the resulting measurement distribution $P^{\:\prime}(\bm{\sigma})=|\Psi^\prime(\bm{\sigma})|^2$ of the rotated state $\Psi^\prime(\bm{\sigma})=\bra{ \bm{\sigma} } \:\bm{\mathcal{U}}\:\ket{ \Psi}$ contains fingerprints of the phases $\theta(\bm{\sigma})$ (Fig.~\ref{phase_learn}). In general, different rotations must be independently applied to gain full information on the phase structure. We make the assumption of a tensor product structure of the rotations, $\bm{\mathcal{U}}=\bigotimes_{j=1}^N\hat{\mathcal{U}}_j$. This is equivalent to a local change of basis from $\ket{ \bm{\sigma}}$ to $\lbrace|\bm{\sigma}^{\bm{b}}\rangle=|\sigma_1^{b_1},\dots,\sigma_N^{b_N}\rangle\rbrace$, where the vector $\bm{b}$ identifies the local basis $b_j$ for each site $j$. The target wavefunction in the new basis is given by \begin{equation} \begin{split} \Psi(\bm{\sigma}^{\bm{b}}) &=\langle \bm{\sigma}^{\bm{b}}|\Psi\rangle=\sum_{\bm{\sigma}}\langle \bm{\sigma}^{\bm{b}}|\bm{\sigma}\rangle\langle\bm{\sigma}|\Psi\rangle\\ &=\sum_{\bm{\sigma}}\bm{\mathcal{U}}(\bm{\sigma}^{\bm{b}},\bm{\sigma})\Psi(\bm{\sigma})\:, \end{split} \end{equation} and the resulting measurement distribution is \begin{equation} P_{\bm{b}}(\bm{\sigma}^{\bm{b}})=\bigg|\sum_{\bm{\sigma}}\bm{\mathcal{U}}(\bm{\sigma}^{\bm{b}},\bm{\sigma})\Psi(\bm{\sigma})\bigg|^2\:. 
\end{equation}

\begin{figure}[htb]
\centering
\includegraphics[width=\columnwidth, trim={0 0 0 40}, clip]{2qubits_rotation}
\caption{
Unitary rotations for two qubits. (left) Measurement in the reference basis. (right)
Measurement in the rotated basis. The unitary rotation (the Hadamard gate on qubit
$\sigma_0$) is applied after state preparation and before the projective measurement.
}\label{phase_learn}
\end{figure}

To clarify the procedure, let us consider the simple example of a quantum state of two
qubits:
\begin{equation}
|\Psi\rangle=\sum_{\sigma_0,\sigma_1}\Phi_{\sigma_0\sigma_1}e^{i\theta_{\sigma_0\sigma_1}}|\sigma_0\sigma_1\rangle\:,
\end{equation}
and rotation $\bm{\mathcal{U}}=\hat{\mathrm{H}}_0\otimes\hat{\mathcal{I}}_1$, where
$\hat{\mathcal{I}}$ is the identity operator and
\begin{equation}
\hat{\mathrm{H}}=\frac{1}{\sqrt{2}}\begin{bmatrix}1 & 1\\ 1 & -1 \end{bmatrix}
\end{equation}
is called the {\it Hadamard gate}. This transformation is equivalent to rotating the
qubit $\sigma_0$ from the reference $\sigma_0^z$ basis to the $\sigma_0^x$ basis. A
straightforward calculation leads to the following probability distribution of the
projective measurement in the new basis $|\sigma_0^x,\sigma_1\rangle$:
\begin{equation}
P_{\bm{b}}(\sigma_0^x,\sigma_1)=\frac{\Phi_{0\sigma_1}^2+\Phi_{1\sigma_1}^2}{2}+(1-2\sigma_0^x)\,\Phi_{0\sigma_1}\Phi_{1\sigma_1}\cos(\Delta\theta)\:,
\end{equation}
where $\Delta\theta=\theta_{0\sigma_1}-\theta_{1\sigma_1}$. Therefore, the statistics
collected by measuring in this basis implicitly contain partial information on the
phases. To obtain the full phase structure, additional transformations are required, one
example being the rotation from the reference basis to the $\sigma^y_j$ local basis,
realized by the elementary gate
\begin{equation}
\hat{\mathrm{K}}=\frac{1}{\sqrt{2}}\begin{bmatrix}1 & -i\\ 1 & i \end{bmatrix}\:.
\end{equation}

\subsection{Setup}

We now proceed to use QuCumber to reconstruct a complex-valued wavefunction. For
simplicity, we restrict ourselves to two qubits and consider the general case of a
quantum state with random amplitudes $\Phi_{\sigma_0\sigma_1}$ and random phases
$\theta_{\sigma_0\sigma_1}$. This example is available in the online tutorial.
\footnote{The tutorial for complex wavefunctions can be found at
\href{https://qucumber.readthedocs.io/en/stable/\_examples/Tutorial2_TrainComplexWaveFunction/tutorial_qubits.html }{\texttt{https://qucumber.readthedocs.io/en/\\stable/\_examples/Tutorial2\_TrainComplexWaveFunction/tutorial\_qubits.html}}
}
We begin by importing the required packages:
\begin{python}
from qucumber.nn_states import ComplexWaveFunction

import qucumber.utils.data as data
import qucumber.utils.unitaries as unitaries
import qucumber.utils.cplx as cplx
\end{python}
Since we are dealing with a complex wavefunction, we load the corresponding module
\verb|ComplexWaveFunction| to build the RBM quantum state
$\psi_{\bm{\lambda\mu}}(\bm{\sigma})$. Furthermore, the following additional utility
modules are required: the \verb|utils.data| module to load the training data, the
\verb|utils.cplx| backend for complex algebra, and the \verb|utils.unitaries| module
which contains a set of elementary local rotations. By default, the set of unitaries
includes local rotations to the $\sigma^x$ and $\sigma^y$ bases, implemented by the
$\hat{\mathrm{H}}$ and $\hat{\mathrm{K}}$ gates respectively.
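For completeness, a calculation analogous to the two-qubit Hadamard example above (a
sketch we include here for illustration) shows that measuring qubit $\sigma_0$ in the
$\sigma^y$ basis via the $\hat{\mathrm{K}}$ gate yields
\begin{equation}
P_{\bm{b}}(\sigma_0^y,\sigma_1)=\frac{\Phi_{0\sigma_1}^2+\Phi_{1\sigma_1}^2}{2}-(1-2\sigma_0^y)\,\Phi_{0\sigma_1}\Phi_{1\sigma_1}\sin(\Delta\theta)\:,
\end{equation}
so that the $\sigma^x$ and $\sigma^y$ measurement statistics together determine both
$\cos(\Delta\theta)$ and $\sin(\Delta\theta)$, and hence the relative phase itself.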
We continue by loading the data in QuCumber, which is done using the \verb|load_data|
function of the data utility:
\begin{python}
train_path = "qubits_train.txt"
train_bases_path = "qubits_train_bases.txt"
psi_path = "qubits_psi.txt"
bases_path = "qubits_bases.txt"

train_samples, true_psi, train_bases, bases = data.load_data(
    train_path, psi_path, train_bases_path, bases_path
)
\end{python}
As before, we may load the true target wavefunction from \verb|qubits_psi.txt|, which
can be used to calculate the fidelity and KL divergence. In contrast with the positive
case, we now have measurements performed in different bases. Therefore, the training
data consists of an array of qubit projections $(\sigma_0^{b_0},\sigma_1^{b_1})$ in
\verb|qubits_train.txt|, together with the corresponding bases $(b_0,b_1)$ in which each
measurement was taken, in \verb|qubits_train_bases.txt|. Finally, QuCumber loads the set
of all the bases appearing in the training dataset, stored in \verb|qubits_bases.txt|.
This is required to properly configure the various elementary unitary rotations that
need to be applied to the RBM state during the training. For this example, we generated
measurements in the following bases:
\begin{equation}
(b_0, b_1)=(\mathrm{z},\mathrm{z})\:,\:(\mathrm{x},\mathrm{z})\:,\:(\mathrm{z},\mathrm{x})\:,\:(\mathrm{y},\mathrm{z})\:,\:(\mathrm{z},\mathrm{y})\:.
\end{equation}
Before the training, we initialize the set of unitary rotations and create the RBM state
object. In the case of the provided dataset, the unitaries are the $\hat{\mathrm{H}}$
and $\hat{\mathrm{K}}$ gates. The required dictionary can be created with
\verb|unitaries.create_dict()|. By default, when \verb|unitaries.create_dict()| is
called, it will contain the identity, the $\hat{\mathrm{H}}$ gate, and the
$\hat{\mathrm{K}}$ gate, with the keys \verb|Z|, \verb|X|, and \verb|Y|, respectively.
It is possible to add additional gates by specifying them as
\begin{python}
U = torch.tensor([[<re_part>], [<im_part>]], dtype=torch.double)
unitary_dict = unitaries.create_dict(<unitary_name>=U)
\end{python}
where \verb|re_part|, \verb|im_part|, and \verb|unitary_name| are to be specified by the
user. We then initialize the complex RBM object with
\begin{python}
state = ComplexWaveFunction(
    num_visible=2,
    num_hidden=2,
    unitary_dict=unitary_dict,
)
\end{python}
The key difference between positive and complex wavefunction reconstruction is the
requirement of additional measurements in different bases. Despite this, loading the
data, initializing models, and training the RBMs are all very similar to the positive
case, as we now discuss.

\subsection{Training}

As in the case of a positive wavefunction, for the complex case QuCumber optimizes the
network parameters to minimize the KL divergence between the data and the RBM
distribution. When measuring in multiple bases, the optimization now runs over the set
of parameters $(\bm{\lambda},\bm{\mu})$ and minimizes the sum of KL divergences between
the data distribution $P_{\bm{b}}(\bm{\sigma}^{\bm{b}})$ and the RBM distribution
$|\psi_{\bm{\lambda\mu}}(\bm{\sigma}^{\bm{b}})|^2$ for each basis $\bm{b}$ appearing in
the training dataset~\cite{torlai_neural-network_2018}. For example, if a given training
sample is measured in the basis $(\mathrm{x},\mathrm{z})$, QuCumber applies the
appropriate unitary rotation $\bm{\mathcal{U}}=\hat{\mathrm{H}}_0\otimes\hat{\mathcal{I}}_1$
to the RBM state before collecting the gradient signal.
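Written out explicitly (a sketch of the objective implied by the description above,
using the definition of the KL divergence given in Appendix~\ref{Glossary}; the symbol
$\mathcal{C}$ is introduced here only for convenience), the quantity minimized with
respect to $(\bm{\lambda},\bm{\mu})$ is
\begin{equation}
\mathcal{C}(\bm{\lambda},\bm{\mu})=\sum_{\bm{b}}\sum_{\bm{\sigma}^{\bm{b}}}P_{\bm{b}}(\bm{\sigma}^{\bm{b}})\log\frac{P_{\bm{b}}(\bm{\sigma}^{\bm{b}})}{|\psi_{\bm{\lambda\mu}}(\bm{\sigma}^{\bm{b}})|^2}\:,
\end{equation}
where the outer sum runs over the bases present in the training set.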
Similar to the case of the positive wavefunction, we generate the Hilbert space (to
compute the fidelity and KL divergence) and initialize the callbacks
\begin{python}
state.space = state.generate_hilbert_space(2)  # two visible units

callbacks = [
    MetricEvaluator(
        log_every,
        {"Fidelity": ts.fidelity, "KL": ts.KL},
        target_psi=true_psi,
        bases=bases,
        verbose=True,
        space=state.space,
    )
]
\end{python}
The training is carried out by calling the \verb|fit| function of
\verb|ComplexWaveFunction|, given the set of hyperparameters
\begin{python}
state.fit(
    train_samples,
    epochs=100,
    pos_batch_size=10,
    neg_batch_size=10,
    lr=0.05,
    k=5,
    input_bases=train_bases,
    callbacks=callbacks,
)
\end{python}
In Fig.~\ref{fig:complex} we show the total KL divergence and the fidelity with the true
two-qubit state during training. After successfully training a QuCumber model, we can
once again compute expectation values of physical observables, as discussed in
Section~\ref{Sec:Sampling_a-Trained_RBM}.

\begin{figure}[htb]
\centering{}
\includegraphics[width=\textwidth, trim={0 15 0 0}, clip]{complex_fid_KL.pdf}
\caption{
Training a complex RBM with QuCumber on random two-qubit data. We show the fidelity
(left) and the KL divergence (right) as a function of the training epochs.
}\label{fig:complex}
\end{figure}

\section{Conclusion}

We have introduced the open source software package QuCumber, a quantum calculator used
for many-body eigenstate reconstruction. QuCumber is capable of taking input data
representing projective measurements of a quantum wavefunction, and reconstructing the
wavefunction using a restricted Boltzmann machine (RBM). Once properly trained, QuCumber
can produce a new set of measurements, sampled stochastically from the RBM.\@ These
samples, generated in the reference basis, can be used to verify the training of the RBM
against the original data set. More importantly, they can be used to calculate
expectation values of many physical observables. In fact, any expectation value
typically estimated by conventional Monte Carlo methods can be implemented as an
estimator in QuCumber. Such estimators may be inaccessible in the reference basis, for
example, or they may be difficult or impossible to implement in the setup from which the
original data was obtained. This is particularly relevant for experiments, where it is
easy to imagine many possible observables that are inaccessible due to fundamental or
technical challenges.

Future versions of QuCumber, as well as the next generation of quantum state
reconstruction software, may explore different generative models, such as variational
autoencoders, generative adversarial networks, or recurrent neural networks. The
techniques described in this paper can also be extended to reconstruct mixed states, via
the purification technique described in Reference~\cite{torlai_latent_2018}. In
addition, future techniques may include hybridization between machine learning and other
well-established methods in computational quantum many-body physics, such as variational
Monte Carlo and tensor networks~\cite{carrasquilla_reconstructing_2018}.

\section*{Acknowledgements}

We acknowledge M. Albergo, G. Carleo, J. Carrasquilla, D. Sehayek, and L. Hayward Sierens
for stimulating discussions. We thank the Perimeter Institute for Theoretical Physics for
the continuing support of PIQuIL.\@

\paragraph{Author contributions}
Authors are listed alphabetically. For an updated record of individual contributions,
consult the repository at \url{https://github.com/PIQuIL/QuCumber/graphs/contributors}.
\paragraph{Funding information} This research was supported by the Natural Sciences and Engineering Research Council of Canada (NSERC), the Canada Research Chair program, and the Perimeter Institute for Theoretical Physics. We also gratefully acknowledge the support of NVIDIA Corporation with the donation of the Titan Xp GPU used in this work. Research at Perimeter Institute is supported by the Government of Canada through Industry Canada and by the Province of Ontario through the Ministry of Research \& Innovation. P.~H. acknowledges support from ICFOstepstone, funded by the Marie Sklodowska-Curie Co-funding of regional, national and international programmes (GA665884) of the European Commission, as well as by the Severo Ochoa 2016{--}2019' program at ICFO (SEV{--}2015{--}0522), funded by the Spanish Ministry of Economy, Industry, and Competitiveness (MINECO). \appendix \section{Glossary}\label{Glossary} This section contains an overview of terms discussed in the document which are relevant for RBMs. For more detail we refer the reader to the code documentation on \url{https://qucumber.readthedocs.io/en/stable/}, and References~\cite{hinton_training_2002, hinton2012practical}. \begin{itemize} \item \textit{Batch}: A subset of data upon which the gradient is computed and the network parameters are adjusted accordingly. A smaller batch size often results in a more stochastic trajectory, while a large batch size more closely approximates the exact gradient and has less variance. \item \textit{Biases}: Adjustable parameters in an RBM, denoted by $b_j$ and $c_i$ in Eq.~\eqref{RBMenergy}. \item \textit{Contrastive divergence}: An approximate maximum-likelihood learning algorithm for RBMs~\cite{hinton_training_2002}. CD estimates the gradient of the effective energy~\eqref{RBMenergy} with respect to model parameters by using Gibbs sampling to compare the generated and target distributions. \item \textit{Energy}: The energy of the joint configuration $(\bm{v}, \bm{h})$ of a RBM is defined as follows: \begin{equation} E_{\bm{\lambda}}(\bm{v},\bm{h}) = - \sum\limits_{j=1}^{n_v} b_j v_j - \sum\limits_{i=1}^{n_h} c_i h_i - \sum\limits_{ij} h_i W_{ij} v_j. \label{RBMenergy} \end{equation} \item \textit{Effective energy}: Obtained from the energy by tracing out the hidden units $\bm{h}$; often called the ``free energy'' in machine learning literature. \begin{equation} \mathcal{E}_{\bm{\lambda}}(\bm{v}) = - \sum\limits_{j=1}^{n_v} b_j v_j - \sum\limits_{i=1}^{n_h} \log \left[ 1 + \exp \left( \sum\limits_{j}^{n_v} W_{ij}v_j +c_i\right) \right]. \label{RBMeffectiveenergy} \end{equation} \item \textit{Epoch}: A single pass through an entire training set. For example, with a training set of 1,000 samples and a batch size of 100, one epoch consists of 10 updates of the parameters. \item \textit{Gibbs sampling}: A Monte Carlo algorithm that samples from the conditional distribution of one variable, given the state of other variables. In an RBM, the restricted weight connectivity allows Gibbs sampling between the visible ``block'', conditioned on the hidden ``block'', and vice versa. \item \textit{Hidden units}: There are $n_h$ units in the hidden layer of the RBM, denoted by the vector $\bm{h}=(h_1, \ldots, h_{n_h})$. The number of hidden units can be adjusted to increase the representational capacity of the RBM.\@ \item \textit{Hyperparameters:} A set of parameters that are not adjusted by a neural network during training. 
Examples include the learning rate, number of hidden units, batch size, and number of training epochs. \item \textit{Joint distribution}: The RBM assigns a probability to each joint configuration $(\bm v, \bm h)$ according to the Boltzmann distribution: \begin{equation} p_{\bm{\lambda}}(\bm{v},\bm{h}) = \frac{1}{Z_{\bm{\lambda}}} e^{-E_{\bm{\lambda}}(\bm{v},\bm{h})}. \end{equation} \item \textit{KL divergence}: The Kullback-Leibler divergence, or relative entropy, is a measure of the ``distance'' between two probability distributions $P$ and $Q$, defined as: \begin{equation} \label{Eq.KLdiv} \mathrm{KL}(P\:||\:Q)=\sum_{\bm{v}}P(\bm{v})\log\frac{P(\bm{v})}{Q(\bm{v})} . \end{equation} The KL divergence between two identical distributions is zero. Note that it is not symmetric between $P$ and $Q$. \item \textit{Learning rate}: The step size used in the gradient descent algorithm for the optimization of the network parameters. A small learning rate may result in better optimization but will take more time to converge. If the learning rate is too high, training might not converge or will find a poor optimum. \item \textit{Marginal distribution}: Obtained by marginalizing out the hidden layer from the joint distribution via \begin{equation}\label{Eq:marginal_distribution} p_{\bm{\lambda}}(\bm{v}) = \frac{1}{Z_{\bm{\lambda}}} \sum_{\bm{h}} e^{-E_{\bm{\lambda}}(\bm{v},\bm{h})} = \frac{1}{Z_{\bm{\lambda}}} e^{- \mathcal{E}_{\bm{\lambda}}(\bm{v})}. \end{equation} \item \textit{QuCumber}: A quantum calculator used for many-body eigenstate reconstruction. \item \textit{Parameters}: The set of weights and biases $\bm{\lambda} = \{\bm{W},\bm{b},\bm{c}\}$ characterizing the RBM energy function. These are adjusted during training. \item \textit{Partition function}: The normalizing constant of the Boltzmann distribution. It is obtained by tracing over all possible pairs of visible and hidden vectors: \begin{equation} Z_{\bm{\lambda}} = \sum\limits_{\bm{v},\bm{h}}e^{-E_{\bm{\lambda}}(\bm{v},\bm{h})}. \end{equation} \item \textit{Restricted Boltzmann Machine}: A two-layer network with bidirectionally connected stochastic processing units. ``Restricted'' refers to the connections (or weights) between the visible and hidden units: each visible unit is connected with each hidden unit, but there are no intra-layer connections. \item \textit{Visible units}: There are $n_v$ units in the visible layer of the RBM, denoted by the vector $\bm{v}=(v_1,\dots,v_{n_v})$. These units correspond to the physical degrees of freedom. In the cases considered in this paper, the number of visible units is equal to the number of spins $N$. \item \textit{Weights}: $W_{ij}$ is the symmetric connection or interaction between the visible unit $v_j$ and the hidden unit $h_i$. \end{itemize} \bibliography{bib/RBMs,bib/RGandDL,bib/TNandDL,bib/MLtomography,bib/RBMphysics,bib/bibliography,bib/representationsDL,bib/moreReferences} \nolinenumbers{} \end{document}
{ "alphanum_fraction": 0.7554259332, "avg_line_length": 49.659160696, "ext": "tex", "hexsha": "1822d52fae897c863cd34c6bba403e25f9800472", "lang": "TeX", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2021-05-25T07:54:15.000Z", "max_forks_repo_forks_event_min_datetime": "2021-05-25T07:54:15.000Z", "max_forks_repo_head_hexsha": "128f68b7a2b1187a7f252b29c87e2de724fa200f", "max_forks_repo_licenses": [ "Apache-2.0", "BSD-3-Clause" ], "max_forks_repo_name": "GTorlai/QuCumber", "max_forks_repo_path": "docs/scipost/main.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "128f68b7a2b1187a7f252b29c87e2de724fa200f", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0", "BSD-3-Clause" ], "max_issues_repo_name": "GTorlai/QuCumber", "max_issues_repo_path": "docs/scipost/main.tex", "max_line_length": 440, "max_stars_count": 2, "max_stars_repo_head_hexsha": "128f68b7a2b1187a7f252b29c87e2de724fa200f", "max_stars_repo_licenses": [ "Apache-2.0", "BSD-3-Clause" ], "max_stars_repo_name": "GTorlai/QuCumber", "max_stars_repo_path": "docs/scipost/main.tex", "max_stars_repo_stars_event_max_datetime": "2021-06-18T18:37:30.000Z", "max_stars_repo_stars_event_min_datetime": "2021-05-25T07:54:11.000Z", "num_tokens": 13224, "size": 48517 }
\section{Introduction} \label{891_1:sec:introduction} %% %% %% Now in Thesis introduction %% %% %% By the middle of the last century it was well established that the %% scale-heights and velocity dispersions of stars in the solar %% neighborhood increase with age \citep[see][for a summary of this early %% work, particularly the chapters contributed by Elvius and %% Delhaye]{Blaauw65}. The seminal work by \citet{Roman50} demonstrated %% that the disk kinematics also depended on metallicity. Today these %% patterns are known in the literature on Galactic archaeology as %% age-velocity-metallicity (abundance) relations \citep[AVM$\alpha$-R; %% e.g.,][]{Aumer09,Minchev14}. Observational advances continued for %% the solar neighborhood \citep[e.g.,][]{Edvardsson93, Dehnen98, %% Nordstrom04}, and by the beginning of this century the complexity of %% these relations have been mapped throughout much of Milky Way (MW) by %% wide-field spectroscopic surveys (e.g., RAVE, \citealt{steinmetz06a}; %% BRAVA, \citealt{howard08a}, SEGUE, \citealt{yanny09a}, LAMOST, %% \citealt{zhao12a} GALAH, \citealt{desilva15a}, %% Gaia-ESO,\citealt{gilmore12a}; and APOGEE-1 and -2, %% \citealt{Majewski15}). The radial gradients in these relations are %% beautifully shown by, e.g., by \citet{Bovy12c} and \citet{Hayden15}, %% illustrating both metallicity and abundance as well-known %% complementary chemical-evolutionary tracers. Despite a century of %% remarkable progress, two broad but intertwined questions remain: (i) %% What are the astrophysical processes (i.e., the chemo-dynamical %% explanation) leading to the observed relations; and (ii) are these %% patterns generic for spiral disks or specific to the Milky Way? %% Setting aside chemical evolution for simplicity, there has been a %% long-standing debate about the origin of the vertical stratification %% of disk stars in phase-space as a function of age (the age-velocity %% relation, or AV-R). Historically the argument has been in the context %% of dynamical heating from two-body scattering \citep{Spitzer51}, but %% the scattering source has been debated \citep[e.g., giant molecular %% clouds, transient spiral structure, or dwarf satellite %% galaxies][]{Spitzer51,Spitzer53,Wielen77,Quinn93,Binney00}, and none %% have proven satisfactory to explain the MW's thick disk. This %% framework has been salvaged but also up-ended by relatively recent %% evidence for the increasing turbulence (and presumably thickness) of %% ionized gas in disks at higher redshift %% \citep{Weiner06,Forster-Schreiber09,Wisnioski15}. It seems plausible %% that early phases of disk formation involved gas cooling, leaving %% behind an old thick-disk stellar component %% \citep{Brook04,Bournaud09}. However, thinner relic layers would also %% emerge as time progressed \citep{Bird13}, depending critically on the %% cooling time-scale for the gas in the presence of star-formation, AGN %% feedback, and accretion, but without invoking any need for dynamical %% heating. Ironically, this `settling' of the stellar disk with %% population age is not unlike the predictions of monolithic collapse %% from \citet{ELS}, albeit now consistent in the context of bottom-up, %% or hierarchical structure formation as seen in recent simulations %% \citep[e.g.,][]{Bird13,Martig14a}. 
Because it is no longer clear if, %% loosely speaking, disks `heat' or `cool' to form the the vertical %% stratification of disk stars in phase-space, and likely both modes %% play a role at late and early times, respectively, henceforth we refer %% instead to `dynamical stratification' as a phenomenon that captures %% both general physical processes. %% The recent simulations noted above show there is a rich history of %% radial and vertical build up of stellar populations that involves and %% interplay between the cooling of the gas, the impact of mergers and %% accretion, and at late times the classical heating processes noted %% above. This richness suggests the possibility for a diversity of %% astrophysical paths in disk formation that could lead to significantly %% different structure in galaxies, exhibited in their %% AVM$\alpha$-Rs. Hence, the broader question of whether the MW is %% representative of the external disk galaxy population becomes salient. %% Little is known about the dynamical stratification rates for stars in %% spiral outside the Milky Way, but recent studies of stellar %% populations and dynamical stratification in low-mass spiral galaxies %% \citep{Seth05a,Bernard15} have shown dramatic differences in the %% age-metallicty and age-velocity dispersion relations when compared to %% the Milky Way. Recent measurements of the stellar velocity dispersions %% in M31 \citep{Dorman15} show that there are also gradients in %% dispersion with age and metallicity, but the amplitudes and %% time-scales are larger than in the MW. Differences in velocity %% dispersion amplitudes may reflect a more massive or thinner M31 disk, %% but possibly also a different dynamical history -- for gas settling or %% stellar dynamical heating. Clearly more data on the stellar properties %% of external galaxies is needed. The above studies serve as a gold %% standard since they are based on studies of resolved stellar %% populations. Because there are no massive spiral galaxies outside of %% the Local Group for which we can resolve stellar populations at %% surface-densities high enough to probe most of the disk, it is %% imperative to undertake studies based on integrated starlight. % Underpinning stellar dynamical mass estimates are assumptions about % the distribution of matter in phase-space. Of importance are the % density distribution and orbital anisotropy of the luminous tracers % (Emsellem et al. 2011), both difficult to infer because of geometric % projection. Typical mass-modeling assumptions include dynamical % equilibrium, constant $\Upsilon_*$ and orbital anisotropy. In % star-forming disks there is every reason to believe these assumptions % are invalid, even in the vertical direction where luminous young % stars, gas and dust are in a thin mid-plane layer embedded in % successively older, thicker, and dynamically warmer stellar % populations \citep{Mihalas68,Wielen77,Binney00}. Disk components are % expected to diffuse in a secular fashion through a dynamical heating % process, forming layers in phase-space. Galaxy disks provide a unique % laboratory for testing mass-modeling assumptions because they are % dynamically cold, while on-going star-formation ensures that all % phases of stellar evolution are present. What is critical is to % measure the heating rate. 
% For the younger and intermediate-age disk components which, for a disk % like the MW, contains most of the light and a significant amount of % the stellar disk mass over a modest range of metallicity (and % therefore plausibly similar IMF). The heating time-scale over the past % 4-6 Gyr in the solar neighborhood is comparable to the disk radial and % vertical dynamical time-scales \citep{Aumer09}; even quiescent disks % are not in true equilibrium. Determining disk heating rates pins down % vertical gradients in $\Upsilon_*$ to within some {\it constant} scale % factor (the IMF), thereby enabling a conversion from luminosity to % {\it relative} density profiles. % The main thrust of this program is to exploit vertical population % gradients in star-forming disks to establish disk-heating rates. This % will provide constraints on the cause(s) of heating, and eliminate % systematics from our determination of disk gravitational potentials % and thereby accurately calibrate $\Upsilon_*$. This can be done by % combining spectroscopic indices that correlate stellar age to their % kinematics and spatial location: By using integrated starlight as a % {\it chronometer} it is possible to determine the dynamical evolution % of disks. Because $\Upsilon_*$ evolves with stellar population age % independent of uncertainties in the low-mass end of the IMF, such % measurements decouple IMF uncertainties from the interplay between the % relative distribution of luminosity and the dynamically-inferred % mass. In other words, by measuring disk heating rates we will be able % to distinguish between the mass-weighted scale-height of the disk and % luminosity-weighted scale-height of our dynamical tracers. To measure % disk-heating and apply the results to an accurate mass-decomposition % of spiral disks our program has this two-step framework: % % \leftskip=0.35in % {\it % % \medskip % \noindent (i) determine the vertical age gradients and hence the disk % heating rate in detail for a small but representative sample of % $\sim$20 edge-on galaxies; and then % % \medskip % \noindent (ii) calibrate a proxy for stellar velocity dispersion % applicable to large samples of star-forming galaxies at all % inclinations, and to young stars and old stars. Use this proxy to % constrain heating rates and dynamical models of stellar disks to infer % their mass-density. % % } % \leftskip=0in %\subsection{Experimental Design} In this paper we present a detailed, spectroscopic study of stellar populations in the nearby, edge-on galaxy NGC 891. We measure these populations in integrated starlight mapped over much of the visible stellar disk in both radius and height. The importance of looking at NGC 891 as well as other nearby edge-on disks is several fold. First, it allows us to expand the census of spiral disks beyond the Local Group, particularly for a geometric projection that allows us to examine the spatial gradients in both radius and height more directly than in either M31 or the MW. Second, NGC 891, while historically called out as a nearby MW analog, clearly has some properties that are exceptional, such as a large HI scale height \citep[$\sim$1 kpc][]{Oosterloo07}, consistent with an extended dust layer \citep{Howk00}. Measurements of vertical and radial population gradients in NGC 891 will provide a better picture of the range of such gradients in MW-like systems, pointing to the range and multiplicity of disk formation and assembly scenarios of MW-like galaxies. 
As a benchmark for what we might expect to see in terms of the AVM$\alpha$-R (the Age,
Velocity, Metallicity, $\alpha$-enhancement relation) in the integrated light of an
external, edge-on galaxy, we focus simply on age and velocity (the AV-R, where velocity,
i.e., velocity dispersion, is rendered as an equivalent height given relevant dynamical
assumptions) and ask: If NGC 891 has a similar star-formation history and AV-R as the
MW's solar cylinder, what would the integrated light look like if observed edge-on? In
answering this question our intent is to demonstrate the essence of our experimental
design, namely a method to measure disk dynamical stratification rates by using stars as
chronometers.

To estimate the vertical distribution of the stellar spectral energy distribution we
construct a simple model where stars in disks (i) form at some rate at the mid-plane
with a scale-height and velocity dispersion comparable to the gas; and then (ii) are
dynamically stratified through some secular process over time. We adopt a constant
star-formation rate (SFR), a total age of 11 Gyr\footnote{This is a reasonable match for
the total age of the MW disk and the star-formation rate over the past few Gyr based on
analysis of stars in the solar cylinder \citep[e.g.,][]{Pilyugin96a}.}, the ``heating''
model for the solar cylinder of \citet{Aumer09}, and assume the dynamics are such that
$\sigma_z^2/h_z$ is constant at late times, which is roughly what is observed in the MW
disk near the sun ($\sigma_z$ is the vertical stellar velocity dispersion and $h_z$ is
the corresponding scale-height, i.e., $\sigma_z^2/h_z$ is proportional to the surface
mass-density of the disk). We also adopt the stellar population synthesis models of
\citet{Bruzual03} but note that in the spectral range of interest here the choice is
unimportant. By coupling elements (i) and (ii) to stellar population synthesis models it
is then possible to compute the vertical distribution of starlight.

By adopting this simplified model to represent what we might see for the projected
vertical distribution of integrated starlight of an edge-on galaxy like NGC 891 we are
in effect assuming the SFR and dynamical stratification are constant with radius. This
assumption, despite the apparent radial constancy of disk thickness observed over many
radial scale-lengths in external galaxies, at some level must be incorrect. Certainly we
know there is flaring in gas layers with radius as well as gradients in the total and
specific SFR. In a companion paper we discuss details about assumptions of the gas-layer
thickness between NGC 891 and the MW, radial variations in the gas-layer thickness and
heating rate, the relative sensitivity to the star-formation rate versus the heating
rate, as well as the general problem of heating models' inability to match the thick
disk. The simplifications adopted here allow us to look at what appear to be the primary
observational consequences of dynamical stratification on vertical trends in the
spectral energy distributions of galaxies; this simple model is likely to capture the
interplay of stellar evolutionary time-scales and dynamical stratification that
dominates the younger and intermediate-age disk components in the MW \citep{Bird13}.

\begin{figure*}
  \centering
  \includegraphics[width=\textwidth]{891_1/figs/mab_stack.pdf}
  \caption[Heating signature in Milky Way model]{\label{891_1:fig:MW_heating}\fixspacing
    Example of spectrophotometric index change with height and stellar population.
    Spectra are from a model with a fast-rotating (200 km s$^{-1}$) disk with a constant
    star-formation rate and a MW-like disk-heating rate, rendered at a spectral
    resolution of 200 km s$^{-1}$ ($\sigma$). \emph{bottom:} Note the transition from
    Balmer-line to metal-line dominance between 0.32 and \val{0.6}{kpc}. \emph{top:} The
    same model spectra plotted with height and color-coded by normalized intensity. The
    transition between Balmer-line and metal-line dominated populations is clear in \Hda
    and Ca H\&K.
    % Our models predict the location of the
    % transition is a measure of the disk heating time-scale and is
    % largely independent of the details of the star-formation
    % history.
    The green line shows the same transition in NGC 891 at \val{0.4}{kpc} as discussed
    in \S\ref{891_1:sec:results}.}
\end{figure*}

We illustrate the spectrophotometric rendering of our simple model in Figure
\ref{891_1:fig:MW_heating}. This example shows how the spectra of integrated starlight
would appear as a function of height above the disk mid-plane for a dynamical
stratification rate self-similar to what is seen in the MW solar cylinder. Inspection
shows that at roughly $z \approx \val{0.4}{kpc}$ there is a rapid transition in the
stellar population. Here, the spectrum evolves from Balmer-absorption to
metal-absorption dominance on the time-scale of A/F main-sequence stellar lifetimes
($\sim$0.5 Gyr). The break-height where this occurs is relatively insensitive to the
star-formation history; spectroscopic line-strengths are reddening independent, and
line-of-sight attenuation from dust at this expected break-height is not prohibitive for
detection. Consequently, measuring the break height yields a constraint on the dynamical
stratification rate on these time-scales. The transition is best detected in spectra
covering the \val{4000}{\AA} region, and can be measured easily even with low-resolution
spectrophotometry. Our first-order objective is to determine if we can detect and
measure a spectral break height in the disk of NGC 891, and if so, how it compares to
this simple model for the MW solar cylinder. The height and width over which the
spectral break occurs yield a constraint on the AV-R, or the dynamical stratification
rate at late times.

While this model may be qualitatively correct for the solar neighborhood and NGC 891, it
is well worth verifying whether the observed gradients are constant with projected
radius. It has been suggested from the simulations of \citet{Martig14a} that the young,
star-forming disk flares at every epoch, likely for dynamical reasons perhaps related to
the flaring of atomic and molecular gas at the current epoch. Recent measurements
suggest such flaring is seen in MW stars \citep{Ness16}. We will probe for a similar
flaring in NGC 891, manifest as a change in break height with radius. While differential
extinction effects cannot be ignored when interpreting line-of-sight depths of our
observations, we will use Doppler tomography in Chapter \ref{chap:891_2}
\citep[see for example,][]{Kregel05} to increase the fidelity of our measurements. Our
higher-order objective is to probe whether the spectrophotometric data farther above the
mid-plane enables us to extend constraints on the dynamical stratification over longer
time frames commensurate with the old thin and thick disks.
While the implementation is beyond the scope of the present work, the detection of
metallicity and abundance gradients in both radius and height provides a mapping of the
AVM$\alpha$-R in external galaxies, and, via the appropriate chemo-dynamical model,
constraints on the dynamical stratification for disk stars over most of the
star-formation history of the galaxy.

In this paper we describe the custom fiber integral field unit (IFU) we built to make
measurements of vertical population gradients in nearby, edge-on disks (Section
\ref{891_1:sec:pak}). Our observations of NGC 891 with this IFU are described in Section
\ref{891_1:sec:obs}. The data reduction and calibration procedures, particularly those
specific to this first variable-pitch IFU, are given in Section
\ref{891_1:sec:data_reduction}. The resulting spectral maps are binned based on a
signal-to-noise (S/N) criterion set by astrophysically motivated requirements for
measuring age and metallicity in the integrated light of stellar populations. These
requirements are determined via modeling in Section \ref{891_1:sec:maps}. Spectral
diagnostics are measured for these binned spectra to examine the trends in age,
metallicity, and abundance with radius and height in Section \ref{891_1:sec:results}.
These give a qualitative picture of the population gradients in NGC 891, as projected
along the line of sight. A synopsis of the results from this paper is given in Section
\ref{891_1:sec:summary}.

% For the younger and intermediate-age disk components which, for a disk
% like the MW, contains most of the light and a significant amount of
% the stellar disk mass over a modest range of metallicity (and
% therefore plausibly similar IMF). The heating time-scale over the past
% 4-6 Gyr in the solar neighborhood is comparable to the disk radial and
% vertical dynamical time-scales \citep{Aumer09}; even quiescent disks
% are not in true equilibrium. Determining disk heating rates pins down

% We use a simple heating model of \citet{Aumer09}, suitable for the MW
% solar cylinder, and based on Hipparcos proper-motion data and
% Geneva-Copenhagen radial velocities. For each cohort of disk stars,
% i.e., those that form at some epoch this yields a vertical velocity
% dispersion that grows as \citep[using the formulation
% of][]{Wielen77} $$\sigma_z(t) = \sigma_z(0) (1+t/\tau_{\rm
% heat})^{1/n_h},$$ where $\tau_{\rm heat} \sim 5 \times 10^7$ yr is
% the heating time-scale, $\sigma_z(0)$ is the velocity dispersion of
% the stars at birth, the exponent $n_h$ is referred to as the heating
% index, and time is moving forward from, and indexed to, the cohort
% formation epoch.

% Historically and physically, the model derives from
% \citet{Spitzer51,Spitzer53} molecular-cloud scattering scenario where
% the time-constant for energy transfer for the Solar neighborhood can
% be interpreted in terms of molecular cloud mass and density, the
% initial velocity-dispersion, and a complex function of $B/\omega$
% where $B$ is Oort's constant and $\omega = V_c/R$, i.e., the angular
% velocity. What is relevant here is that the formulation provides a
% simple, analytic model which describes well the phase-space
% stratification of MW disk stars.

% One further required model component relates the vertical thickness to
% velocity dispersion.
We assume the dynamics are such % that $\sigma_z^2/h_z$ is constant at late times, which is roughly what % is observed in the MW disk near the sun, but that the stellar layer % never gets thinner or dynamically colder than the gas layer at $h_z = % 0.065$ kpc and $\sigma_z = 8$ km s$^{-1}$. While the atomic gas layer % in NGC 891 is vertically very extended \citep{Swaters97, Oosterloo07}, % the relevant thickness is the molecular gas, which in NGC 891 is much % thinner, but about a factor of two thicker than the molecular disk in % the MW \citep{Scoville93, Yim11}. This is a pretty good match to the % near-infrared ($K$ band) super-thin disk component scale-height of 160 % pc found by \citet{Schechtman-Rook13}. Despite this difference, we % scale the stellar vertical distribution as a function of their age % such that they match the MW old thin-disk stars with $h_z = 0.350$ % kpc, $\sigma_z =20$ km s$^{-1}$, and an age of 7 Gyr. % The specified heating model, however, is parameterized for the solar % cylinder. In order for the disk to have constant thickness with % radius, yet a velocity dispersion profile that is roughly exponential % with twice the photometric scale length - what we observe for disks % today - at least one of three parameters change with radius: the % heating time-scale, the heating index, or the initial gas velocity % dispersion. While any one of these can be modulated with radius to % achieve a nearly constant vertical scale-height with time and radius, % it is compelling to keep the heating- index constant (i.e., the % physical heating mechanism is the same) while setting the heating % time-scale to be the dynamical time-scale associated with the disk's % rotation. Note for example that $\tau_{{\rm heat},\odot} \sim (\pi/2) % R_\odot/V_\odot$ adopting $V_\odot = 220$ km s$^{-1}$ and $R_\odot = % 8$ kpc as the circular speed and distance to the Galactic center at % the Solar radius. Except for the inner portion of the galaxy, % $\tau_{\rm heat}$ is simply a linear function of radius. % It is also reasonable to adopt an initial velocity dispersion of the % stars that has a modest trend with radius. This is plausible because % the gas also exhibits such a trend, as seen by \citet{Shostak84}, and % more recently updated by \citet{Tamburro09}. We will show elsewhere % that this produces a time-evolution of a stellar disk that uniformly % thickens while quickly developing a radial trend in stellar velocity % dispersion that matches observations in the DMS. % There is no chemical evolution in the model, but it would be % straight-forward to add following, e.g., % \citet{Pilyugin96a,Pilyugin96b,Chiappini97} based on the earlier % studies of \citet{Twarog80, Carlberg85, % Meusinger91,Edvardsson93}. From these studies, it is reasonable to % approximate the age-metallicity relation as linear in time with [Fe/H] % = 0.05 today and [Fe/H] -0.35 8 Gyr ago, then dropping rapidly at % earlier ages where it would be reasonable to adopt Z = 0.2 solar. % $h_z(t=7{\rm Gyr})=0.35$ kpc $\sigma_z(t=7{\rm Gyr}) = 20$ km % s$^{-1}$ Then $\sigma_z(t=0)$ is inferred from $\sigma_z(t=7{\rm % Gyr})$ using the heating model, but not allowed to be less than % $sigma_{z,{\rm min}} = \sigma_{\rm gas} / \sqrt{1/a^2+2/3)$ km % s$^{-1}$, where $a = 0.6$ (why?). 
The thicknes is given by $h_z(t) = % h_z(t=7{\rm Gyr})(\sigma_z(t)/\sigma_z(t=7{\rm Gyr}))^2$, with the % additional imposition that $h_z(t) \geq h_{z,{\rm min}} = 0.065$ % kpc, i.e., the gas layer in the MW, which at least for HI in N891, % is not a good match. However the relevant thickness is the molecular % gas, which in NGC 891 is comparable to the MW (Scoville et % al. 1993). % % For $n_h = 3$, $\tau = 0.05$ Gyr, while for $n_h = 2$, $\tau = 0.2$ % Gyr. These values are from Wielen (1977), but the $n_h = 3$ case is % consistent with Binney et al. (2000) analysis of Hipparcos data. % Further, this value of $\tau$ is close to the dynamical time-scale % $t_{\rm dyn} = 2 \pi R/V$ at the solar circle ($R = 8$ kpc, $V = 220$ % km s$^{-1}$). % another paper to look at for asymmetry and high-z dust: Hughes et al. 2014 %% \begin{figure} %% \centering %% \includegraphics[width=\columnwidth]{figs/mab_plot_model.pdf} %% \includegraphics[width=\columnwidth]{figs/mab_plot_modelspec.pdf} %% \caption{\label{fig:MW_heating}Example of spectrophotometric index change %% with height and stellar population. Spectra are from a model with a %% fast-rotating (200 km/s) disk with a constant star-formation rate and a %% MW-like disk-heating rate. Note the transition from Balmer-line to %% metal-line dominance between 0.8 and 1.5 hz. Our models predict the %% location of the transition is a measure of the disk heating time-scale and %% is largely independent of the details of the star-formation history.} %% \end{figure} %% \begin{figure} %% \centering %% \includegraphics[width=\columnwidth]{figs/mab_plot_model.pdf} %% \caption{\label{fig:mab_model}Another view of the spectrophotometric %% index change shown in Figure \ref{fig:MW_heating}. Color is mapped %% to intensity and all spectra have been normalized to unity mean %% over the wavelength interval shown. The horizontal line marks the %% age break at \val{0.4}{kpc} found in %% \S\ref{891_1:sec:results}. This break is remarkably consistent with the %% transition from Balmer-line to metal-line (Ca H\&K) dominance seen %% the models.} %% \end{figure}
{ "alphanum_fraction": 0.7620710598, "avg_line_length": 58.5422222222, "ext": "tex", "hexsha": "a57dd5f4a4ce3e967c29f153c555b70904c346c9", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "113dfb95996777e2b36785d7ee80a824a671ab09", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "eigenbrot/eigenbrot-thesis", "max_forks_repo_path": "891_1/introduction1.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "113dfb95996777e2b36785d7ee80a824a671ab09", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "eigenbrot/eigenbrot-thesis", "max_issues_repo_path": "891_1/introduction1.tex", "max_line_length": 82, "max_stars_count": null, "max_stars_repo_head_hexsha": "113dfb95996777e2b36785d7ee80a824a671ab09", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "eigenbrot/eigenbrot-thesis", "max_stars_repo_path": "891_1/introduction1.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 6475, "size": 26344 }
\section{Transmission Selection}

As part of your design, you will need to select a transmission type and then specify
your chosen transmission. You will need to do some research on the types of transmission
to be able to present a reasoned argument for your selection. This will be followed by
the steps taken to arrive at an initial transmission specification. In this document, we
will take you through the selection of a chain transmission. For other transmission
types, you will have to look up the relevant guides and processes. Do not let this put
you off, though, as the steps are fairly similar.

\subsection{Transmission Types}

\begin{framed}
\vspace{1cm}
\begin{center}
\Large -- Own Research \& Lectures --
\end{center}
\vspace{1cm}
\end{framed}

\subsection{Selecting a Chain Drive}

The following selection of a chain drive uses Renold's Chain Guide\footnote{Acknowledgements
to the Renold Chain Guide}. Other chain companies have similar processes, and it is
important to follow the appropriate company's process when selecting your transmission.

\subsubsection{Features \& Considerations}

An\marginnote{Centre Distance} incorrect centre distance leads to a higher wear rate and
a slack chain, which introduces further inefficiencies into the system.
% Centre distances that are smaller than nominal can be attained through the use of a chain tensioner.

When\marginnote{polygonal action} the driving sprocket of a chain drive runs at constant
speed, the speed of the chain itself is not constant but is subject to periodic
fluctuations. This fluctuation, which is caused by the fact that the chain when wrapped
on a sprocket forms a polygon rather than a circle, is known as polygonal
action~\cite{mahalingam1958}. One effect of polygonal action is to produce a periodic
variation in the velocity ratio of the drive, and if the frequency of this variation
coincides with a resonant frequency of the system, large stresses may occur. At high
chain speeds the effects of impact are very complex; each impact sets up a train of
travelling waves which, after reflection at the sprockets, combine with the next train
and so on. \cref{fig-cyclic-speed-variation} highlights that polygonal action's effect
increases considerably as one decreases the no.\ of sprocket teeth below 19. Hence,
Renold recommends that sprockets should have a minimum of 19 teeth to avoid the effect
of polygonal action. If fewer teeth are required, then an additional factor is applied
when selecting a chain, which reduces the maximum rated speed of the chain.

\begin{marginfigure}
  \centering
  \includegraphics[width=\textwidth]{07_transmission_selection/polygonal-action.png}
  \caption[Cyclic speed variation due to polygonal action]{Cyclic speed variation due to
  polygonal action~\citep[p.24]{renoldchain}}
  \label{fig-cyclic-speed-variation}
\end{marginfigure}

It\marginnote{cranked link} is always best practice and desirable to have a chain with
an even number of links. However, when a design requires an odd-linked chain, cranked
links are used to form the chain loop. \cref{fig-cranked-link} shows an example of a
cranked link within a chain. The link has a natural kink in its profile to allow it to
connect to the next link in the chain. This inherently places additional stress
concentrations within the component, and it is therefore not recommended for high-load
and/or high-speed chain drives.
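As a rough quantitative guide to the polygonal (chordal) action described above (a
simple geometric estimate on our part, not a value taken from the Renold guide), the
chain speed varies between $\omega R$ and $\omega R\cos(\pi/Z)$ for a sprocket of pitch
radius $R$ rotating at angular speed $\omega$ with $Z$ teeth, so the cyclic speed
variation is approximately
\begin{equation}
\frac{\Delta V}{V} = 1-\cos\left(\frac{\pi}{Z}\right),
\end{equation}
which is of the order of a few per cent for small sprockets and falls to roughly one per
cent at the recommended minimum of 19 teeth, in keeping with the trend shown in
\cref{fig-cyclic-speed-variation}.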
\begin{marginfigure}
  \centering
  \includegraphics[width=\textwidth]{07_transmission_selection/cranked-link.png}
  \caption[Cranked link]{Cranked link~\citep[p.11]{renoldchain}}
  \label{fig-cranked-link}
\end{marginfigure}

\subsubsection{Selection Process}

The selection process for a Renold chain is as follows. The prerequisites for the
process are that you know the torque and the sprocket speeds at which you wish to run.

The\marginnote{Selection Power (\(P_s\))} selection power is calculated by multiplying
the power you wish to transmit (\(P_t\)) by two safety factors \(f_1\) and \(f_2\).

\begin{equation}
P_s = f_1f_2P_t
\end{equation}

\noindent Where:

\begin{description}
  \item[\(f_1\)] Safety factor that depends on your operating conditions; it can be
  found using Chart 2 on p.28 of the Renold catalogue
  \item[\(f_2\)] Adjustment factor applied if you are using a non-recommended number of
  driving sprocket teeth, \(f_2=\frac{19}{\text{No. of teeth on driving sprocket}}\)
\end{description}

With\marginnote{Selecting a Suitable Chain Pitch} the selection power determined and the
driving sprocket speed known, one can determine the chain pitch using
\cref{fig-chain-pitch}. These rating charts have been created from years of empirical
testing and theoretical modelling by the chain companies, and dictate the safe operating
windows for their chains.

\begin{figure}[h!]
  \centering
  \includegraphics[width=0.9\textwidth]{07_transmission_selection/chain-pitch.png}
  \caption[Chain pitch selection chart]{Chain pitch selection chart~\citep[p.31]{renoldchain}}
  \label{fig-chain-pitch}
\end{figure}

After\marginnote{Select Chain and Sprockets} selecting the chain pitch, you can go
through the catalogue to find the driving and driven sprockets that will give you the
required ratio (or as near to it as possible) for your design.

Having\marginnote{Chain Links and Centre Distance} selected the chain pitch and
sprockets, and with an approximate desired centre distance \(C_1\) in mind, the required
number of chain links is calculated from

\begin{equation}
L = \frac{Z_1+Z_2}{2}+\frac{2C_1}{P}+\frac{P{\left(\frac{Z_2-Z_1}{2\pi}\right)}^2}{C_1}
\end{equation}

\noindent{} Where:

\begin{description}
  \item[\(Z_1\)] Number of driving teeth
  \item[\(Z_2\)] Number of driven teeth
  \item[\(C_1\)] Approximate desired centre distance
  \item[\(P\)] Chain pitch
  \item[\(L\)] Number of chain links
\end{description}

Once calculated, you then round \(L\) up to the nearest even integer and use this to
calculate your centre distance \(C\).

\begin{equation}
C=\frac{P}{8}
\left[
2L-Z_2-Z_1 +
\sqrt{
{\left(2L-Z_2-Z_1\right)}^2 - \left(\frac{\pi}{3.88}\left(Z_2-Z_1\right)^2\right)
}
\right]
\end{equation}

\subsubsection{Chain Example}

Select a chain and sprockets to transmit \SI{2}{\kilo\watt} of power at \SI{20}{rpm}. A
ratio of 1:2 is required, driven by an electric motor with moderate shocks expected from
the load. Space is highly limited.

Using\marginnote{determining \(f_1\) and \(f_2\)} Chart 2 on p.28 of the Renold
catalogue, we identify that \(f_1=1.4\) for our context, and we will also use the
recommended number of teeth on the driving sprocket (19), which makes \(f_2=1\).
Therefore, the selection power \(P_s\) becomes:

\begin{equation}
P_s = f_1f_2P_t = 2\si{\kilo\watt}\times1.4\times1=2.8\si{\kilo\watt}
\end{equation}

Using\marginnote{chain pitch} \cref{fig-chain-pitch}, we can find the chain pitches that
are suitable for our application.
The potential pitches are:

\begin{itemize}
  \item \SI{38.10}{\milli\metre} simplex
  \item \SI{31.75}{\milli\metre} duplex
  \item \SI{25.40}{\milli\metre} triplex
\end{itemize}

For\marginnote{chain and sprocket selection} this example, we will select the triplex as
our requirement is to have a compact solution. Now it is just a case of looking in the
tables to find a sprocket set (Renold, p.75) that provides the closest match to our
desired ratio, and a matching triplex chain (Renold, p.25).

\begin{description}
  \item[Chain] 12B-3
  \item[Driving Sprocket] 16B1/19T
  \item[Driven Sprocket] 16B1/38T
\end{description}

Now\marginnote{chain links} that we have the sprockets and chain selected, it is a
simple case of using their specifications, together with an approximate desired centre
distance of \(C_1 = \SI{571.5}{\milli\metre}\), to determine the number of links:

\begin{equation}
L = \frac{19+38}{2} + \frac{2\times571.5}{19.05}+\frac{19.05{\left(\frac{38-19}{2\pi}\right)}^2}{571.5} = 88.8
\end{equation}

Rounding this up to the nearest even integer gives us \(L=90\).
The\marginnote{centre distance} final calculation is to determine the centre distance:

% https://www.sharelatex.com/learn/Aligning_equations_with_amsmath
\begin{equation}
\begin{split}
C & = \frac{19.05}{8}
\left[
2(90)-38-19 +
\sqrt{
{\left(2(90)-38-19\right)}^2 - \left(\frac{\pi}{3.88}\left(38-19\right)^2\right)
}
\right] \\
& = \SI{583}{\milli\metre}
\end{split}
\end{equation}

\subsection{Selecting a vee-belt or tooth belt}

\begin{framed}
\vspace{1cm}
\begin{center}
\Large -- Own Research \& Lectures --
\end{center}
\vspace{1cm}
\end{framed}
{ "alphanum_fraction": 0.7486923166, "avg_line_length": 50.9053254438, "ext": "tex", "hexsha": "0b2fc28cba57096e4a986289d5e8de659624e59a", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "0249e0804538237df58e5a7f99d039b35b30099d", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "JamesGopsill/ShaftDesignCourseNotes", "max_forks_repo_path": "07_transmission_selection/section.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "0249e0804538237df58e5a7f99d039b35b30099d", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "JamesGopsill/ShaftDesignCourseNotes", "max_issues_repo_path": "07_transmission_selection/section.tex", "max_line_length": 589, "max_stars_count": null, "max_stars_repo_head_hexsha": "0249e0804538237df58e5a7f99d039b35b30099d", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "JamesGopsill/ShaftDesignCourseNotes", "max_stars_repo_path": "07_transmission_selection/section.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 2345, "size": 8603 }
\section{Upgrading from the 8.4 series to the 8.6 series of HTCondor}\label{sec:to-8.6} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \index{upgrading!items to be aware of} Upgrading from the 8.4 series of HTCondor to the 8.6 series will bring new features introduced in the 8.5 series of HTCondor. These new features include the following (note that this list contains only the most significant changes; a full list of changes can be found in the version history:~\ref{sec:History-8-5}): \begin{itemize} \item \Condor{q}-related changes: \begin{itemize} \item \Condor{q} now defaults to showing only the current user's jobs. \Ticket{5271} Similarly, \Condor{qedit} defaults to editing only jobs owned by the current user. \Ticket{5889} (The previous behavior of both commands can be restored by setting \MacroNI{CONDOR\_Q\_ONLY\_MY\_JOBS} to \Expr{False} -- see ~\ref{param:CondorQOnlyMyJobs}.) \item \Condor{q} now defaults to batch mode, which produces a single line of output summarizing a batch of jobs (see~\pageref{batches-of-jobs}). \Ticket{5708} (The previous behavior can be restored by setting \MacroNI{CONDOR\_Q\_DASH\_BATCH\_IS\_DEFAULT} to \Expr{False} -- see ~\ref{param:CondorQDashBatchIsDefault}.) \item \Condor{q} (and \Condor{history} and \Condor{status}) can now read and write JSON, XML, and new ClassAd formats (see~\pageref{man-condor-q}, ~\pageref{man-condor-history}, and ~\pageref{man-condor-status}). \Ticket{5688} \Ticket{5844} \Ticket{5820} \end{itemize} \item Job submission-related changes: \begin{itemize} \item Added the ability for the \Condor{schedd} to transform job ClassAds upon job submission (see section~\ref{sec:Schedd-Config-Job-Transforms}). \item Added the ability to group jobs into batches, and assign names to the batches, using the new \Opt{-batch} arguments to \Condor{submit} and \Condor{submit\_dag}. \item Added support in the submit language for retrying jobs if they fail (see~\pageref{condor-submit-max-retries}). \end{itemize} \item \Condor{dagman}-related changes: \begin{itemize} \item Added the ability to define SCRIPTS, VARS, etc., for all nodes in a DAG with a single command (see section~\ref{sec:DAGAllNodes}). \Ticket{5729} \item Simplified how DAG node priorities work (see section~\ref{sec:DAG-SetNodePriority}). This means that existing DAGs that use the node priority feature will run differently than they have in the past. \Ticket{4024} \Ticket{5749} \item Added the new splice connection feature (see section~\ref{sec:DAGSpliceConnections}), which allows more flexible dependencies between splices. \Ticket{5213} \end{itemize} \item HTCondor can now use IPv6 interfaces; it prefers IPv4 if both IPv4 and IPv6 are available. \Ticket{5104} \item HTCondor now has initial support for Singularity containers (see section~\ref{sec:singularity-support}). \Ticket{5828} \item \Condor{status} can now display a single line of output for each machine (rather than a line per slot). \Ticket{5596} \item A number of improvements to the Python bindings including: submission \Ticket{5666} \Ticket{4916}; draining \Ticket{5507}; per-thread security contexts \Ticket{5632}; Computing-On-Demand support \Ticket{5130}; and multiple query support \Ticket{5187} \item Jobs can now be submitted to the Slurm batch scheduling system via the new \SubmitCmdNI{slurm} type in the grid universe. 
\Ticket{5515} \item Numerous improvements to Docker support, including \Ticket{5680}; \Ticket{5760}; \Ticket{5761}; \Ticket{5750}; \Ticket{5740}; \Ticket{5609}; \Ticket{5456} \end{itemize} Upgrading from the 8.4 series of HTCondor to the 8.6 series will also introduce changes that administrators and users of sites running from an older HTCondor version should be aware of when planning an upgrade. Here is a list of items that administrators should be aware of. \begin{itemize} \item Shared port (see section~\ref{sec:shared-port-daemon}) is now enabled by default; set \MacroNI{USE\_SHARED\_PORT} to \Expr{False} to disable it. Note that this configuration macro does not control the HAD or replication daemon's use of shared port; use \MacroNI{HAD\_USE\_SHARED\_PORT} or \MacroNI{REPLICATION\_USE\_SHARED\_PORT} instead. See section ~\ref{sec:HA-configuration} for more details on how to configure HAD (and/or the replication daemon) to work with shared port, since just activating shared port without any other configuration change will not work. \Ticket{3813} \Ticket{5103} \item To mitigate performance problems, \MacroNI{LOWPORT} and \MacroNI{HIGHPORT} no longer restrict outbound port ranges on Windows. To re-enable this functionality, set \MacroNI{OUT\_LOWPORT} and \MacroNI{OUT\_HIGHPORT} (see ~\ref{param:OutLowPort} and ~\ref{param:OutHighPort}). \Ticket{4711} \item Cgroups (see section~\ref{sec:CGroupTracking}) are now enabled by default. This means that if you have partitionable slots, jobs need to get \SubmitCmd{request\_memory} correct. \Ticket{5936} \item By default, \Condor{q} queries only the current user's jobs, unless the current user is a queue superuser or the \MacroNI{CONDOR\_Q\_ONLY\_MY\_JOBS} configuration macro is set to \Expr{False}. \Ticket{5271} \item Added support for immutable and protected job attributes, which makes SUBMIT\_REQUIREMENTS more useful (see section~\ref{param:ImmutableJobAttrs}). \Ticket{5065} \item By default, the \Condor{schedd} no longer changes the ownership of spooled job files (they remain owned by the submitting user). \Ticket{5226} \item When \MacroNI{SEC\_ENABLE\_MATCH\_PASSWORD\_AUTHENTICATION} is set to \Expr{True}, the related authorizations are now automatically enabled. \Ticket{5304} (See ~\ref{param:SecEnableMatchPasswordAuthentication} for details.) \item The master can now run an administrator-defined script at shutdown; see section~\ref{param:DefaultMasterShutdownScript} for details. \Ticket{5590} \end{itemize}
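For convenience, the fragment below collects these settings in ordinary configuration-file syntax. It is purely illustrative (it simply restores several of the pre-8.6 defaults discussed above) and should be adapted to local policy before use:
\begin{verbatim}
# Illustrative only: revert selected 8.6 behavior changes
USE_SHARED_PORT = False
CONDOR_Q_ONLY_MY_JOBS = False
CONDOR_Q_DASH_BATCH_IS_DEFAULT = False
\end{verbatim}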
{ "alphanum_fraction": 0.7529079428, "avg_line_length": 34.1931818182, "ext": "tex", "hexsha": "9ff96b060572c551b0e729aba94924924e33dcc0", "lang": "TeX", "max_forks_count": 2, "max_forks_repo_forks_event_max_datetime": "2020-07-14T20:20:05.000Z", "max_forks_repo_forks_event_min_datetime": "2017-11-09T01:42:58.000Z", "max_forks_repo_head_hexsha": "9e00a5874cc2579f5fdc81bb778f540b40b48c87", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "pavlo-svirin/htcondor", "max_forks_repo_path": "doc/version-history/upgradingto8-6.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "9e00a5874cc2579f5fdc81bb778f540b40b48c87", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "pavlo-svirin/htcondor", "max_issues_repo_path": "doc/version-history/upgradingto8-6.tex", "max_line_length": 87, "max_stars_count": 1, "max_stars_repo_head_hexsha": "9e00a5874cc2579f5fdc81bb778f540b40b48c87", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "pavlo-svirin/htcondor", "max_stars_repo_path": "doc/version-history/upgradingto8-6.tex", "max_stars_repo_stars_event_max_datetime": "2015-05-22T16:26:34.000Z", "max_stars_repo_stars_event_min_datetime": "2015-05-22T16:26:34.000Z", "num_tokens": 1714, "size": 6018 }
\documentclass[12pt]{article} \usepackage{amsmath, mathtools} \usepackage{amsfonts} \usepackage{amssymb} \usepackage{graphicx} \usepackage{colortbl} \usepackage{xr} \usepackage{hyperref} \usepackage{longtable} \usepackage{xfrac} \usepackage{tabularx} \usepackage{float} \usepackage{siunitx} \usepackage{booktabs} \usepackage{caption} \usepackage{pdflscape} \usepackage{afterpage} \usepackage[round]{natbib} %\usepackage{refcheck} \hypersetup{ bookmarks=true, % show bookmarks bar? colorlinks=true, % false: boxed links; true: colored links linkcolor=red, % color of internal links (change box color with linkbordercolor) citecolor=green, % color of links to bibliography filecolor=magenta, % color of file links urlcolor=cyan % color of external links } \input{../Comments} % For easy change of table widths \newcommand{\colZwidth}{1.0\textwidth} \newcommand{\colAwidth}{0.13\textwidth} \newcommand{\colBwidth}{0.82\textwidth} \newcommand{\colCwidth}{0.1\textwidth} \newcommand{\colDwidth}{0.05\textwidth} \newcommand{\colEwidth}{0.8\textwidth} \newcommand{\colFwidth}{0.17\textwidth} \newcommand{\colGwidth}{0.5\textwidth} \newcommand{\colHwidth}{0.28\textwidth} % Used so that cross-references have a meaningful prefix \newcounter{defnum} %Definition Number \newcommand{\dthedefnum}{GD\thedefnum} \newcommand{\dref}[1]{GD\ref{#1}} \newcounter{datadefnum} %Datadefinition Number \newcommand{\ddthedatadefnum}{DD\thedatadefnum} \newcommand{\ddref}[1]{DD\ref{#1}} \newcounter{theorynum} %Theory Number \newcommand{\tthetheorynum}{T\thetheorynum} \newcommand{\tref}[1]{T\ref{#1}} \newcounter{tablenum} %Table Number \newcommand{\tbthetablenum}{T\thetablenum} \newcommand{\tbref}[1]{TB\ref{#1}} \newcounter{assumpnum} %Assumption Number \newcommand{\atheassumpnum}{P\theassumpnum} \newcommand{\aref}[1]{A\ref{#1}} \newcounter{goalnum} %Goal Number \newcommand{\gthegoalnum}{P\thegoalnum} \newcommand{\gsref}[1]{GS\ref{#1}} \newcounter{instnum} %Instance Number \newcommand{\itheinstnum}{IM\theinstnum} \newcommand{\iref}[1]{IM\ref{#1}} \newcounter{reqnum} %Requirement Number \newcommand{\rthereqnum}{P\thereqnum} \newcommand{\rref}[1]{R\ref{#1}} \newcounter{lcnum} %Likely change number \newcommand{\lthelcnum}{LC\thelcnum} \newcommand{\lcref}[1]{LC\ref{#1}} \newcommand{\famname}{FamName} % PUT YOUR PROGRAM NAME HERE \usepackage{fullpage} \begin{document} \title{Program Family Title \plt{\famname should appear in the title}} \author{Author Name} \date{\today} \maketitle ~\newpage \pagenumbering{roman} \plt{The CA template is related to the SRS template. Many of the sections are in common. The notes and advice for the SRS template are not reproduced here. Please have a look at the SRS template for advice.} \plt{This CA template is based on \citet{Smith2006}. An example for a family of material models is given in \citet{SmithMcCutchanAndCarette2017}. This example is for a physics based family. Often the families will be based on generic numerical techniques, rather than physics.} \plt{A good mindset for thinking about the families is often to think of the family as providing a library of services, as opposed to a single executable. The library of services can be used to build an application that uses a subset of the services, which is like providing the smaller library as a single family member.} \plt{In CAS 741, you will not have to implement the entire family. 
We will decide on a reasonable subset of the family for implementation.} \section{Revision History} \begin{tabularx}{\textwidth}{p{3cm}p{2cm}X} \toprule {\bf Date} & {\bf Version} & {\bf Notes}\\ \midrule Date 1 & 1.0 & Notes\\ Date 2 & 1.1 & Notes\\ \bottomrule \end{tabularx} ~\newpage \section{Reference Material} This section records information for easy reference. \subsection{Table of Units} Throughout this document SI (Syst\`{e}me International d'Unit\'{e}s) is employed as the unit system. In addition to the basic units, several derived units are used as described below. For each unit, the symbol is given followed by a description of the unit and the SI name. ~\newline \renewcommand{\arraystretch}{1.2} %\begin{table}[ht] \noindent \begin{tabular}{l l l} \toprule \textbf{symbol} & \textbf{unit} & \textbf{SI}\\ \midrule \si{\metre} & length & metre\\ \si{\kilogram} & mass & kilogram\\ \si{\second} & time & second\\ \si{\celsius} & temperature & centigrade\\ \si{\joule} & energy & Joule\\ \si{\watt} & power & Watt (W = \si{\joule\per\second})\\ \bottomrule \end{tabular} % \caption{Provide a caption} %\end{table} \plt{Only include the units that your CA actually uses. If there are no units for your problem, like for a general purpose library, you should still include the heading, with the content ``not applicable'' (or similar).} \subsection{Table of Symbols} The table that follows summarizes the symbols used in this document along with their units. The choice of symbols was made to be consistent with the heat transfer literature and with existing documentation for solar water heating systems. The symbols are listed in alphabetical order. \renewcommand{\arraystretch}{1.2} %\noindent \begin{tabularx}{1.0\textwidth}{l l X} \noindent \begin{longtable*}{l l p{12cm}} \toprule \textbf{symbol} & \textbf{unit} & \textbf{description}\\ \midrule $A_C$ & \si[per-mode=symbol] {\square\metre} & coil surface area \\ $A_\text{in}$ & \si[per-mode=symbol] {\square\metre} & surface area over which heat is transferred in \\ \bottomrule \end{longtable*} \plt{Use your problems actual symbols. The si package is a good idea to use for units.} \plt{For the case of a generic numerical library, units will likely not be included. For instance, a linear ODE solver will not know the units of its coefficients.} \subsection{Abbreviations and Acronyms} \renewcommand{\arraystretch}{1.2} \begin{tabular}{l l} \toprule \textbf{symbol} & \textbf{description}\\ \midrule A & Assumption\\ DD & Data Definition\\ GD & General Definition\\ GS & Goal Statement\\ IM & Instance Model\\ LC & Likely Change\\ PS & Physical System Description\\ R & Requirement\\ SRS & Software Requirements Specification\\ \famname{} & \plt{put your famram name here}\\ T & Theoretical Model\\ \bottomrule \end{tabular}\\ \plt{Add any other abbreviations or acronyms that you add.} \plt{Only include abbreviations and acronyms that are actually used.} \newpage \tableofcontents ~\newpage \pagenumbering{arabic} \section{Introduction} \plt{This CA template is based on \citet{Smith2006}. It will get you started, but you will have to make changes. Any changes to section headings should be approved by the instructor, since that implies a deviation from the template. 
Although the bits shown below do not include type information, you may need to add this information for your problem.} \plt{Feel free to change the appearance of the report by modifying the LaTeX commands.} \subsection{Purpose of Document} \subsection{Scope of the Family} \subsection{Characteristics of Intended Reader} \subsection{Organization of Document} \section{General System Description} This section identifies the interfaces between the system and its environment, describes the potential user characteristics and lists the potential system constraints. \subsection{Potential System Contexts} \plt{Your system context will likely include an explicit list of user and system responsibilities} \begin{itemize} \item User Responsibilities: \begin{itemize} \item \end{itemize} \item \famname{} Responsibilities: \begin{itemize} \item Detect data type mismatch, such as a string of characters instead of a floating point number \item \end{itemize} \end{itemize} \subsection{Potential User Characteristics} \label{SecUserCharacteristics} The end user of \famname{} should have an understanding of undergraduate Level 1 Calculus and Physics. \subsection{Potential System Constraints} \plt{You may not have any system constraints.} \plt{If you need to make design decisions for your family, these decisions will be made here as constraints. For instance, if all inputs will have to use the same file format, this would be a constraint that would be included here.} \plt{You should generally limit the number of constraints, to keep the CA abstract.} \section{Commonalities} \subsection{Background Overview} \label{Sec_Background} \subsection{Terminology and Definitions} This subsection provides a list of terms that are used in the subsequent sections and their meaning, with the purpose of reducing ambiguity and making it easier to correctly understand the requirements: \begin{itemize} \item \end{itemize} \subsection{Data Definitions} \label{sec_datadef} This section collects and defines all the data needed to build the instance models. The dimension of each quantity is also given. \plt{Modify the examples below for your problem, and add additional definitions as appropriate.} ~\newline \noindent \begin{minipage}{\textwidth} \renewcommand*{\arraystretch}{1.5} \begin{tabular}{| p{\colAwidth} | p{\colBwidth}|} \hline \rowcolor[gray]{0.9} Number& DD\refstepcounter{datadefnum}\thedatadefnum \label{FluxCoil}\\ \hline Label& \bf Heat flux out of coil\\ \hline Symbol &$q_C$\\ \hline % Units& $Mt^{-3}$\\ % \hline SI Units & \si{\watt\per\square\metre}\\ \hline Equation&$q_C(t) = h_C (T_C - T_W(t))$, over area $A_C$\\ \hline Description & $T_C$ is the temperature of the coil (\si{\celsius}). $T_W$ is the temperature of the water (\si{\celsius}). The heat flux out of the coil, $q_C$ (\si{\watt\per\square\metre}), is found by assuming that Newton's Law of Cooling applies (\aref{A_Newt_coil}). This law (\dref{NL}) is used on the surface of the coil, which has area $A_C$ (\si{\square\metre}) and heat transfer coefficient $h_C$ (\si{\watt\per\square\metre\per\celsius}). This equation assumes that the temperature of the coil is constant over time (\aref{A_tcoil}) and that it does not vary along the length of the coil (\aref{A_tlcoil}). 
\\ \hline Sources& Citation here\\ \hline Ref.\ By & \iref{ewat}\\ \hline \end{tabular} \end{minipage}\\ \subsection{Goal Statements} \noindent Given the \plt{inputs}, the goal statements are: \begin{itemize} \item[GS\refstepcounter{goalnum}\thegoalnum \label{G_meaningfulLabel}:] \plt{One sentence description of the goal. There may be more than one. Each Goal should have a meaningful label.} \end{itemize} \subsection{Theoretical Models} \label{sec_theoretical} This section focuses on the general equations and laws that \famname{} is based on. \plt{Modify the examples below for your problem, and add additional models as appropriate.} ~\newline \noindent \begin{minipage}{\textwidth} \renewcommand*{\arraystretch}{1.5} \begin{tabular}{| p{\colAwidth} | p{\colBwidth}|} \hline \rowcolor[gray]{0.9} Number& T\refstepcounter{theorynum}\thetheorynum \label{T_COE}\\ \hline Label&\bf Conservation of thermal energy\\ \hline Equation& $-{\bf \nabla \cdot q} + g$ = $\rho C \frac{\partial T}{\partial t}$\\ \hline Description & The above equation gives the conservation of energy for transient heat transfer in a material of specific heat capacity $C$ (\si{\joule\per\kilogram\per\celsius}) and density $\rho$ (\si{\kilogram\per\cubic\metre}), where $\bf q$ is the thermal flux vector (\si{\watt\per\square\metre}), $g$ is the volumetric heat generation (\si{\watt\per\cubic\metre}), $T$ is the temperature (\si{\celsius}), $t$ is time (\si{\second}), and $\nabla$ is the gradient operator. For this equation to apply, other forms of energy, such as mechanical energy, are assumed to be negligible in the system (\aref{A_OnlyThermalEnergy}). In general, the material properties ($\rho$ and $C$) depend on temperature.\\ \hline Source & \url{http://www.efunda.com/formulae/heat_transfer/conduction/overview_cond.cfm}\\ % The above web link should be replaced with a proper citation to a publication \hline Ref.\ By & \dref{ROCT}\\ \hline \end{tabular} \end{minipage}\\ ~\newline \plt{In a CA, the TMs often do not need to be refined. However, this is not a rule. In some cases, it may make sense to introduce an IM, or possibly even a GD in between the TM and the IM.} \section{Variabilities} \plt{The variabilities are summarized in the following subsections. They may each be summarized separately, like in \citet{SmithMcCutchanAndCarette2017}, or in a table, as in \citet{Smith2006}.} \plt{For each variability, a description should be given, along with the parameters of variation and the binding time. The parameters of variation give the type that defines possible values. The binding time is when the variability is set. The possible values are specification time (scope time), build time and run time.} \subsection{Assumptions} \begin{itemize} \item[A\refstepcounter{assumpnum}\theassumpnum \label{A_meaningfulLabel}:] \plt{Short description of each assumption. Each assumption should have a meaningful label. Use cross-references to identify the appropriate traceability to T, GD, DD etc., using commands like dref, ddref etc.} \end{itemize} \plt{Input assumptions will be appropriate for many problems. Some input will have simplifying constraints, and other inputs will not.} \subsection{Calculation} \label{sec_Calculation} \plt{The calculation variabilities should be as abstract as possible. If there are variabilities that are related to imposed design decisions, the system constraints section should be referenced for the relevant constraint. 
Design constraint related variabilities should be listed separately.} \plt{Variabilities related to data structure choices would go in this section. However, these variabilities are related to design, so they should be separated from the more abstract variabilities.} \plt{Algorithmic variations would go here as well, but as for data structures, they should be separated from the more abstract variabilities.} \subsection{Output} \label{sec_Output} \section{Requirements} This section provides the functional requirements, the business tasks that the software is expected to complete, and the nonfunctional requirements, the qualities that the software is expected to exhibit. \subsection{Family of Functional Requirements} \plt{Since the CA will often be applied to a library, the functionality will not be a single use case. Therefore, this section should summarize the family of potential requirements. A good way to provide an overview of the functional requirements would be to provide multiple use cases on how the library will be employed.} \noindent \begin{itemize} \item[R\refstepcounter{reqnum}\thereqnum \label{R_Inputs}:] \plt{Requirements for the inputs that are supplied by the user. This information has to be explicit.} \item[R\refstepcounter{reqnum}\thereqnum \label{R_OutputInputs}:] \plt{It isn't always required, but often echoing the inputs as part of the output is a good idea.} \item[R\refstepcounter{reqnum}\thereqnum \label{R_Calculate}:] \plt{Calculation related requirements.} \item[R\refstepcounter{reqnum}\thereqnum \label{R_VerifyOutput}:] \plt{Verification related requirements.} \item[R\refstepcounter{reqnum}\thereqnum \label{R_Output}:] \plt{Output related requirements.} \end{itemize} \subsection{Nonfunctional Requirements} \plt{To allow the Non-Functional Requirements (NFRs) to vary between family members, try to parameterize them. The value of the parameter is than a variability.} \plt{An important variability between family members it the relative importance of the NFRs. \citet{Smith2006} shows how pairwise comparisons can be used to rank the importance of NFRs.} \plt{List your nonfunctional requirements. You may consider using a fit criterion to make them verifiable.} \section{Likely Changes} \noindent \begin{itemize} \item[LC\refstepcounter{lcnum}\thelcnum\label{LC_meaningfulLabel}:] \plt{If there is a ranking of variabilities, or combinations of variabilities, that are more likely, this information can be included here.} \end{itemize} \section{Traceability Matrices and Graphs} \plt{You will have to add tables.} \newpage \bibliographystyle {plainnat} \bibliography {../../refs/References} \newpage \section{Appendix} \plt{Your report may require an appendix. For instance, this is a good point to show the values of the symbolic parameters introduced in the report.} \subsection{Symbolic Parameters} \plt{The definition of the requirements will likely call for SYMBOLIC\_CONSTANTS. 
Their values are defined in this section for easy maintenance.} \noindent \plt{Advice on using the template: \begin{itemize} \item Assumptions have to be invoked somewhere \item ``Referenced by'' implies that there is an explicit reference \item Think of traceability matrix, list of assumption invocations and list of reference by fields as automatically generatable \item If you say the format of the output (plot, table etc), then your requirement could be more abstract \item For families the notion of binding time should be introduced \item Think of families as a library, not as a single program \end{itemize} } \subsection{First Stage of Implementation} \plt{In this section specify the family member, or sub-family, that you will be implementing for CAS 741. You should specify the value for all of your variabilities, along with the binding time. A tabular representation will probably be the easiest way to convey this information.} \end{document}
{ "alphanum_fraction": 0.7395027624, "avg_line_length": 34.2803030303, "ext": "tex", "hexsha": "62c97eeeb3f5a7cd2b5c0f8b26598b26e016263c", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "a2d090bde6e8862a58b784f7cbdb51b4d2c11a9b", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "shmouses/MLSA", "max_forks_repo_path": "doc/SRS/CA.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "a2d090bde6e8862a58b784f7cbdb51b4d2c11a9b", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "shmouses/MLSA", "max_issues_repo_path": "doc/SRS/CA.tex", "max_line_length": 138, "max_stars_count": 1, "max_stars_repo_head_hexsha": "a2d090bde6e8862a58b784f7cbdb51b4d2c11a9b", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "shmouses/MLSA", "max_stars_repo_path": "doc/SRS/CA.tex", "max_stars_repo_stars_event_max_datetime": "2021-01-22T15:43:17.000Z", "max_stars_repo_stars_event_min_datetime": "2021-01-22T15:43:17.000Z", "num_tokens": 5011, "size": 18100 }
\title{Introduction to Programming in Python (2)}


\author{Steve Renals
  \newline \mbox{ }[email protected]\mbox{ }}
\date{ICL --- 29 September 2005}

\begin{document}

\frame{\titlepage}

\mode<article>{\section[Outline]{ICL/Introduction to Python 2/2005-09-29}}
\mode<presentation>{
  \section[Outline]{}
}

\frame{\tableofcontents}

\section{Review}

\begin{frame}[fragile]
  \frametitle{Review}
  \begin{itemize}
  \item<1-> \textbf{Python modules}
    \begin{itemize}
    \item Every file ending in \texttt{.py} is a Python module.
    \item Modules can contain attributes such as functions and variables.
    \end{itemize}
  \item<2-> \textbf{Built-in objects}: numbers, strings, lists, dictionaries, tuples, files
  \end{itemize}
\pause
{\small
\begin{verbatim}
level = { 'icl' : 9, 'nlssd' : 11, 'inf2b' : 8}
x = level['nlssd'] # 11
n = len(level) # 3
flag = level.has_key('inf2b') # True
l = level.keys() # ['nlssd', 'inf2b', 'icl']
l.sort()
for x in l:
    print x,level[x]
\end{verbatim}
}
\end{frame}

\begin{frame}
  \frametitle{Files and tuples}

\textbf{Tuples}: like lists, but immutable (cannot be changed)
\pause
\begin{semiverbatim}\small
emptyT = () \newline
T1 = (1, 2, 3)\newline
x = T1[1]\newline
n = len(T1)
\end{semiverbatim}
\pause
\textbf{Files}: objects with methods for reading and writing to files
\pause
\begin{semiverbatim}\small
fil = open('myfile', 'w')\newline
fil.write('hello file\\n')\newline
fil.close()\newline
\end{semiverbatim}
\pause
\begin{semiverbatim}\small
f2 = open('myfile', 'r')\newline
s = f2.readline() \# 'hello file\\n'\newline
t = f2.readline() \# ''
\end{semiverbatim}

(\emph{Learning Python}, chapter 7)
\end{frame}

\section{Control Flow}

\subsection{Conditionals}

\begin{frame}[fragile]
  \frametitle{if tests}
{\small
\begin{verbatim}
course = 'icl'
if course == 'icl':
    print 'Ewan / Steve'
    print 'Semester 1'
elif course == 'dil':
    print 'Phillip'
    print 'Semester 2'
else:
    print 'Someone else'
    print 'Some semester'
\end{verbatim}}
\pause
\begin{itemize}
\item \textbf{Indentation determines the block structure}
\item Indentation enforces readability
\item Tests after \texttt{if} and \texttt{elif} can be anything that returns True/False
\end{itemize}

(\emph{Learning Python}, chapter 9)
\end{frame}

\subsection{Loops}

\begin{frame}[fragile]
  \frametitle{while loops}

A while loop keeps iterating while the test at the top remains True.
{\small
\begin{verbatim}
a = 0
b = 10
while a < b:
    print a
    a = a + 1
\end{verbatim}
\pause
\begin{verbatim}
s = 'icl'
while len(s) > 0:
    print s
    s = s[1:]
\end{verbatim}}

(\emph{Learning Python}, chapter 10)
\end{frame}

\begin{frame}[fragile]
  \frametitle{for loops}

\texttt{for} is used to step through any sequence object
{\small
\begin{verbatim}
l = ['a', 'b', 'c']
for i in l:
    print i
\end{verbatim}}
\pause
{\small
\begin{verbatim}
sum = 0
for x in [1, 2, 3, 4, 5, 6]:
    sum = sum + x
    print sum
\end{verbatim}}
\pause
\texttt{range()} is a useful function:
{\small
\begin{verbatim}
range(5) = [0, 1, 2, 3, 4]
range(2, 5) = [2, 3, 4]
range(0, 6, 2) = [0, 2, 4]
\end{verbatim}}
\end{frame}

\begin{frame}[fragile]
  \frametitle{for loops with style}

Do something to each item in a list (e.g.\ print its square)
\begin{verbatim}
l = [1, 2, 3, 4, 5, 6, 7, 8] # or l = range(1,9)

# one way to print the square
for x in l:
  print x*x
\end{verbatim}
\pause
\begin{verbatim}
# another way to do it
n = len(l)
for i in range(n):
  print l[i]*l[i]
\end{verbatim}
\pause
Which is better?
\pause
The top one...

Iterate directly over the sequence, try to avoid using counter-based loops...
\end{frame} \begin{frame}[fragile] \frametitle{Example: intersecting sequences} The intersection of \newline \texttt{['a', 'd', 'f', 'g']} and \texttt{['a', 'b', 'c', 'd']} \newline is \texttt{['a', 'd']} \pause {\small \begin{verbatim} l1 = ['a', 'd', 'f', 'g'] l2 = ['a', 'b', 'c', 'd'] res = [] for x in l1: for y in l2: if x == y: res.append(x) \end{verbatim}} \pause {\small \begin{verbatim} res = [] for x in l1: if x in l2: res.append(x) # res = ['a', 'd'] \end{verbatim}} \end{frame} \section{Functions} \subsection{Function basics} \begin{frame}[fragile] \frametitle{Built-in, imported and user-defined functions} \begin{itemize} \item<1-> Some functions are built-in, eg: \begin{verbatim} l = len(['a', 'b', 'c']) \end{verbatim} \item<2-> Some functions may be imported, eg: \begin{verbatim} import math from os import getcwd print getcwd() # which directory am I in? x = math.sqrt(9) # 3 \end{verbatim} \item<3-> Some functions are user-defined, eg: \begin{verbatim} def multiply(a, b): return a * b print multiply(4, 5) print multiply('-', 5) \end{verbatim} \end{itemize} \end{frame} \begin{frame}[fragile] \frametitle{Functions in Python} \begin{itemize} \item<1-> Functions are a way to group a set of statements that can be run more than once in a program. \item<1-> They can take parameters as inputs, and can return a value as output \item<2-> Example \begin{verbatim} def square(x): # create and assign function return x*x y = square(5) # y gets assigned the value 25 \end{verbatim} \item<3-> \texttt{def} creates a function object, and assigns it to a name (\texttt{square} in this case) \item<3-> \texttt{return} sends an object back to the caller \item<3-> Adding \texttt{()} after the functions name calls the function \end{itemize} \uncover<4->{(\emph{Learning Python}, chapter 12)} \end{frame} \begin{frame}[fragile] \frametitle{Intersection function} \begin{verbatim} def intersect(seq1, seq2): res = [] for x in seq1: if x in seq2: res.append(x) return res \end{verbatim} \pause \begin{itemize} \item Putting the code in a function means you can run it many times \item General --- callers pass any 2 sequences \item Code is in one place, makes changing it easier (if you have to) \end{itemize} \end{frame} \subsection{Variables and functions} \begin{frame}[fragile] \frametitle{Local variables} Variables inside a function are \emph{local} to that function {\footnotesize \begin{verbatim} >>> def intersect(s1, s2): ... res = [] ... for x in s1: ... if x in s2: ... res.append(x) ... return res ... >>> intersect([1,2,3,4], [1,5,6,4]) [1, 4] >>> res Traceback (most recent call last): File "<stdin>", line 1, in ? NameError: name 'res' is not defined >>> x Traceback (most recent call last): File "<stdin>", line 1, in ? NameError: name 'x' is not defined \end{verbatim}} \end{frame} \begin{frame}[fragile] \frametitle{Argument passing} Arguments are passed by assigning objects to \emph{local} names: \begin{verbatim} >>> def plusone(x): ... x = x+1 ... return x ... >>> plusone(3) 4 >>> x=6 >>> plusone(x) 7 >>> x 6 \end{verbatim} \end{frame} \begin{frame}[fragile] \frametitle{Passing mutable arguments} Recall that numbers, strings, tuples are \emph{immutable}, and that lists and dictionaries are \emph{mutable}: {\small \begin{verbatim} >>> def appendone(s): ... s.append('one') ... return s ... 
>>> appendone(['a', 'b']) ['a', 'b', 'one'] >>> l = ['a', 'b'] >>> appendone(l) ['a', 'b', 'one'] >>> l ['a', 'b', 'one'] \end{verbatim}} \end{frame} \begin{frame}[fragile] \frametitle{But variable names are still local} \begin{verbatim} >>> def doesnothing(l): ... l = ['1', '2'] ... >>> l = ['a', 'b'] >>> doesnothing(l) >>> l ['a', 'b'] \end{verbatim} \end{frame} \subsection{Importing functions} \begin{frame}[fragile] \frametitle{Importing functions} Put the definition of intersect in a module (call the file \texttt{foo.py}), then you can import it: \pause \begin{verbatim} from foo import intersect # ... define lst1 and lst2 l3 = intersect(lst1, lst2) \end{verbatim} \pause or \begin{verbatim} import foo # ... define lst1 and lst2 l3 = foo.intersect(lst1, lst2) \end{verbatim} \end{frame} \subsection{Functional programming} \begin{frame}[fragile] \frametitle{map} \begin{verbatim} >>> counters = range(1, 6) >>> updated = [] >>> for x in counters: ... updated.append(x+3) ... >>> updated [4, 5, 6, 7, 8] \end{verbatim} \pause \begin{verbatim} >>> def addthree(x): ... return x+3 ... # map applies its first argument (a function) # to each element of its second (a list) >>> map(addthree, counters) [4, 5, 6, 7, 8] \end{verbatim} \end{frame} \begin{frame}[fragile] \frametitle{Anonymous functions and list comprehensions} \pause \begin{verbatim} # lambda is a way of defining a function with no name >>> map((lambda x: x+3), counters) [4, 5, 6, 7, 8] \end{verbatim} \pause \begin{verbatim} # you can even have a list comprehension... >>> res = [addthree(x) for x in counters] >>> res [4, 5, 6, 7, 8] \end{verbatim} Also check out \texttt{apply}, \texttt{filter} and \texttt{reduce} \end{frame} \subsection{Designing functions} \begin{frame} \frametitle{Function design} \begin{itemize} \item Use arguments for the inputs, and return for outputs: try to make a function independent of things outside it \item Avoid global variables when possible \item Don't change mutable arguments if possible \item Functions should do one thing well (not do many things) \item Functions should be relatively small \end{itemize} \end{frame} \section{Summary} \begin{frame} \frametitle{Summary} \begin{itemize} \item Loops: \texttt{for} and \texttt{while} \item Functions in Python: built-in, supplied in modules, user-defined \item Defining functions with \texttt{def} \item Function arguments and return values \item Variables defined in functions are local to the function \item Mutable objects can be changed in functions \item Fancier stuff: mapping functions onto sequences \end{itemize} \end{frame} \end{document}
{ "alphanum_fraction": 0.6494814815, "avg_line_length": 21.3607594937, "ext": "tex", "hexsha": "454d639b6ec2338120f9f85e93074c8f1b36601d", "lang": "TeX", "max_forks_count": 114, "max_forks_repo_forks_event_max_datetime": "2021-11-13T08:16:02.000Z", "max_forks_repo_forks_event_min_datetime": "2015-01-13T04:47:49.000Z", "max_forks_repo_head_hexsha": "c152bde901f05915e90b07a615b232adb123bed8", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "silky/nltk_contrib", "max_forks_repo_path": "doc/klein-renals/python.d/python2.tex", "max_issues_count": 12, "max_issues_repo_head_hexsha": "c152bde901f05915e90b07a615b232adb123bed8", "max_issues_repo_issues_event_max_datetime": "2020-07-30T23:00:41.000Z", "max_issues_repo_issues_event_min_datetime": "2015-01-13T06:27:18.000Z", "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "silky/nltk_contrib", "max_issues_repo_path": "doc/klein-renals/python.d/python2.tex", "max_line_length": 76, "max_stars_count": 123, "max_stars_repo_head_hexsha": "15d82cb187f0a859fa1366fb0d69dc25c365a3ff", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "peresolb/nltk_contrib", "max_stars_repo_path": "doc/klein-renals/python.d/python2.tex", "max_stars_repo_stars_event_max_datetime": "2022-02-01T10:05:16.000Z", "max_stars_repo_stars_event_min_datetime": "2015-01-06T10:46:18.000Z", "num_tokens": 3284, "size": 10125 }
%
% This is Chapter 4 file (chap4.tex)
%
\chapter{Datasets and Analysis Methods}\label{chap:chap4}

We utilized multiple datasets (both observational and simulated) while working on this thesis. This chapter gives a brief overview of all the datasets. \Cref{tab:datachap} summarizes these datasets and how they were used.

\section{Spacecraft datasets}\label{sec:intr41}

\subsection{Wind}\label{sec:wind}

The Wind spacecraft\index{spacecraft!Wind} was launched on November 1, 1994 as part of the International Solar-Terrestrial Physics (ISTP) program with the objective of studying plasma processes in the solar wind near Earth and in the magnetosphere and ionosphere \citep{Acuna1995}. Wind is spin stabilized and makes one complete rotation every $\sim$\,3 seconds about an axis aligned perpendicular to the ecliptic plane \citep{Acuna1995, WilsonIII2021}. Wind's instruments collectively produce $\sim$\,1,100 data variables or datasets \citep{WilsonIII2021}. The instruments of interest to this thesis are the Magnetic Field Investigation (MFI) and the Faraday Cup (FC).

\paragraph*{MFI}\label{sec:mfi} MFI consists of two fluxgate magnetometers mounted on a boom at distances of 8 and 12 meters from the spacecraft \citep{Lepping1995}. Occasionally MFI can provide data as fast as 44\,Sa/s\footnote{Samples per second.} with great accuracy ($< 0.08\,\rm nT$), though 10.9\,Sa/s is the standard product and was used for this thesis.

\paragraph*{FC}\label{sec:fc} The Solar Wind Experiment (SWE) suite includes two Faraday cups (FC) \citep{Ogilvie1995}. Each cup measures the current from incoming charged ions for a different energy bin during each rotation, measuring current in 20 different look directions. It has 31\,energy bins, which define its resolution of the VDF. Since each rotation lasts about 3\,seconds, it takes FC roughly 93\,seconds to collect the full spectrum. The current can then be converted to a particle velocity assuming an appropriate charge-to-mass ratio. Since it takes roughly 93\,seconds to get the full VDF, we get one measurement of parameters like density, velocity, temperature etc.\ every 93\,seconds. Consequently, while pairing FC data with MFI data, we further averaged the MFI data down to the $\sim$\,93\,second cadence of FC. For an in-depth discussion of extracting the VDF from FC observations and the computation of higher-order moments see \citet{Maruca2012a}.

\paragraph*{The \texttt{wnd} dataset} \label{sec:wndds} In this thesis we use the Wind data from 1994 to 2008, which henceforth shall be referred to as the \texttt{wnd} dataset. In the initial data cleaning process we discarded any point which had $R_{\rm p} < 0.1$ or $R_{\rm p} > 10$. We also only selected data from the pristine solar wind and discarded everything within the bow shock region of the Earth. A more detailed description of the data selection process can be found in \citet[\S4.1]{Maruca2012a}.

\textbf{Computing linear growth rate and non-linear frequency}: In order to compute the value of the linear growth rate at any point, we use the methodology mentioned in \Cref{sec:cgr} with the local values of $R_{\rm p}$ and $\beta_{\parallel \rm p}$. We computed $\omega_{\rm nl}$ using \Cref{eq:omnl}, where we used the $x$-component\footnote{For Wind, the $x$-direction is defined by the line joining the Earth and the Sun.} of the magnetic field for the longitudinal direction of \textbf{B}.
Use of the $x$-component instead of the radial component introduces a small error in the computation of $\omega_{\rm nl}$, since the magnetic field at 1\,au is not perfectly aligned with the radial direction (on average, the angle between the magnetic field and the radial direction is $45^\circ$). The field also strongly fluctuates around the average value. The Alfv\'en speed was computed using the average field from MFI data and $n_{\rm p}$ from FC as per \Cref{eq:alfv}. For the lag we used $\ell = 1/k_{\max}$, where $k_{\max}$ is the wave number corresponding to $\gamma_{\max}$. The lag was taken as $1/k_{\max}$ in order to ensure that both $\gamma_{\max}$ and $\omega_{\rm nl}$ are computed at the same scale.

\subsection{MMS}\label{sec:mms}

Magnetospheric Multiscale (MMS)\index{spacecraft!MMS} is a constellation of four spacecraft which was launched by NASA on March 12, 2015. The main objective of the mission was to study how reconnection happens in a collisionless plasma in the Earth's magnetosphere \citep{Russell2016}. MMS has 6 major instrument suites \citep{Russell2016}, and in this thesis we used the data from FIELDS and the Fast Plasma Investigation (FPI).

\paragraph*{FIELDS} \label{sec:fields} The FIELDS instrument suite consists of 2 different kinds of fluxgate magnetometers, a search coil magnetometer and an electron drift instrument \citep{Torbert2016}. The fluxgate magnetometers are mounted at the end of two 5\,m booms of each spacecraft \citep{Russell2016}. The cadence of these FGMs is 128\,Hz, meaning we get 128 samples of the magnetic field vector every second with an accuracy of $\sim$\,0.1\,nT \citep{Russell2016, Torbert2016}.

\paragraph*{FPI} \label{sec:fpi} FPI uses electrostatic analyzers to measure the VDF of ions and electrons \citep{Pollock2016}. It has a $\mathrm{180}^\circ$ instantaneous polar field of view at a resolution of $\mathrm{15}^\circ$. We use the proton density and temperature anisotropy, which are among the standard products of FPI. FPI works in 2 modes: (a) \textbf{Slow/Survey mode}, which gives the full 3-D VDF of ions every 1\,second, and (b) \textbf{Fast/Burst mode}, which gives 1 measurement of the ion VDF every 150\,ms.

\paragraph*{\texttt{mms} dataset} \label{sec:mmsds} Though the burst-mode cadence of FPI is very high, burst intervals generally last for only a few minutes. In our studies we thus used data from several different burst intervals spread over multiple years, taken when the spacecraft was in the magnetosheath. \Cref{tab:mmsdata} lists all the dates and times from which data was used, as well as the median values of some plasma parameters.
\begin{table}[ht] \centering \caption[\texttt{mms} dataset details]{Burst data duration and median values of some plasma parameters} \begin{tabular}{ m{0.20\linewidth} m{0.20\linewidth} m{0.20\linewidth} m{0.25\linewidth} } \hline \\ \multirow{2}{\linewidth}{Date \scriptsize{(YYYYMMDD)}} & \multicolumn{2}{|c|}{Time \scriptsize{(HH:MM:SS) (GMT)}} & \multirow{2}{\linewidth}{Median Values} \\ \cline{2-3} & Start \newline HH:MM:SS & End \newline HH:MM:SS & \\ \hline \\ 20160111 & 00:57:04 & 01:00:33 & $n_{\rm p}$ = 52.04 $cm^{-3}$, \newline $v_{\rm p}$ = 261.47 $km/s$, \newline $T_{\rm p}$ = 2.53 $\times \mathrm{10^6} K$, \newline $R_{\rm p}$ = 1.09, \newline $\beta_{\parallel \rm p}$ = 6.54\\ \\ \hline \\ 20160124 & 23:36:14 & 23:47:33 & $n_{\rm p}$ = 32.57 $cm^{-3}$, \newline $v_{\rm p}$ = 242.21 $km/s$, \newline $T_{\rm p}$ = 3.98 $\times \mathrm{10^6} K$, \newline $R_{\rm p}$ = 0.99, \newline $\beta_{\parallel \rm p}$ = 12.57\\ \\ \hline \\ %20160125 & & & \\ %\hline 20170118 & 00:45:54 & 00:49:43 & $n_{\rm p}$ = 198.26 $cm^{-3}$, \newline $v_{\rm p}$ = 135.11 $km/s$, \newline $T_{\rm p}$ = 1.31 $\times \mathrm{10^6} K$, \newline $R_{\rm p}$ = 0.97, \newline $\beta_{\parallel \rm p}$ = 10.66\\ \\ \hline \\ %20170127 & & & \\ %\hline 20171226 & 06:12:43 & 06:52:22 & $n_{\rm p}$ = 22.29 $cm^{-3}$, \newline $v_{\rm p}$ = 243.50 $km/s$, \newline $T_{\rm p}$ = 2.66 $\times \mathrm{10^6} K$, \newline $R_{\rm p}$ = 1.04, \newline $\beta_{\parallel \rm p}$ = 4.29\\ \\ \hline %20181103 & & & \\ %\hline \\ All & & & $n_{\rm p}$ = 2.94 $cm^{-3}$, \newline $v_{\rm p}$ = 240.15 $km/s$, \newline $T_{\rm p}$ = 2.74 $\times \mathrm{10^6} K$, \newline $R_{\rm p}$ = 1.01, \newline $\beta_{\parallel \rm p}$ = 5.34\\ \\ \hline \end{tabular} \label{tab:mmsdata} \end{table} Once we have the required parameters we compute other derived parameters like $\gamma$ and $\omega_{\rm {nl}}$ in the same way as mentioned in \Cref{sec:wndds}. We refer to the complete MMS dataset as \texttt{mms}. \subsection{PSP}\label{sec:psp} Parker Solar Probe\index{spacecraft!PSP} was launched on August 12, 2018 with the objective to understand the dynamical structure of the sun, study and find the processes behind coronal heating and find out the process that accelerates energetic particles \citep{Fox2015}. The spacecraft has 4 major instrument suites: FIELDS, SWEAP, WISPR, \isois \citep{Fox2015}. \paragraph*{FIELDS}\label{sec:fields2} With main objective of measuring wave and turbulence in the inner heliosphere FIELDS measures the magnetic field using both, search coils and fluxgate magnetometers \citep{Bale2016}. All three magnetometers are mounted on a boom (search coil at 3.08\,m and 2 magnetometers at 1.9\,m and 2.7\,m). For this thesis we use the magnetic field data from flux gate magnetometer. At the highest cadence magnetometer can record field at a rate of 292.969\,Sa/s or 256\,Sa/NYS, where 1\,NYsecond is defined as 0.837\,seconds \citep{Bale2016}\footnote{An alternate and definitely more magically colorful definition of a New York second is given by Sir Terry Pratchett as ``The shortest unit of time in the multiverse is the New York Second, defined as the period of time between the traffic lights turning green and the cab behind you honking.”}. Though for this thesis we mostly used data recorded at a slightly lower cadence of 64\,Sa/S unless otherwise specified. 
\paragraph*{SWEAP}\label{sec:sweap} Solar Wind Electrons Alphas and Protons, or SWEAP, is the particle instrument suite on PSP; it comprises 4 sensor instruments and provides complete measurements of electrons, alphas and protons, which together make up almost 99\% of the solar wind \citep{Kasper2016}. Solar Probe Cup (SPC) and Solar Probe Analyzer (SPAN) make up SWEAP. We are mostly interested in SPC, which is a fast Faraday cup that looks directly at the Sun to measure the ion flux and its angle. The native cadence of SPC is 1\,Hz or 1\,Sa/s at an angular resolution of $\mathrm{10}^\circ$, though in another mode the cadence can go as high as 16\,Hz at $\mathrm{1}^\circ$ resolution \citep{Kasper2016}. For this thesis we used the 1\,Hz data from SPC, though for the purpose of computing the anisotropy we resampled the data to 0.1\,Hz (see \Cref{chap:chap7}).

\paragraph*{\texttt{psp} dataset} \label{sec:pspds} We used the PSP data from its first encounter with the Sun (October 31 to November 11, 2018). From SPC we got the radial proton temperature/thermal speed. Since SPC only measures the radial temperature, and the proton temperature is significantly anisotropic \citep{Huang2020}, for the computation of $\beta_{\parallel \rm p}$ we needed to ensure that the temperature we were measuring was indeed the parallel temperature. Thus, we only considered data points where the magnetic field was mostly radial. Any interval where the angle between $B_{\rm r} \mathbf{\hat{r}}$ and $\mathbf{B}$ was more than 30\,degrees was not considered. This ensured that the temperature measured by SPC was indeed the parallel temperature.

We compute the temperature anisotropy at a much lower cadence than the temperature measurement ($\sim$\,0.1\,Sa/s) using the methodology described in \citet{Huang2020}. Once we have the anisotropy data along with the proton density and magnetic field strength, we compute $\beta_{\parallel \rm p}$ according to \Cref{eq:beta}. We then calculate $\gamma$ and $\omega_{\rm {nl}}$ using the same methodology as mentioned in \Cref{sec:wndds}.

\section{Simulation datasets}\label{sec:intr42}

Though spacecraft provide plenty of in-situ data, because of several restrictions (e.g., cost, planning, resolution, and cadence) not every plasma phenomenon can be studied using spacecraft data. Thus physicists often use simulations to study different systems or to verify predictions made by theories under certain conditions. For space plasmas there are 3 types of simulations that are usually carried out.

\subsection{MHD Simulations}\label{sec:mhd}

MHD simulations\index{simulation!MHD} treat the plasma as an electrically conducting fluid having one characteristic velocity and temperature, and study its dynamics by numerically solving the required MHD equations. For more details about the underlying physics and some of the relevant equations see \citet{Hossain1995}.

\subsection{Hybrid Simulations}\label{sec:hybd}

In hybrid simulations\index{simulation!hybrid}, instead of treating the whole system as a fluid, electrons are treated as a massless fluid and protons are treated as massive particles. For the details of such simulations and the equations used in them refer to \citet{Terasawa1986, Vasquez1995, Parashar2009}.

\subsection{Kinetic Simulations}\label{sec:kntc}

In kinetic simulations\index{simulation!kinetic} with particle-in-cell (PIC) methods we solve the Vlasov\index{Vlasov} equation (see \Cref{eq:vlas}) along with Maxwell's equations (see \Crefrange{eq:maxwell1}{eq:maxwell4}) by treating the plasma as a collection of individual particles.
PIC simulations are often performed on either a 2.5-D system or a full 3-D system.

\paragraph*{3-D PIC Simulations}\label{sec:3pic} In a full 3-D system\index{simulation!3-D PIC} the parameters are set up such that the vectors can fluctuate in all three directions. For this thesis, we used the output of a fully kinetic 3-D simulation performed by \citet{Roytershteyn2015}. In the simulation the system was initially perturbed ($|\delta\mathbf{B}|^2 = B_{\rm 0}^2$) and was then left to evolve under its own dynamics. The undisturbed particle distributions were Maxwellian (for both protons and electrons) at equal temperature ($T_{\rm p} = T_{\rm e}$). Some other parameters were $\beta_{\rm p}=\beta_{\rm e}= 0.5, R_{\rm p}=1, \omega_{\rm pe}/\Omega_{\rm ce} = 2$, $m_{\rm p}/m_{\rm e} = 50$, and the background magnetic field was in the $z$-direction. The size of the box was $l \approx 42\,d_{\rm p}$, with a resolution of $2048^3$ cells. The average number of particles in each cell was 150, making a total of $\sim\,2.6 \times 10^{12}$ particles. We refer to this dataset as \texttt{ros}.

\paragraph*{2.5-D Simulations}\label{sec:2pic} In the case of a 2.5-D simulation\index{simulation!2.5-D} the plasma parameters are allowed to vary only in 2 spatial dimensions, though the vectors retain all 3 components. Depending on the direction of the background magnetic field one can further classify 2.5-D simulations into the following classes: (a) \textbf{2.5-D perpendicular PIC simulation}: the parameters are allowed to vary only in 2 spatial dimensions, with the background magnetic field perpendicular to the simulation plane. (b) \textbf{2.5-D oblique PIC simulation}: the parameters are allowed to vary only in 2 spatial dimensions, with the background magnetic field neither parallel nor perpendicular to the simulation plane. (c) \textbf{2.5-D parallel PIC simulation}: the parameters are allowed to vary only in 2 spatial dimensions, with the background magnetic field parallel to the simulation plane.\\

In this thesis we used both perpendicular and parallel simulations. For the 2.5-D perpendicular simulation we used the output from the P3D code \citep{Zeiler2002}. The initial conditions were such that $m_{\rm p}/m_{\rm e} = 25, T_{\rm p} = T_{\rm e}, \beta_{\rm p} = \beta_{\rm e} = 0.6, \delta B = 0.5\,B_{\rm 0}$, and the length of the box was $l = 149.6\,d_{\rm p}$ at a resolution of $4096^2$, with each cell having an average of 3200 particles of each species, resulting in a total of $1.07 \times 10^{11}$ particles. For more details on the simulation refer to \citet{Parashar2018}. We refer to this dataset as \texttt{149p6}.

We also used a 2.5-D parallel simulation where the background magnetic field was in the plane, with $\mathbf{B}_{\rm 0} = B_{\rm 0} \hat{x}, m_{\rm p}/m_{\rm e} = 25, \omega_{\rm pe}/\Omega_{\rm ce} = 8, \beta_{\rm p}= \beta_{\rm e} = 0.6$. The size of the box was $l_\parallel = 149.6\,d_{\rm p}$ (in the parallel direction) and $l_\perp = 37.4\,d_{\rm p}$ (in the perpendicular direction) at a resolution of $4043 \times 1000$, with an average of 800 particles/cell, resulting in a total of $6.5 \times 10^{9}$ particles. More information about this simulation can be found in \citet{Parashar2019, Gary2020}. We refer to this dataset as \texttt{kaw}.

For the two simulation datasets (\texttt{kaw} and \texttt{149p6}), once we have the values of $R_{\rm p}$ and $\beta_{\parallel \rm p}$ we compute $\gamma$ and $\omega_{\rm nl}$ in the same way as mentioned in \Cref{sec:wndds}.
For the case \texttt{ros}, for computation of $\omega_{\rm nl}$, because of some computational limitations, the value of lag was kept fixed at $1\,d_{\rm p}$. \begin{table}[ht] \centering \caption{Datasets used in this study} \begin{tabular}{ m{0.1\linewidth} m{0.3\linewidth} m{0.20\linewidth} m{0.3\linewidth}} \\ \hline \\ Dataset & Type of data & median values & List of chapters\\ \\ \hline \\ \texttt{149p6} & PIC Simulation (2.5-D) & $R_{\rm p} = 0.89$, \newline $\beta_{\rm \parallel p} = 0.67$ & \Cref{chap:chap5,chap:chap7}\\ \\ %\hline \\ \texttt{kaw} & PIC Simulation (2.5-D) & $R_{\rm p} = 0.83$, \newline $\beta_{\parallel \rm p} = 0.64$ & \Cref{chap:chap5,chap:chap7} \\ \\ %\hline \\ \texttt{ros} & PIC Simulation (3-D) & $R_{\rm p} = 1.04 $, \newline $\beta_{\parallel \rm p} = 0.84$ & \Cref{chap:chap5,chap:chap7,chap:chap8} \\ \\ %\hline \\ \texttt{mms} &Spacecraft Observation (Magnetosheath) & see \Cref{tab:mmsdata} & \Cref{chap:chap5,chap:chap7} \\ \\ %\hline \\ \texttt{wnd} & Spacecraft Observation (Solar Wind at 1\,au) & $R_{\rm p} = 0.50$, \newline $\beta_{\parallel \rm p} = 0.69$ & \Cref{chap:chap5,chap:chap7} \\ \\ %\hline \\ \texttt{psp} & Spacecraft Observation (Solar Wind at 0.2\,au) & $R_{\rm p} = 1.44 $, \newline $\beta_{\parallel \rm p} = 0.50$ & \Cref{chap:chap6,chap:chap7} \\ \\ \hline \end{tabular} \label{tab:datachap} \end{table}
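To make the computation of the derived quantities used throughout this chapter concrete, the short Python sketch below evaluates $\beta_{\parallel \rm p}$ and the Alfv\'en speed from the measured moments. It is illustrative only (not the actual analysis code used for the datasets above) and assumes SI units together with the standard definitions $\beta_{\parallel \rm p} = 2\mu_0 n_{\rm p} k_B T_{\parallel \rm p}/B^2$ and $v_{\rm A} = B/\sqrt{\mu_0 n_{\rm p} m_{\rm p}}$, which are taken here to be what \Cref{eq:beta} and \Cref{eq:alfv} refer to; the example numbers are typical 1\,au values rather than values from any specific dataset.

\begin{verbatim}
import numpy as np

MU0 = 4e-7 * np.pi       # vacuum permeability [H/m]
KB  = 1.380649e-23       # Boltzmann constant [J/K]
MP  = 1.67262192e-27     # proton mass [kg]

def beta_parallel(n_p, T_par, B):
    # parallel proton beta from density [m^-3], parallel
    # temperature [K], and magnetic field strength [T]
    return 2.0 * MU0 * n_p * KB * T_par / B**2

def alfven_speed(n_p, B):
    # Alfven speed [m/s] from proton density [m^-3] and field [T]
    return B / np.sqrt(MU0 * n_p * MP)

# Example with typical 1 au values: n_p = 5 cm^-3, T_par = 1e5 K, B = 5 nT
print(beta_parallel(5.0e6, 1.0e5, 5.0e-9))  # ~0.69
print(alfven_speed(5.0e6, 5.0e-9))          # ~4.9e4 m/s (~49 km/s)
\end{verbatim}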
{ "alphanum_fraction": 0.5891427805, "avg_line_length": 65.5806451613, "ext": "tex", "hexsha": "99c69b3dbf34304dab96effa174ff52b0e04faa0", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "5ee6e2ff80ebc0192f4f2bc592273eb9c3712796", "max_forks_repo_licenses": [ "FSFAP" ], "max_forks_repo_name": "qudsiramiz/ud-phd-thesis", "max_forks_repo_path": "chap4.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "5ee6e2ff80ebc0192f4f2bc592273eb9c3712796", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "FSFAP" ], "max_issues_repo_name": "qudsiramiz/ud-phd-thesis", "max_issues_repo_path": "chap4.tex", "max_line_length": 132, "max_stars_count": null, "max_stars_repo_head_hexsha": "5ee6e2ff80ebc0192f4f2bc592273eb9c3712796", "max_stars_repo_licenses": [ "FSFAP" ], "max_stars_repo_name": "qudsiramiz/ud-phd-thesis", "max_stars_repo_path": "chap4.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 5707, "size": 22363 }
\section{Introduction}

This specification describes the incremental changes from the Alonzo era of
Cardano to the Babbage era. The main objective of this era is to
make small adjustments in many places, usually to simplify the ledger
or to include features that didn't make it into past eras.
As part of this effort, we also make some changes to the notation used in
these specifications, which should make them easier to understand and
maintain.

Concretely, this specification makes the following changes:

\begin{itemize}
  \item Add reference inputs to transactions
  \item Add inline datums in the UTxO
  \item Add reference scripts
  \item Add transaction fields for specifying and returning collateral
  \item Remove the protocol parameters $\var{d}$ and $\var{extraEntropy}$
  \item Remove the overlay schedule
  \item Change block headers to only include a single VRF value and proof
  \item Remove the pre-filtering of unregistered stake credentials in the reward calculation
\end{itemize}
{ "alphanum_fraction": 0.8116683726, "avg_line_length": 44.4090909091, "ext": "tex", "hexsha": "625adf1c0e1f6d7a265bfd6c73ed7dedbd17e060", "lang": "TeX", "max_forks_count": 29, "max_forks_repo_forks_event_max_datetime": "2022-03-29T12:10:55.000Z", "max_forks_repo_forks_event_min_datetime": "2019-03-25T11:13:24.000Z", "max_forks_repo_head_hexsha": "c5f3e9db1c22af5d284885ddb1785f1bd7755c67", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "RoyLL/cardano-ledger", "max_forks_repo_path": "eras/babbage/formal-spec/introduction.tex", "max_issues_count": 545, "max_issues_repo_head_hexsha": "c5f3e9db1c22af5d284885ddb1785f1bd7755c67", "max_issues_repo_issues_event_max_datetime": "2022-03-31T21:41:28.000Z", "max_issues_repo_issues_event_min_datetime": "2019-03-19T17:23:38.000Z", "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "RoyLL/cardano-ledger", "max_issues_repo_path": "eras/babbage/formal-spec/introduction.tex", "max_line_length": 90, "max_stars_count": 67, "max_stars_repo_head_hexsha": "c5f3e9db1c22af5d284885ddb1785f1bd7755c67", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "RoyLL/cardano-ledger", "max_stars_repo_path": "eras/babbage/formal-spec/introduction.tex", "max_stars_repo_stars_event_max_datetime": "2022-03-29T01:57:29.000Z", "max_stars_repo_stars_event_min_datetime": "2019-03-20T21:30:17.000Z", "num_tokens": 214, "size": 977 }
\subsection{Polynomials}

\noindent
We'll use our derivative formula and induction to show that
\begin{equation*}
	\Laplace{t^n} = \frac{n!}{s^{n+1}} \text{, } n \geq 0.
\end{equation*}
Although the $n = 0$ case is just $\Laplace{1} = \frac{1}{s}$, which we already know, we'll use $n = 1$ as a base case to get a little more insight into where the formula comes from. Starting from
\begin{equation*}
	\Laplace{1} = \Laplace{t'} = \frac{1}{s}
\end{equation*}
and using our derivative formula,
\begin{align*}
	\frac{1}{s} &= s\Laplace{t} - 0 \\
	&\implies \Laplace{t} = \frac{1}{s^2}.
\end{align*}
Now assume the formula holds for some $n \geq 1$,
\begin{equation*}
	\Laplace{t^n} = \frac{n!}{s^{n+1}}.
\end{equation*}
We'll show that the $n+1$ case follows. Since $t^n = \left(\frac{t^{n+1}}{n+1}\right)'$,
\begin{equation*}
	\Laplace{t^n} = \Laplace{\left(\frac{t^{n+1}}{n+1}\right)'} = \frac{n!}{s^{n+1}}.
\end{equation*}
Using our derivative formula and the linearity of the Laplace transform,
\begin{align*}
	\frac{n!}{s^{n+1}} &= \frac{s}{n+1}\Laplace{t^{n+1}} - 0 \\
	&\implies \Laplace{t^{n+1}} = \frac{(n+1)!}{s^{n+2}},
\end{align*}
which is the $n+1$ case, completing the induction and proving the general formula.
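As a quick sanity check, the derivative formula applied to $t^{n+1}$ also gives the recursion $\Laplace{t^{n+1}} = \frac{n+1}{s}\Laplace{t^n}$, and unrolling it for the first few powers gives
\begin{align*}
	\Laplace{t^2} &= \frac{2}{s}\Laplace{t} = \frac{2}{s^3}, \\
	\Laplace{t^3} &= \frac{3}{s}\Laplace{t^2} = \frac{3!}{s^4},
\end{align*}
which matches the general formula.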
{ "alphanum_fraction": 0.6276505513, "avg_line_length": 40.6551724138, "ext": "tex", "hexsha": "024af447def4c5e88ba071fb6f25b1d5260dc2f9", "lang": "TeX", "max_forks_count": 10, "max_forks_repo_forks_event_max_datetime": "2021-08-17T15:21:12.000Z", "max_forks_repo_forks_event_min_datetime": "2020-04-10T05:41:17.000Z", "max_forks_repo_head_hexsha": "20a0efd79057a1f54e093b5021fbc616aab78c3f", "max_forks_repo_licenses": [ "Unlicense" ], "max_forks_repo_name": "aneziac/Math-Summaries", "max_forks_repo_path": "diffEq/laplaceTransforms/derivations/polynomial.tex", "max_issues_count": 26, "max_issues_repo_head_hexsha": "20a0efd79057a1f54e093b5021fbc616aab78c3f", "max_issues_repo_issues_event_max_datetime": "2021-10-07T04:47:03.000Z", "max_issues_repo_issues_event_min_datetime": "2020-03-28T17:44:18.000Z", "max_issues_repo_licenses": [ "Unlicense" ], "max_issues_repo_name": "aneziac/Math-Summaries", "max_issues_repo_path": "diffEq/laplaceTransforms/derivations/polynomial.tex", "max_line_length": 170, "max_stars_count": 39, "max_stars_repo_head_hexsha": "20a0efd79057a1f54e093b5021fbc616aab78c3f", "max_stars_repo_licenses": [ "Unlicense" ], "max_stars_repo_name": "aneziac/Math-Summaries", "max_stars_repo_path": "diffEq/laplaceTransforms/derivations/polynomial.tex", "max_stars_repo_stars_event_max_datetime": "2022-03-17T17:38:45.000Z", "max_stars_repo_stars_event_min_datetime": "2020-03-26T06:20:36.000Z", "num_tokens": 465, "size": 1179 }
\section{Orthogonality}
Recall that
\begin{equation}
	\vec{v}\cdot\vec{w}=v_1 w_1+\ldots+v_n w_n = \|\vec{v}\|\,\|\vec{w}\|\cos(\theta)
,\end{equation}
where $\theta$ is the angle between $\vec{v}$ and $\vec{w}$. Because $\cos\left(\frac{\pi}{2}\right)=0$, we know that (for nonzero vectors) $\vec{v}\cdot\vec{w}=0$ if and only if $\vec{v}$ is orthogonal to $\vec{w}$. In general, given $\vec{v},\vec{w}\in\R^n$, $\vec{v}$ is orthogonal to $\vec{w}$ if and only if the angle between them is $\frac{\pi}{2}$.
\begin{definition}
	Let $V$ be any inner product space. A basis $\vec{u_1},\ldots,\vec{u_n}\in V$ is orthogonal if $\vec{u_i}\cdot \vec{u_j} = 0$ whenever $i\neq j$.\newline
	In addition, if $\|\vec{u_i}\|=1$ for all $i$, the basis is orthonormal.
\end{definition}
Here are a few examples of orthonormal and orthogonal bases:
\begin{enumerate}
	\item $\SmallMatrix{1\\0}\SmallMatrix{0\\1}$ is an orthonormal basis.
	\item $\SmallMatrix{2\\0}\SmallMatrix{0\\1}$ is an orthogonal basis.
	\item $\SmallMatrix{1\\0\\0}\SmallMatrix{0\\1\\0}\SmallMatrix{0\\0\\1}$ is an orthonormal basis.
\end{enumerate}
If $\vec{v_1},\ldots,\vec{v_n}$ is an orthogonal basis, then
\begin{equation}
	\frac{\vec{v_1}}{\|\vec{v_1}\|},\frac{\vec{v_2}}{\|\vec{v_2}\|},\ldots,\frac{\vec{v_n}}{\|\vec{v_n}\|}
\end{equation}
is an orthonormal basis.
\begin{problem}
	Consider $\vec{u_1}=\SmallMatrix{1\\2}$ and $\vec{u_2}=\SmallMatrix{2\\-1}$. These two vectors are linearly independent because $\vec{u_1}$ is not a multiple of $\vec{u_2}$, and since $\vec{u_1}\cdot\vec{u_2}=2-2=0$, they form an orthogonal basis. We have
	\begin{equation}
		\|\vec{u_1}\| = \sqrt{1^2+2^2} =\sqrt{5}, \qquad \|\vec{u_2}\| = \sqrt{2^2+(-1)^2} =\sqrt{5},
	\end{equation}
	so normalizing gives the orthonormal basis
	\begin{equation}
		\vec{v_1}=\begin{pmatrix} \frac{1}{\sqrt{5} }\\ \frac{2}{\sqrt{5} } \end{pmatrix} ,\qquad \vec{v_2}=\begin{pmatrix} \frac{2}{\sqrt{5} }\\ -\frac{1}{\sqrt{5} } \end{pmatrix}.
	\end{equation}
\end{problem}
\begin{prop}
	Assume $\vec{v_1},\ldots,\vec{v_n}\in V$ with $\vec{v_i}\neq \vec{0}$ for all $i$. Assume that $<\vec{v_i},\vec{v_j}> = 0$ whenever $i\neq j$. Then $\{\vec{v_1},\ldots,\vec{v_n}\}$ is linearly independent.
	\begin{proof}
		Assume that $c_1\vec{v_1}+\ldots+c_n\vec{v_n}=\vec{0}$. Let $i\in\{1,\ldots,n\}$. Then
		\begin{align*}
			<c_1\vec{v_1}+\ldots+c_n \vec{v_n}, \vec{v_i}> &= <\vec{0},\vec{v_i}>\\
			c_1<\vec{v_1},\vec{v_i}> + c_2<\vec{v_2},\vec{v_i}>+\ldots+ c_i<\vec{v_i},\vec{v_i}>+\ldots+c_n<\vec{v_n},\vec{v_i}> &= 0\\
			c_i<\vec{v_i},\vec{v_i}> &= 0\\
			c_i &= 0
		.\end{align*}
		Since $i$ was arbitrary, every coefficient is zero, so $\vec{v_1},\ldots,\vec{v_n}$ are linearly independent.
	\end{proof}
\end{prop}
\begin{corollary}
	If $\dim(V)=n$ and $\vec{v_1},\ldots,\vec{v_n}$ are $n$ nonzero vectors such that $<\vec{v_i},\vec{v_j}> = 0$ whenever $i\neq j$, then $\{\vec{v_1},\ldots,\vec{v_n}\}$ is a basis.
\end{corollary}
\begin{theorem}
	Let $\vec{u_1},\ldots,\vec{u_n}$ be an orthonormal basis for $V$. Let $\vec{v}\in V$. Then we know $\vec{v}=c_1\vec{u_1}+\ldots+c_n \vec{u_n}$. In fact, $c_i=<\vec{v},\vec{u_i}>$ and
	\begin{equation}
		\|\vec{v}\| = \sqrt{<\vec{v},\vec{u_1}>^2+<\vec{v},\vec{u_2}>^2 + \ldots + <\vec{v},\vec{u_n}>^2}.
	\end{equation}
\end{theorem}
\begin{problem}
	Let $\mathbb{P}^2$ be the polynomials of degree $\le 2$ on $[0,1]$, with the $L^2$ inner product
	\begin{equation}
		<p,q> = \int_0^{1}p(x)q(x)dx.
	\end{equation}
	Let $p_1=1,p_2=x-\frac{1}{2},p_3=x^2-x+\frac{1}{6}$. Then
	\begin{align}
		<p_1,p_2> &= \int_0^{1}x-\frac{1}{2}\,dx=0,\\
		<p_1,p_3>&=\int_0^{1}x^2-x+\frac{1}{6}\,dx=0,
	\end{align}
	and similarly $<p_2,p_3>=0$, so this is an orthogonal basis. To check whether it is also orthonormal, we must compute $<p_1,p_1>,<p_2,p_2>,<p_3,p_3>$.
\end{problem}
INSERT NOTES FROM 03.08 HERE

So if we have the basis $\{\SmallMatrix{1\\2},\SmallMatrix{2\\1}\}$ and want to make it orthogonal, we keep $\vec{v_1}=\SmallMatrix{1\\2}$.
Then $\vec{v_2}$ is equal to
\begin{align*}
	\begin{pmatrix} 2\\1 \end{pmatrix} -\frac{\Big<\begin{pmatrix} 2\\1 \end{pmatrix} , \begin{pmatrix} 1\\2 \end{pmatrix} \Big>}{\left\| \begin{pmatrix} 1\\2 \end{pmatrix} \right\|^2 }\cdot \begin{pmatrix} 1\\2 \end{pmatrix}
	&= \begin{pmatrix} 2\\1 \end{pmatrix} -\frac{4}{\left(\sqrt{1^2+2^2} \right)^2 }\begin{pmatrix} 1\\2 \end{pmatrix} \\
	&= \begin{pmatrix} 2\\1 \end{pmatrix} -\frac{4}{5}\begin{pmatrix} 1\\2 \end{pmatrix} \\
	&= \begin{pmatrix} \frac{10}{5}\\\frac{5}{5} \end{pmatrix} -\begin{pmatrix} \frac{4}{5}\\\frac{8}{5} \end{pmatrix} = \begin{pmatrix} \frac{6}{5} \\ -\frac{3}{5}\end{pmatrix}
.\end{align*}
We can conclude that our orthogonal basis is $\{\SmallMatrix{1\\2},\SmallMatrix{\frac{6}{5}\\-\frac{3}{5}}\}$.

Let's now use our basis and write
\[
	\begin{pmatrix} 2\\3 \end{pmatrix} =c_1\begin{pmatrix} 1\\2 \end{pmatrix} +c_2\begin{pmatrix} \frac{6}{5}\\-\frac{3}{5} \end{pmatrix}
.\]
First we compute the inner products
\begin{align*}
	<\begin{pmatrix} 2\\3 \end{pmatrix} ,\begin{pmatrix} 1\\2 \end{pmatrix}> &= 2+6=8,\\
	<\begin{pmatrix} 2\\3 \end{pmatrix} ,\begin{pmatrix} \frac{6}{5}\\-\frac{3}{5} \end{pmatrix}> &= \frac{12}{5}-\frac{9}{5}=\frac{3}{5}
.\end{align*}
Since our basis is orthogonal but not orthonormal, Theorem 4.9 from the book says we get the coefficients by dividing these inner products by the squared norms:
\begin{align*}
	c_1&=\frac{8}{\left\|\begin{pmatrix} 1\\2 \end{pmatrix}\right\|^2}=\frac{8}{\left(\sqrt{1^2+2^2}\right)^2}=\frac{8}{5},\\
	c_2&=\frac{\frac{3}{5}}{\left\|\begin{pmatrix} \frac{6}{5}\\-\frac{3}{5} \end{pmatrix}\right\|^2}=\frac{\frac{3}{5}}{\left(\frac{6}{5}\right)^2+\left(-\frac{3}{5}\right)^2}
	=\frac{\frac{3}{5}}{\frac{36}{25}+\frac{9}{25}}=\frac{\frac{3}{5}}{\frac{9}{5}}=\frac{1}{3}
.\end{align*}
As a check,
\begin{align*}
	\frac{8}{5}\begin{pmatrix} 1\\2 \end{pmatrix} + \frac{1}{3}\begin{pmatrix} \frac{6}{5}\\-\frac{3}{5} \end{pmatrix}
	=\begin{pmatrix} \frac{8}{5}\\\frac{16}{5} \end{pmatrix} +\begin{pmatrix} \frac{2}{5}\\-\frac{1}{5} \end{pmatrix} =\begin{pmatrix} 2\\3 \end{pmatrix}
.\end{align*}
\begin{problem}
	Example of an orthogonal basis. Let $\mathbb{T}^{n}$ be the vector space of trigonometric polynomials,
	\[
	\mathbb{T}^{n}=\left\{ \sum_{0\le j+k\le n}a_{jk}\sin^{j}(x)\cos^k(x) \right\}
	,\]
	with the $L^2$ inner product
	\[
	<f,g> = \int_{-\pi}^{\pi}f\cdot g
	.\]
	An orthogonal basis is $\{1,\cos(x),\sin(x),\cos(2x),\sin(2x),\cos(3x),\sin(3x),\ldots\}$. To verify orthogonality for any pair of these basis elements, we would compute an integral such as
	\[
	\int_{-\pi}^{\pi}\sin(2x)\cos(4x)\,dx = 0
	.\]
	Expanding a function in this basis gives its Fourier series.
\end{problem}
\subsection{Orthogonal Matrices}
\begin{definition}
	A square matrix $Q$ is orthogonal if $Q^{T}Q=Q\cdot Q^{T}=I$.
\end{definition}
If $Q$ is orthogonal, then $Q^{-1}=Q^{T}$. Moreover, taking determinants of $Q\cdot Q^{T}=I$ gives
\begin{itemize}
	\item $det(Q)det(Q^{T})=det(I)=1$
	\item $(det(Q))^2=1$
	\item $det(Q)=\pm 1$
\end{itemize}
Let $A=\begin{pmatrix} a_{11}&a_{12}\\a_{21}&a_{22} \end{pmatrix} $.
Suppose that $A$ is orthogonal. Then
\begin{align*}
	\begin{pmatrix} a_{11}&a_{12}\\a_{21}&a_{22} \end{pmatrix}\cdot\begin{pmatrix} a_{11}&a_{21}\\a_{12}&a_{22} \end{pmatrix} &=\begin{pmatrix} 1&0\\0&1 \end{pmatrix} \\
	a_{11}^2+a_{12}^2&=1\\
	a_{11}a_{21}+a_{12}a_{22}&=0\\
	a_{21}a_{11}+a_{22}a_{12}&=0\\
	a_{21}^2+a_{22}^2&=1
.\end{align*}
If we plot the point $(a_{11},a_{12})$, the first equation says it lies on the unit circle, so we can write $a_{11}=\cos\left( \theta \right)$ and $a_{12}=\sin(\theta)$ for some angle $\theta$.
\begin{prop}
	$Q$ is orthogonal if and only if its columns form an orthonormal basis.
	\begin{proof}
		Let $Q=[U_1\vdots U_2 \vdots\ldots\vdots U_n]$.
		\[
			Q^{T}=\begin{bmatrix} U_1^{T}\\U_2^{T}\\ \vdots \\ U_n^{T} \end{bmatrix}
		.\]
		In $Q^{T}Q$, the $i,j^{th}$ entry is $U_i^{T}\cdot U_j$, and $Q^{T}Q=I$ exactly when
		\[
		U_i^{T}\cdot U_j = \begin{cases} 0&i\neq j\\ 1&i=j \end{cases}
		.\]
		So $Q$ is orthogonal if and only if the $U_i$'s form an orthonormal basis.
	\end{proof}
\end{prop}
\begin{problem}
	Let $A=\begin{bmatrix} 3&5\\7&\frac{1}{2} \end{bmatrix} $. If $A$ were orthogonal, we would have $A^{T}\cdot A=A\cdot A^{T}=I$. Let's check whether the columns form an orthonormal basis:
	\[
	\begin{bmatrix} 3&7\\5&\frac{1}{2} \end{bmatrix} \begin{bmatrix} 3&5\\7&\frac{1}{2} \end{bmatrix} \neq \begin{bmatrix} 1&0\\0&1 \end{bmatrix}
	.\]
	This fails because, for example, the off-diagonal entry is $3\cdot 5+7\cdot \frac{1}{2}\neq0$. Now let's try letting $A=\begin{bmatrix} 3&-7\\7&3 \end{bmatrix} $:
	\[
	\begin{bmatrix} 3&7\\-7&3 \end{bmatrix} \begin{bmatrix} 3&-7\\7&3 \end{bmatrix} =\begin{bmatrix} 58&0\\0&58 \end{bmatrix}
	.\]
	The off-diagonal entries are zero, so the columns are orthogonal, but they do not have length one, so we need to normalize them:
	\begin{align*}
		\left\| \begin{bmatrix} 3\\7 \end{bmatrix} \right\|=\sqrt{3^2+7^2} =\sqrt{58} && \left\| \begin{bmatrix} -7\\3 \end{bmatrix} \right\|=\sqrt{(-7)^2+3^2} =\sqrt{58}
	\end{align*}
	\[
	A=\begin{bmatrix} \frac{3}{\sqrt{58} }&-\frac{7}{\sqrt{58} }\\\frac{7}{\sqrt{58} }&\frac{3}{\sqrt{58} } \end{bmatrix}
	\]
	is orthogonal.
\end{problem}
Let's let $Q=\begin{pmatrix} a&b\\c&d \end{pmatrix} $ and suppose $Q$ is orthogonal.
\[
Q^{T}\cdot Q=\begin{pmatrix} a&c\\b&d \end{pmatrix} \begin{pmatrix} a&b\\c&d \end{pmatrix} =\begin{pmatrix} 1&0\\0&1 \end{pmatrix}
,\]
which gives
\begin{align*}
	a^2+c^2&=1\\
	ab+cd&=0\\
	ab+cd&=0\\
	b^2+d^2&=1
.\end{align*}
Given that the vectors $\begin{bmatrix} a\\c \end{bmatrix} ,\begin{bmatrix} b\\d \end{bmatrix} $ lie on the unit circle, we can write
\begin{align*}
	a=\cos\theta&&c=\sin \theta\\
	b=\cos\phi&&d=\sin\phi
,\end{align*}
and we can determine that
\[
0=ab+cd=\cos\theta\cos\phi + \sin \theta\sin\phi=\cos(\theta-\phi)
.\]
Since $\cos(\theta-\phi)=0$, we get $\phi = \theta\pm\frac{\pi}{2}$, so either $b=-\sin \theta, d=\cos\theta$ or $b=\sin \theta, d=-\cos\theta$.
We therefore have $Q$ in one of two forms:
\[
\begin{pmatrix} \cos\theta&-\sin \theta\\ \sin \theta&\cos\theta \end{pmatrix}\text{ or }\begin{pmatrix} \cos\theta & \sin \theta \\ \sin \theta &-\cos\theta \end{pmatrix}
.\]
The determinant of the left matrix is $1$, and the determinant of the right matrix is $-1$. The first gives a counterclockwise rotation by $\theta$; the second gives a reflection across the line making angle $\frac{\theta}{2}$ with the $x$-axis.

To summarize: orthogonal matrices are square and satisfy $Q^{T}\cdot Q=Q Q^{T}=I$, and every $2\times 2$ orthogonal matrix has the form
\[
\begin{pmatrix} \cos\theta&-\sin \theta\\ \sin \theta & \cos \theta \end{pmatrix} \text{ or } \begin{pmatrix} \cos\theta& \sin \theta\\ \sin \theta & -\cos\theta \end{pmatrix}
.\]
In general, if $Q$ is orthogonal, then $det(Q)=\pm 1$.
\begin{theorem}
	The product of two orthogonal matrices is orthogonal.
	Recall that if $Q$ is orthogonal, then $Q^{-1}=Q^{T}$. The orthogonal $n\times n$ matrices satisfy
	\begin{itemize}
		\item Closed under matrix multiplication (the product of two orthogonal matrices is orthogonal).
		\item Multiplication is associative.
		\item They all have inverses.
		\item The identity matrix is orthogonal.
	\end{itemize}
\end{theorem}
\subsection{Vector Spaces}
Up until now we've been using a basis to give every vector a location (coordinates). Once we also have an inner product, we can measure lengths and angles. Let $V$ be an inner product space and let $W \le V$ be a finite dimensional subspace.
\begin{definition}
	$\vec{z}\in V$ is orthogonal to $W$ if it is orthogonal to every vector in $W$.
\end{definition}
\begin{note}
	If $\vec{w_1},\ldots,\vec{w_n}$ is a basis for $W$, then $\vec{z}$ is orthogonal to $W$ if and only if $\vec{z}$ is orthogonal to $\vec{w_1},\ldots,\vec{w_n}$.
\end{note}
\begin{definition}
	The orthogonal projection of $\vec{v}$ onto $W$ is the vector $\vec{w}\in W$ such that $\vec{z}=\vec{v}-\vec{w}$ is orthogonal to $W$.
\end{definition}
\begin{theorem}
	Let $\vec{u_1},\ldots,\vec{u_n}$ be an orthonormal basis for $W$. Let $\vec{v}\in V$. The orthogonal projection of $\vec{v}$ onto $W$ is
	\[
	\vec{w}=c_1\vec{u_1}+\ldots+c_n \vec{u_n}\text{ where } c_i=<\vec{v},\vec{u_i}>
	.\]
\end{theorem}
\begin{note}
	If $\vec{v_1},\ldots,\vec{v_n}$ is merely an orthogonal basis for $W$, then
	\begin{align*}
		\vec{w}=a_1\vec{v_1}+\ldots+a_n \vec{v_n},\quad\text{where }
		a_i=\frac{\left<\vec{v},\vec{v_i} \right>}{\|\vec{v_i}\|^2}
	.\end{align*}
\end{note}
\begin{definition}
	Let $W,Z\le V$ be subspaces. $W$ is orthogonal to $Z$ if every vector in $W$ is orthogonal to every vector in $Z$. That is,
	\[
	<\vec{w},\vec{z}> = 0
	\]
	for every $\vec{w}\in W, \vec{z}\in Z$.
\end{definition}
\begin{note}
	We only need to check this on bases of $W$ and $Z$.
\end{note}
\begin{definition}
	Let $W\le V$ be a subspace. The orthogonal complement of $W$, written $W^{\perp}$, is the set of vectors in $V$ orthogonal to $W$:
	\[
	W^{\perp}=\left\{ \vec{v}\in V \mid \left<\vec{v},\vec{w} \right> =0 \text{ for all } \vec{w}\in W \right\}
	.\]
\end{definition}
\begin{theorem}
	Let $W\le V$ be a finite dimensional subspace. Every $\vec{v}\in V$ can be written uniquely as
	\[
	\vec{v}=\vec{w}+\vec{z}
	\]
	where $\vec{w}\in W$ and $\vec{z}\in W^{\perp}$.
\end{theorem}
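To see the decomposition concretely, take $V=\R^2$ with the dot product, let $W$ be the span of $\SmallMatrix{1\\2}$, and let $\vec{v}=\SmallMatrix{2\\3}$ (the same vector we expanded earlier). Then
\[
\vec{w}=\frac{<\vec{v},\SmallMatrix{1\\2}>}{\left\|\SmallMatrix{1\\2}\right\|^2}\SmallMatrix{1\\2}=\frac{8}{5}\SmallMatrix{1\\2}=\SmallMatrix{\frac{8}{5}\\\frac{16}{5}},
\qquad
\vec{z}=\vec{v}-\vec{w}=\SmallMatrix{\frac{2}{5}\\-\frac{1}{5}},
\]
and indeed $<\vec{z},\SmallMatrix{1\\2}>=\frac{2}{5}-\frac{2}{5}=0$, so $\vec{z}\in W^{\perp}$ and $\vec{v}=\vec{w}+\vec{z}$ is exactly the decomposition promised by the theorem.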
{ "alphanum_fraction": 0.5927056088, "avg_line_length": 46.6576271186, "ext": "tex", "hexsha": "3801db95b613ca36c62b1e1c6e63feae9255c5cc", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "a501bcb919b60bc35fa43b99eb6ed2a2630cb100", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "therealkeyisme/Math-Notes", "max_forks_repo_path": "math363/chapters/4.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "a501bcb919b60bc35fa43b99eb6ed2a2630cb100", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "therealkeyisme/Math-Notes", "max_issues_repo_path": "math363/chapters/4.tex", "max_line_length": 335, "max_stars_count": 1, "max_stars_repo_head_hexsha": "a501bcb919b60bc35fa43b99eb6ed2a2630cb100", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "CameronSWilliamson/GU-MATH", "max_stars_repo_path": "math363/chapters/4.tex", "max_stars_repo_stars_event_max_datetime": "2021-05-18T20:55:25.000Z", "max_stars_repo_stars_event_min_datetime": "2021-05-18T20:55:25.000Z", "num_tokens": 5594, "size": 13764 }
\documentclass[11pt]{article} \usepackage{graphicx} \usepackage{fancyhdr} \usepackage{multicol} \usepackage{listings} \usepackage[margin=1in]{geometry} \usepackage[colorlinks=true, linkcolor=black, urlcolor=cyan]{hyperref} \begin{document} \begin{titlepage} \begin{center} %\includegraphics[height=3.5cm]{images/Standard_CUAUV.jpg}\\[0.2cm] \textsl{\huge MIT Splash}\\[0.5cm] {\huge Fall 2014}\\[0.2cm] \rule{\linewidth}{0.5mm}\\[0.2cm] {\Huge Machine Learning and Audio Analysis with Python} \rule{\linewidth}{0.5mm}\\[0.4cm] \huge Course Notes\\[0.2cm] \large Daryl Sew \begin{figure}[h!] \centering \includegraphics[scale=0.13]{ml_map.png} \caption{Flow chart for selecting the right class of algorithm for your problem from \texttt{scikit-learn} } \end{figure} \end{center} \end{titlepage} % set up header and footer \pagestyle{fancy} \fancyhf{} \setlength{\headheight}{30pt} \renewcommand{\headrulewidth}{0.4pt} \renewcommand{\footrulewidth}{0.4pt} \rhead{Machine Learning and Audio Analysis with Python} \rfoot{Fall 2014} \cfoot{\thepage} % Table of contents \tableofcontents \pagebreak \section{Overview} Machine learning is a field of computer science that concerns writing programs that can make and improve predictions or behaviors based on data inputs. The applications of machine learning are very diverse - they range from self driving cars to spam filters to autocorrect algorithms and much more. Using scikit-learn, an open source machine learning library for Python, we'll cover reinforcement learning (the kind used to create artificial intelligence for games like chess), supervised learning (the kind used in handwriting recognition), and unsupervised learning (the kind eBay uses to group its products). We'll then cover audio analysis through Fourier transforms with numpy, an open source general purpose computational library for Python, and we'll use our newfound audio analysis and machine learning skills to write very basic speech recognition software. Applications of machine learning to the fields of multitouch gesture recognition and computer vision will also be discussed, drawing from my work at Tesla and research on self driving cars and autonomous submarines. \section{What is Machine Learning?} All machine learning algorithms aim to take observations from a system and produce a model of that system. The key words here are 'system' and 'model'. Systems include anything from stock markets to populations of organisms to the environment surrounding a robot, essentially anything imaginable for which you can record observations. Models are a set of mathematical rules that describe a system. Note that in the process of making observations about a system, there will almost always be measurement error - we refer to this as noise, and a good data scientist is able to apply machine learning algorithms in a way that extracts regularities from observations and models those rather than modeling noise. \subsection{A Brief, Vague History} The first computer programs were based on explicit instructions; if this, then that. This type of programming is well suited to many tasks, but by no means could these programs be considered 'intelligent' - they were deterministic and could only react to whatever situations their programmer had manually prepared them for. One of the first intelligent programs was written by Arthur Samuel at IBM in 1952 - he applied reinforcement learning to the game of checkers in order to create a model that ranked playing strategies. 
This program could now improve its performance each game (update its model favorably), and mimicked intelligence. Around the same time, Marvin Minsky (MIT Professor, started CSAIL, AI pioneer) and Seymour Papert (MIT Professor, inventor of NetLogo + Lego Mindstorms, learning pioneer) were working on something called 'perceptrons'. Perceptrons were a primitive form of machine learning classifier modeled after the way our brains work, with neurons. In our bodies, neurons are the mechanisms by which the different parts of our body communicate with each other. Neurons conduct an electrical signal called an action potential, and different neurons are connected by 'synapses'. At a synapse, the next neuron is only triggered if a certain threshold is exceeded. In a perceptron, there are a number of inputs to a 'synapse', and when some function of the input values exceeds a threshold, the output 'fires' from 0 to 1. A single perceptron can only model 'linearly separable' data, as the perceptron model functions as a dividing line between data points. In the late 1960s, Minsky and Papert wrote a book, \textit{Perceptrons: An Introduction to Computational Geometry}, in which they discuss the impossibility of modeling the 'xor' function with this classifier, as xor is not linearly separable. This book, along with a number of similarly unfortunate developments (DARPA cut general AI funding after a switch to mission oriented funding, Speech Understanding Research at CMU failed to produce a satisfactory product for DARPA, the UK killed its AI program), resulted in something referred to as the 'AI Winter', where AI research stagnated until the 1990s. Then new discoveries and a revival of funding across different subfields and countries led to a new explosion in research: multilayer perceptrons, now known as neural networks, were developed which could model nonlinearities in data; speech recognition became a commercial success; and AI was combined with statistics to solve very practical problems, giving birth to the field now known as machine learning. Since then, machine learning has progressed to the point where knowledge of machine learning and statistical problem solving methods is helpful in any STEM field, and it's also in my opinion one of the coolest subfields of computer science.

\subsection{Main Categories}
Unsupervised learning is used to find hidden structure in unlabeled data. Finding structure can answer a lot of questions about a dataset, as well as guide further exploration. An example structure is a set of groups of data points that are considered by some distance metric to be very close together.

Supervised learning is used to infer a function that describes a set of labeled data such that labels can be attached to newly collected, unlabeled data.

Reinforcement learning is used to find the optimal set of actions to take in a dynamic environment.

\section{Unsupervised Learning}
\subsection{K-means clustering}
\subsubsection{Theory}
\begin{enumerate}
  \item Gather a set of points in $n$ dimensions.
  \item Select $k$ points at random from this data set; these will be the '$k$ means' (or centroids).
  \item Assign all points to the 'cluster' containing the nearest centroid.
  \item Update the centroids of the clusters to reflect the new point memberships.
  \item Repeat 3--4 until an update results in no change (this is referred to as convergence, a la sequences and series).
\end{enumerate}
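A minimal sketch of these five steps in Python with numpy looks like the following. (This is just an illustration for these notes -- scikit-learn ships a production-quality implementation in \texttt{sklearn.cluster.KMeans} -- and the function and variable names here are our own.)

\begin{lstlisting}[language=Python]
import numpy as np

def kmeans(points, k, seed=0):
    """Cluster a (num_points, n) array into k clusters (steps 2-5 above)."""
    points = np.asarray(points, dtype=float)
    rng = np.random.default_rng(seed)
    # Step 2: pick k of the points at random as the initial centroids.
    centroids = points[rng.choice(len(points), size=k, replace=False)]
    while True:
        # Step 3: assign every point to the cluster with the nearest centroid.
        dists = np.linalg.norm(points[:, None, :] - centroids[None, :, :], axis=2)
        labels = dists.argmin(axis=1)
        # Step 4: move each centroid to the mean of the points assigned to it.
        new_centroids = centroids.copy()
        for j in range(k):
            members = points[labels == j]
            if len(members) > 0:  # keep the old centroid if a cluster empties
                new_centroids[j] = members.mean(axis=0)
        # Step 5: stop once an update no longer changes anything (convergence).
        if np.allclose(new_centroids, centroids):
            return labels, centroids
        centroids = new_centroids
\end{lstlisting}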
\subsubsection{Applications}
One particularly important application of unsupervised learning is constructing gene families. DNA is central to our existence; it contains all the information our cells (and ourselves) require to replicate. This information is stored physically in the famous double helix (usually wound up into histones) and can be extracted and represented holistically with strings (\verb|``ATGTCTATGAACCATC''|).

Another example relates to some research I've been doing with self driving cars. To give you a little background, let's watch a short video\ldots.

-spiel on sensors in the car \\
-spiel on sensor fusion \\
-spiel on localization \\
-spiel on calibration procedure, applying ransac \\

\section{Supervised Learning}
\subsection{Regression}
Regression is useful for creating a continuous model of a system based on inputs and outputs.
\subsubsection{Theory}
The derivation of linear regression comes from minimizing the squared error between the model's predictions and the observed outputs.
\subsubsection{Applications}
I'm just bringing this up since
\subsection{Support Vector Machines}
SVMs are useful for creating a discrete model of a system based on inputs and outputs.
\subsubsection{Theory}
It's all about finding the best set of dividing hyperplanes for our data\ldots
\subsubsection{Applications}
SVMs are probably my favorite classifier.

-tesla spiel \\
-gesture recognition \\
-audio recognition (save for later) \\

\section{Reinforcement Learning}
\subsection{Theory}
There's actually not much more general reinforcement learning theory than the agent state action model here. However, each
\subsubsection{Applications}
box2d.js
auv proj (link to RISE based control)

\section{Common Pitfalls (Overfitting)}
Most common pitfalls within machine learning lead to overfitting, a condition where a model describes random error, noise, or quirks peculiar to the model inputs, rather than describing the system it is meant to.
\subsection{Classifier Choice}
The choice of classifier has a significant effect on whether data is overfit or not. For example, fitting the following set of (clearly linear) data with a polynomial function results in severe overfitting:
% XXX
But applying a linear regression works just fine.

Likewise, many machine learning algorithms apply something called a 'kernel' to the data they process. The SVM described earlier applied a 'linear' kernel; for very nonlinear data (e.g. orientations), a linear SVM could overfit or badly fit its data.
\subsection{Sampling Bias}
\subsection{Avoiding Overfitting}
William of Ockham, Franciscan friar, theologian and philosopher, devised a principle hundreds of years ago now known as Occam's razor: in the absence of certainty, among competing hypotheses, the one with the fewest assumptions (i.e. the simplest one) should be selected. Occam's razor guides the application of machine learning classifiers in that adhering to it tends to minimize overfitting, as simpler classifiers have less room for overfitting. There also exist 'cross validation' techniques by which training data and test data are split up so as to ensure the model will extrapolate to data not contained in the input set.

\section{Algorithm Design Exercises}
Let's see if we can apply these techniques to a novel problem. Note that, as with many problems within data science/machine learning, there isn't necessarily a 'best' way to solve the following problem, as many different methods can arrive at models that are equally good.
\subsection{Pokemon}
You've been tasked with building an AI for Pokemon from scratch. At a high level, how would you go about doing this? You have unlimited time and resources.
Let's say you're not starting from scratch, and you've been provided with data about every Pokemon Showdown match ever. How does this change your approach? \section{What is Audio?} \end{document}
{ "alphanum_fraction": 0.8033089934, "avg_line_length": 91.686440678, "ext": "tex", "hexsha": "1aab4394a4eb2a1df07374a31798f7d0e5033e75", "lang": "TeX", "max_forks_count": 5, "max_forks_repo_forks_event_max_datetime": "2020-01-01T02:14:06.000Z", "max_forks_repo_forks_event_min_datetime": "2016-09-17T13:57:39.000Z", "max_forks_repo_head_hexsha": "5994f5df14285f03059614b6fead7234ea46f613", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "darylsew/audiolearn", "max_forks_repo_path": "written/notes.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "5994f5df14285f03059614b6fead7234ea46f613", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "darylsew/audiolearn", "max_issues_repo_path": "written/notes.tex", "max_line_length": 2855, "max_stars_count": 8, "max_stars_repo_head_hexsha": "5994f5df14285f03059614b6fead7234ea46f613", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "darylsew/audiolearn", "max_stars_repo_path": "written/notes.tex", "max_stars_repo_stars_event_max_datetime": "2019-07-18T02:00:58.000Z", "max_stars_repo_stars_event_min_datetime": "2015-08-03T18:34:49.000Z", "num_tokens": 2335, "size": 10819 }
% !TeX spellcheck = en_UK % \let\accentvec\vec \documentclass[runningheads,11pt]{llncs} \let\spvec\vec \let\vec\accentvec \newcommand\hmmax{0} \newcommand\bmmax{0} \DeclareFontFamily{U}{mathx}{\hyphenchar\font45} \DeclareFontShape{U}{mathx}{m}{n}{<-> mathx10}{} \DeclareSymbolFont{mathx}{U}{mathx}{m}{n} \DeclareMathAccent{\widebar}{0}{mathx}{"73} \let\spvec\vec \usepackage{amssymb,amsmath} \let\vec\spvec \usepackage{newtxmath,newtxtext} \usepackage[T1]{fontenc} \usepackage[most]{tcolorbox} \def\vec#1{\mathchoice{\mbox{\boldmath$\displaystyle#1$}} {\mbox{\boldmath$\textstyle#1$}} {\mbox{\boldmath$\scriptstyle#1$}} {\mbox{\boldmath$\scriptscriptstyle#1$}}} % lncs size (as printed in books, with small margins): % \usepackage[paperheight=23.5cm,paperwidth=15.5cm,text={13.2cm,20.3cm},centering]{geometry} %\usepackage{fullpage} \usepackage{soulutf8} \soulregister\cite7 \soulregister\ref7 \soulregister\pageref7 \usepackage{hyperref} \usepackage[color=yellow]{todonotes} \hypersetup{final} \usepackage{mathrsfs} \usepackage[advantage,asymptotics,adversary,sets,keys,ff,lambda,primitives,events,operators,probability,logic,mm,complexity]{cryptocode} \usepackage[capitalise]{cleveref} %\crefname{appendix}{Supp.~Mat.}{Supp.~Mat.} %\Crefname{appendix}{Supp.~Mat.}{Supp.~Mat.} \usepackage{cite} \usepackage{booktabs} \usepackage{paralist} \usepackage[innerleftmargin=5pt,innerrightmargin=5pt]{mdframed} \usepackage{caption} \captionsetup{belowskip=0pt} \usepackage{bm} \usepackage{url} %\usepackage{dirtytalk} \usepackage[margin=1in,a4paper]{geometry} \usepackage[normalem]{ulem} \usepackage{dashbox} \newcommand\dboxed[1]{\dbox{\ensuremath{#1}}} \usepackage{setspace} \include{macros} %% Save the class definition of \subparagraph \let\llncssubparagraph\subparagraph %% Provide a definition to \subparagraph to keep titlesec happy \let\subparagraph\paragraph %% Load titlesec \usepackage[compact]{titlesec} %% Revert \subparagraph to the llncs definition \let\subparagraph\llncssubparagraph \newcommand{\oursubsub}[1] {\smallskip\noindent\textbf{#1}} \newcommand{\ourpar}[1] {\smallskip\noindent\emph{#1}} %\title{On Simulation-Extractability of Universal zkSNARKs} \title{Non-Malleability of the Fiat--Shamir Transform Revisited for Multi-round SRS-Based Protocols} %\author{Anonymous submission to Asiacrypt} \author{Chaya Ganesh \and Hamidreza Khoshakhlagh \and Markulf Kohlweiss\inst{1,2} \and Anca Nitulescu \and Michał Zając\inst{3}} \institute{University of Edinburgh, Edinburgh, UK \and IOHK \\ \email{[email protected]} \and Clearmatics, London, UK \\ \email{[email protected]}} \allowdisplaybreaks \begin{document} \sloppy \titlerunning{Non-Malleability of the FS transform Revisited [\ldots]} \maketitle \begin{abstract} The Fiat--Shamir transformation turns public-coin (three round) sigma protocol into signature schemes, non-interactive proof systems, and signatures of knowledge (SoK). The security of the transformation relies on a powerful forking lemma that extracts the secret key or the witness, even in the presence of signing queries for signatures and simulation queries for prove systems and SoK, respectively. We extend this line of work and formally define simulation extractability for protocols that use both the random oracle model (ROM) and a structured reference string (SRS). We then show sufficient conditions for compiling via the Fiat--Shamir transformation public-coin multi-round interactive protocol with SRS into simulation-extractable NIZK proof systems. 
We also consider the case that the SRS is updatable and define a strong simulation extractability notion that allows for simulated proofs with respect to past and present SRS. In the ROM, we obtain simulation-extractable and updatable NIZKs. Importantly, we show that three popular zero knowledge SNARKs --- Plonk, Sonic, and Marlin --- are simulation extractable out-of-the-box. This also results in the first construction of update simulation-extractable SNARKs and succinct updatable SoK. % Faust, Kohlweiss, Marson, and Venturi (INDOCRYPT 2012) showed that % non-interactive zero knowledge (NIZK) proof systems obtained by applying the % Fiat--Shamir transformation to a public-coin sigma protocol are simulation % sound and simulation extractable under lenient conditions. In this paper, we % extend this work and formally define (updatable) simulation extractability for % protocols in the random oracle model (ROM) which also use a structured % reference string (SRS). Furthermore, we show that NIZK proof systems obtained % by applying the Fiat--Shamir transformation to a public-coin multi-round % interactive protocol with SRS are updatable simulation-extractable under % lenient conditions. A consequence of our result is that, in the ROM, we obtain % non-malleable NIZKs essentially for free from a much wider class of protocols % than Faust et al. Importantly, we show that three popular zero knowledge % SNARKs --- \plonk{}~\cite{EPRINT:GabWilCio19}, \sonic{}~\cite{CCS:MBKM19}, and % $\marlin$~\cite{EC:CHMMVW20} --- are updatable simulation extractable. \end{abstract} \input{intro} \section{Preliminaries} \label{sec:preliminaries} Let $\ppt$ denote probabilistic polynomial-time and $\secpar \in \NN$ be the security parameter. All adversaries are stateful. For an algorithm $\adv$, let $\image (\adv)$ be the image of $\adv$ (the set of valid outputs of $\adv$), let $\RND{\adv}$ denote the set of random tapes of correct length for $\adv$ (assuming the given value of $\secpar$), and let $r \sample \RND{\adv}$ denote the random choice of the randomiser $r$ from $\RND{\adv}$. We denote by $\negl$ ($\poly$) an arbitrary negligible (resp.~polynomial) function. Probability ensembles $X = \smallset{X_\secpar}_\secpar$ and $Y = \smallset{Y_\secpar}_\secpar$, for distributions $X_\secpar, Y_\secpar$, have \emph{statistical distance} $\SD$ equal $\epsilon(\secpar)$ if $\sum_{a \in \supp{X_\secpar \cup Y_\secpar}} \abs{\prob{X_\secpar = a} - \prob{Y_\secpar = a}} = \epsilon(\secpar)$. We write $X \approx_\secpar Y$ if $\SD(X_\secpar, Y_\secpar) \leq \negl$. For values $a(\secpar)$ and $b(\secpar)$ we write $a(\secpar) \approx_\secpar b(\secpar)$ if $\abs{a(\secpar) - b(\secpar)} \leq \negl$. \newcommand{\samplespace}{\Omega} \newcommand{\eventspace}{\mathcal{F}} \newcommand{\probfunction}{\mu} For a probability space $(\samplespace, \eventspace, \probfunction)$ and event $\event{E} \in \eventspace$ we denote by $\nevent{E}$ an event that is complementary to $\event{E}$, i.e.~$\nevent{E} = \samplespace \setminus \event{E}$. Denote by $\RELGEN = \smallset{\REL}$ a family of relations. We assume that if $\REL$ comes with any auxiliary input, it is benign. Directly from the description of $\REL$ one learns security parameter $\secpar$ and other necessary information like public parameters $\pp$ containing description of a group $\GRP$, if the relation is a relation of group elements (as it usually is in case of zkSNARKs). 
\ourpar{Bilinear groups.}
A bilinear group generator $\pgen (\secparam)$ returns public parameters $ \pp = (p, \GRP_1, \GRP_2, \GRP_T, \pair, \gone{1}, \gtwo{1})$, where $\GRP_1$, $\GRP_2$, and $\GRP_T$ are additive cyclic groups of prime order $p = 2^{\Omega (\secpar)}$, $\gone{1}, \gtwo{1}$ are generators of $\GRP_1$, $\GRP_2$, resp., and $\pair: \GRP_1 \times \GRP_2 \to \GRP_T$ is a non-degenerate $\ppt$-computable bilinear pairing. We assume the bilinear pairing to be Type-3, i.e., that there is no efficient isomorphism from $\GRP_1$ to $\GRP_2$ or from $\GRP_2$ to $\GRP_1$. We use the by now standard bracket notation, i.e., we write $\bmap{a}{\gi}$ to denote $a g_{\gi}$ where $g_{\gi}$ is a fixed generator of $\GRP_{\gi}$. We denote $\pair (\gone{a}, \gtwo{b})$ as $\gone{a} \bullet \gtwo{b}$. Thus, $\gone{a} \bullet \gtwo{b} = \gtar{a b}$. We freely use the bracket notation with matrices, e.g., if $\vec{A} \vec{B} = \vec{C}$ then $\vec{A} \grpgi{\vec{B}} = \grpgi{\vec{C}}$ and $\gone{\vec{A}}\bullet \gtwo{\vec{B}} = \gtar{\vec{C}}$.

Since every algorithm $\adv$ takes the public parameters as input, we skip them when describing $\adv$'s input. Similarly, we do not explicitly state that each protocol starts by generating these parameters with $\pgen$.

\subsection{Computational assumptions.}

\ourpar{Discrete-log assumptions.}
Security of $\plonk$ and $\sonic$ relies on two discrete-log based security assumptions---the $(q_1, q_2)$-$\dlog$ assumption and its variant that allows for negative exponents, the $(q_1, q_2)$-$\ldlog$ assumption\footnote{Note that \cite{CCS:MBKM19} dubs their assumption \emph{a dlog assumption}. We changed that name to distinguish it from the more standard dlog assumption used in \cite{EPRINT:GabWilCio19}. The ``l'' in \emph{ldlog} refers to the use of Laurent polynomials in the assumption.}. We omit the descriptions of these assumptions here and refer to \cref{sec:dlog_assumptions}.

\ourpar{Proofs by Game-Hopping.}
\emph{Game hopping} is a method of writing proofs popularised by e.g.~Shoup \cite{EPRINT:Shoup04} and Dent \cite{EPRINT:Dent06c}. The method relies on the following lemma.
\begin{lemma}[Difference lemma,~{\cite[Lemma 1]{EPRINT:Shoup04}}]
	\label{lem:difference_lemma}
	Let $\event{A}, \event{B}, \event{F}$ be events defined in some probability space, and suppose that $\event{A} \land \nevent{F} \iff \event{B} \land \nevent{F}$. Then
	$
	\abs{\prob{\event{A}} - \prob{\event{B}}} \leq \prob{\event{F}}\,.
	$
\end{lemma}

\subsection{Algebraic Group Model}

The algebraic group model (AGM) introduced in \cite{C:FucKilLos18} lies between the standard model and the generic bilinear group model\hamid{not clear why generic bilinear group model and not just generic group model!}. In the AGM it is assumed that an adversary $\adv$ can output a group element $\gnone{y} \in \GRP$ only if $\gnone{y}$ has been computed by applying group operations to group elements given to $\adv$ as input. It is further assumed that $\adv$ knows how to ``build'' $\gnone{y}$ from those elements. More precisely, the AGM requires that whenever $\adv(\gnone{\vec{x}})$ outputs a group element $\gnone{y}$ it also outputs $\vec{c}$ such that $\gnone{y} = \vec{c}^\top \cdot \gnone{\vec{x}}$. Both $\plonk$ and $\sonic$ have been shown secure using the AGM. An adversary that works in the AGM is called \emph{algebraic}.
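For illustration, in the typical case in this paper the group elements given to the adversary are the SRS elements $\gone{1}, \gone{\chi}, \ldots, \gone{\chi^n}$ for a trapdoor $\chi$. An algebraic adversary that outputs some $\gone{y} \in \GRP_1$ must then also output coefficients $c_0, \ldots, c_n$ such that $\gone{y} = \sum_{i = 0}^{n} c_i \gone{\chi^i}$, i.e.~$y = \p{c}(\chi)$ for a polynomial $\p{c}(X) = \sum_{i} c_i X^i$ known to the adversary.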
\subsection{Polynomial commitment}
\label{sec:poly_com}

In a polynomial commitment scheme $\PCOM = (\kgen, \com, \open, \verify)$ the committer $\committer$ can convince the receiver $\receiver$ that a polynomial $\p{f}$ to which $\committer$ committed evaluates to $s$ at some point $z$ chosen by $\receiver$. $\PCOM$'s subroutines are defined as follows:
\begin{description}
	\item[$\kgen(1^\secpar, \maxdeg)$:] The key generation algorithm $\kgen(1^\secpar, \maxdeg)$ takes in a security parameter $1^\secpar$ and a parameter $\maxdeg$ which determines the maximal degree of the committed polynomial. It outputs a structured reference string $\srs$ (including a commitment key).
	\item[$\com(\srs, \p{f})$:] The commitment algorithm $\com(\srs, \p{f})$ takes in $\srs$ and a polynomial $\p{f}$ with maximum degree $\maxdeg$, and outputs a commitment $c$.
	\item[$\open(\srs, z, s, \p{f})$:] The opening algorithm $\open(\srs, z, s, \p{f})$ takes as input $\srs$, an evaluation point $z$, a value $s$ and the polynomial $\p{f}$. It outputs an opening $o$.
	\item[$\verify(\srs, c, z, s, o)$:] The verification algorithm takes in $\srs$, a commitment $c$, an evaluation point $z$, a value $s$ and an opening $o$. It outputs 1 if $o$ is a valid opening for $(c, z, s)$ and 0 otherwise.
\end{description}

$\plonk$ and $\sonic$ use variants of the KZG polynomial commitment scheme \cite{AC:KatZavGol10}. We denote the former by $\PCOMp$ and the latter by $\PCOMs$. Due to the page limit, we omit their presentation here and refer to \cref{fig:pcomp} and \cref{fig:pcoms} in \cref{sec:pcom}.

In this paper we use evaluation binding, commitment of knowledge, and the newly introduced unique opening and hiding properties. Formal definitions can be found in \cref{sec:pcom}; here we briefly introduce them.
\begin{compactdesc}
	\item[Evaluation binding] intuitively, this property assures that no adversary can provide two valid openings for two different evaluations of the same commitment at the same point.
	\item[Commitment of knowledge] when a commitment scheme is ``of knowledge'', an adversary that produces a (valid) commitment $c$ which it can open also knows the underlying polynomial $\p{f}$ that commits to that value. \cite{CCS:MBKM19} shows, using the AGM, that $\PCOMs$ is a commitment of knowledge. The same reasoning can be used to show that property for $\PCOMp$.
	\item[Unique opening] this property assures that there is only one valid opening for the committed polynomial and a given evaluation point. This property is crucial in showing forking simulation-extractability of $\plonk$ and $\sonic$. We show that $\plonk$'s and $\sonic$'s polynomial commitment schemes satisfy this requirement in \cref{lem:pcomp_op} and \cref{lem:pcoms_unique_op} respectively.
	\item[Hiding] assures that no adversary is able to tell anything about the polynomial given only its commitment and a bounded number of evaluations.
\end{compactdesc}
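As a concrete point of reference, the basic (non-hiding) scheme of \cite{AC:KatZavGol10} works as follows: $\kgen$ picks a trapdoor $\chi$ and outputs $\srs = \left(\smallset{\gone{\chi^i}}_{i = 0}^{\maxdeg}, \gtwo{1}, \gtwo{\chi}\right)$; $\com(\srs, \p{f})$ outputs $c = \gone{\p{f}(\chi)}$, which can be computed from the SRS and the coefficients of $\p{f}$; $\open(\srs, z, s, \p{f})$ outputs $o = \gone{\p{q}(\chi)}$ for the quotient polynomial $\p{q}(X) = \frac{\p{f}(X) - s}{X - z}$; and $\verify(\srs, c, z, s, o)$ accepts iff $(c - \gone{s}) \bullet \gtwo{1} = o \bullet (\gtwo{\chi} - \gtwo{z})$. The schemes $\PCOMp$ and $\PCOMs$ are variants of this basic construction; see \cref{fig:pcomp} and \cref{fig:pcoms} for the exact versions.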
\subsection{Zero knowledge}
In a zero-knowledge proof system, a prover convinces the verifier of the veracity of a statement without leaking any other information. The zero-knowledge property is proven by constructing a simulator that can simulate the view of a cheating verifier without knowing the secret information (the witness) of the prover. A proof system has to be sound as well, i.e.~for a malicious prover it should be infeasible to convince a verifier of a false statement. Here, we focus on proof systems, so-called arguments, that guarantee soundness against $\ppt$ malicious provers.

More precisely, let $\RELGEN(\secparam) = \smallset{\REL}$ be a family of \changedm{universal} relations. Let $\prover$ and $\verifier$ be $\ppt$ algorithms, the former called \emph{prover} and the latter \emph{verifier}. We allow our proof system to have a setup, i.e.~there is a $\kgen$ algorithm that takes as input the relation description $\REL$ and outputs a common reference string $\srs$. We assume that the $\srs$ defines the relation, and for universal proof systems, such as Plonk and Sonic, we treat both the reference string and the relation as universal. We denote by $\zkproof$ a proof created by $\prover$ with input $(\srs, \inp, \wit)$. We say that a proof is acceptable if $\verifier(\srs, \inp, \zkproof)$ accepts it. A proof system $\proofsystem = (\kgen, \prover, \verifier, \simulator)$ for $\RELGEN$ is required to have three properties: completeness, soundness and zero knowledge, which are defined as follows:
% \begin{description}
\ourpar{Completeness.}
%\item[Completeness]
An interactive proof system $\proofsystem$ is \emph{complete} if an honest prover always convinces an honest verifier, that is, for all $\REL \in \RELGEN(\secparam)$ and $(\inp, \wit) \in \REL$
\[
\condprob{\ip{\prover (\srs, \inp, \wit)}{\verifier (\srs, \inp)} = 1}{\srs \gets \kgen(\REL)} = 1\,.
\]

% \item[Soundness]
\ourpar{Soundness.}
We say that $\proofsystem$ for $\RELGEN$ is \emph{sound} if no $\ppt$ prover $\adv$ can convince an honest verifier $\verifier$ to accept a proof for a false statement $\inp \not\in \LANG_\REL$. More precisely, for all $\REL \in \RELGEN(\secparam)$
\[
\condprob{\ip{\adv(\srs, \inp)}{\verifier(\srs, \inp)} = 1 \land \inp \not\in \LANG_\REL}{\srs \gets \kgen(\REL), \inp \gets \adv(\srs)} \leq
\negl\,.
\]
%\end{description}
Sometimes a stronger notion of soundness is required---besides requiring that the verifier rejects proofs of statements outside the language, we also require the prover to know a witness corresponding to the proven statement. This property is called \emph{knowledge soundness}.%\markulf{Commented out the formal definition as we don't use it.}
% We call an interactive proof system $\proofsystem$
% \emph{knowledge-sound} if for any $\REL \in \RELGEN(\secparam)$ and a $\ppt$
% adversary $\adv$
% \[
%   \Pr\left[
%   \begin{aligned}
%     & \verifier(\srs, \inp, \zkproof) = 1, \\
%     & \REL(\inp, \wit) = 0
%   \end{aligned}
%   \,\left|\,
%   \begin{aligned}
%     & \srs \gets \kgen(\REL), \inp \gets \adv(\srs), \\
%     & (\wit, \zkproof) \gets \ext^{\ip{\adv(\srs, \inp)}{\verifier(\srs, \inp)}}(\REL, \inp)
%   \end{aligned}
%   \vphantom{\begin{aligned}
%       \adv (\zkproof) = 1, \\
%       \text{if $\zkproof{}$ is accepting} \\
%       \pcind \text{then $\REL(\inp, \wit)$}
%     \end{aligned}}\right.
%   \right] \leq \negl\,,
% \]
% \end{description}
% Usually the verifier verifies messages send by the prover by checking a number
% of equations depend on the instance, SRS and the proof sent. These equations
% are often called \emph{verification equations} and denoted $\vereq_i$, for $i$
% being an index of the equation. It is usually required that an acceptable proof
% yields $\vereq_i = 0$. In the proof systems we consider---$\plonk$ and
% $\sonic$---verification equations can be seen as polynomials evaluated at the
% trapdoor $\chi$. Thus, the verifier checks that $\vereq_i(\chi) = 0$. Sometimes,
% we consider an \emph{idealised verifier},
% cf.~\cite{EPRINT:GabWilCio19}, who instead of checking that polynomial
% $\vereq_i(X)$ evaluates to $0$ at $\chi$ just checks that $\vereq_i(X)$ is a
% zero polynomial.
\ourpar{Zero knowledge.}
We call a proof system $\proofsystem$ \emph{zero-knowledge} if for any $\REL \in \RELGEN(\secparam)$ and adversary $\adv$ there exists a $\ppt$ simulator $\simulator$ such that for any $(\inp, \wit) \in \REL$
\begin{multline*}
	\left\{\ip{\prover(\srs, \inp, \wit)}{\adv(\srs, \inp, \wit)} \,\left|\,
	\srs \gets \kgen(\REL)\COMMENT{, (\inp, \wit) \gets \adv(\REL, \srs)}\vphantom{\simulator^\adv}\right.\right\} \approx_\secpar %\\
	\left\{\simulator^{\adv}(\srs, \inp)\,\left|\, \srs \gets \kgen(\REL)\COMMENT{, (\inp, \wit) \gets \adv(\REL, \srs)}\vphantom{\simulator^\adv}\right.\right\}\,.
\end{multline*}
%
We call zero knowledge \emph{perfect} if the distributions are equal and \emph{computational} if they are indistinguishable for any $\ppt$ distinguisher.
% \end{description}
Alternatively, zero-knowledge can be defined by allowing the simulator to use the trapdoor $\td$ that is generated along with the $\srs$.

In this paper we distinguish simulators that require a trapdoor to simulate from those that do not. We call the former \emph{SRS-simulators}. We say that a protocol is zero knowledge in the standard model if its simulator does not require the trapdoor.
% Occasionally, a weaker version of zero knowledge is sufficient. So called
% \emph{honest verifier zero knowledge} (HVZK) assumes that the verifier's
% challenges are picked at random from some predefined set. Although weaker, this
% definition suffices in many applications. Especially, an interactive
% zero-knowledge proof that is HVZK and \emph{public-coin} (i.e.~the verifier
% outputs as challenges its random coins) can be made non-interactive and
% zero-knowledge in the random oracle model by using the Fiat--Shamir
% transformation.

In the security reductions in this paper we sometimes need to produce simulated NIZK proofs without knowing the trapdoor, just by programming the random oracle. We call protocols that allow for this kind of simulation \emph{trapdoor-less simulatable} (TLZK). More precisely,
\begin{definition}[Trapdoor-less simulatable proof system]
	Let $\ps = (\kgen, \prover, \verifier, \simulator)$ be a NIZK proof system and $\ro$ a random oracle. Let $\simulator$ be a pair of algorithms: $\simulator_\ro$, which takes random oracle queries and answers them, and $\simulator_\prover$, which takes as input an SRS $\srs$ and an instance $\inp$ and outputs a proof $\zkproof_\simulator$. We call $\ps$ \emph{trapdoor-less simulatable} if for any adversary $\adv$, $\eps_0 \approx \eps_1$, where
	\begin{align}
		\eps_b = \Pr\left[
		\begin{aligned}
			\adv^{\oracleo_b}(\srs) = 0
		\end{aligned}
		\, \left| \,
		\begin{aligned}
			\srs \sample \kgen(\secpar)
		\end{aligned}
		\right.\right]
	\end{align}
	where $\oracleo_b$ takes two types of adversary's queries:
	\begin{description}
		\item[random oracle calls:] on $\adv$'s query $x$, $\oracleo_b$ responds with $\ro(x)$ if $b = 0$, and with $y \gets \simulator_\ro(\srs, x)$ if $b = 1$.
		\item[proof calls:] on $\adv$'s query $(\inp, \wit)$, $\oracleo_b$ responds with a real proof $\zkproof_\prover \gets \prover(\srs, \inp, \wit)$ if $b = 0$, or a simulated proof $\zkproof_\simulator \gets \simulator (\srs, \inp)$ if $b = 1$.
	\end{description}
\end{definition}
% In our simulation soundness proof (but not simulation extractability
% \hamid{you mean forking simulation extractability?}) we need an
% additional property of the zero-knowledge proof system which we call
% $k$-programmable ZK.
\begin{definition}[$k$-programmable ZK]
	\label{def:kzk}
	Let $\ps$ be a $(2\mu + 1)$-message ZK proof system and let $\ps_\fs$ be its Fiat--Shamir variant. We say that $\ps_\fs$ is $k$-programmable ZK if there exists a simulator $\simulator_\fs$ that
	\begin{compactenum}
		\item produces proofs indistinguishable from proofs output by an honest prover;
		\item programs the random oracle \emph{only} for challenges from round $k$ to $\mu + 1$.
	\end{compactenum}
\end{definition}
We note that $\plonk$ is $2$-programmable ZK, $\sonic$ is $1$-programmable ZK, and $\marlin$ is $1$-programmable ZK. This follows directly from the proofs of their standard model zero-knowledge property in \cref{lem:plonk_hvzk,lem:sonic_hvzk,lem:marlin_hvzk}.

\oursubsub{Idealised verifier and verification equations}
Let $(\kgen, \prover, \verifier)$ be a proof system.
% or a polynomial commitment
% scheme\hamid{might be unclear as we are defining polynomial commitments as
% $(\kgen, \com, \open, \verify)$.}.
Observe that the $\kgen$ algorithm provides an SRS which can be interpreted as a set of group representations of polynomials evaluated at trapdoor elements. E.g.~for a trapdoor $\chi$ the SRS contains $\gone{\p{p_1}(\chi), \ldots, \p{p_k}(\chi)}$, for some polynomials $\p{p_1}(X), \ldots, \p{p_k}(X) \in \FF_p[X]$. On the other hand, the verifier $\verifier$ accepts if a verification equation (or a set of verification equations) $\vereq_{\inp, \zkproof}$ (note that the verification equation depends on the instance $\inp$ and the proof $\zkproof$), which can also be interpreted as a polynomial in $\FF_p[X]$ whose coefficients depend on the messages sent by the prover, evaluates to zero at $\chi$.

Following \cite{EPRINT:GabWilCio19} we call verifiers who check that $\vereq_{\inp, \zkproof}(\chi) = 0$ \emph{real verifiers}, as opposed to \emph{ideal verifiers} who accept only when $\vereq_{\inp, \zkproof}(X) = 0$. That is, while a real verifier accepts when a polynomial \emph{evaluates} to zero, an ideal verifier accepts only when the polynomial \emph{is} zero.

Although ideal verifiers are impractical, they are very useful in our proofs. More precisely, we show that
\begin{compactenum}
	\item the idealised verifier accepts an incorrect proof (what ``incorrect'' means depends on the situation) with at most negligible probability (and in many cases---never);
	\item when the real verifier accepts but the idealised one does not, we show how to use a malicious $\prover$ to break the underlying security assumption (in our case---a variant of $\dlog$).
\end{compactenum}
Analogously, an idealised verifier can also be defined for a polynomial commitment scheme.

\oursubsub{Sigma protocols}
A sigma protocol $\sigmaprot = (\prover, \verifier, \simulator)$ for a relation $\REL \in \RELGEN(\secparam)$ is a special case of an interactive proof where a transcript consists of three messages $(a, b, z)$, where $b$ is a challenge provided by the verifier. Sigma protocols are honest verifier zero-knowledge in the standard model and specially-sound. That is, there exists an extractor $\ext$ which, given two accepting transcripts $(a, b, z)$, $(a, b', z')$ for a statement $\inp$, can recreate the corresponding witness if $b \neq b'$.
More formally: % \begin{description} \ourpar{Special soundness.} % \hamid{The last (short) sentence looks a little unclear.} A sigma protocol $\sigmaprot$ is \emph{specially-sound} if for any adversary $\adv$ the probability \[ \Pr\left[ \begin{aligned} & \verifier(\REL, \inp, (a, b, z)) = %\\ \verifier(\REL, \inp, (a, b', z')) = 1 \\ & \land b \neq b' \land \REL(\inp, \wit) = 0 \\ \end{aligned} \,\left|\, \begin{aligned} & (\inp, (a, b, z), (a, b', z')) \gets \adv(\REL), \\ %\\ & \wit \gets \ext(\REL, \inp, (a, b, z), (a, b', z'))\\ \end{aligned} \right.\right] \] is upper-bounded by some negligible function $\negl$. %\end{description} Another property that sigma protocols may have is a unique response property \cite{C:Fischlin05} which states that no $\ppt$ adversary can produce two accepting transcripts that differ only on the last element. More precisely, %\begin{description} \ourpar{Unique response property.} Let $\sigmaprot = (\prover, \verifier, \simulator)$ be a sigma-protocol for $\REL \in \RELGEN(\secparam)$ with proofs of the form $(a, b, z)$. We say that $\sigmaprot$ has the unique response property if for all $\ppt$ algorithms $\adv$, it holds that,: \[ \condprob{\verifier (\REL, \inp, (a, b, z)) = \verifier (\REL, \inp, (a, b, z')) = 1 \land z \neq z'}{(\inp, a, b, z, z') \gets \adv(\REL)} \leq \negl\,. \] %\end{description} If this property holds even against unbounded adversaries, it is called \emph{strict}, cf.~\cite{INDOCRYPT:FKMV12}. Later on we call protocols that follows this notion \emph{ur-protocols}. For the sake of completeness we note that many sigma protocols, like e.g.~Schnorr's protocol \cite{C:Schnorr89}, fulfil this property. \subsection{From interactive to non-interactive---the Fiat--Shamir transform} Consider a $(2\mu + 1)$-message, public-coin, honest verifier zero-knowledge interactive proof system $\proofsystem = (\kgen, \prover, \verifier, \simulator)$ for $\REL \in \RELGEN(\secparam)$. Let $\zkproof$ be a proof performed by the prover $\prover$ and verifier $\verifier$ compound of messages $(a_1, b_1, \ldots, a_{\mu}, b_{\mu}, a_{\mu + 1})$, where $a_i$ comes from $\prover$ and $b_i$ comes from $\verifier$. Denote by $\ro$ a random oracle. Let $\proofsystem_\fs = (\kgen_\fs, \prover_\fs, \verifier_\fs, \simulator_\fs)$ be a proof system such that \begin{compactitem} \item $\kgen_\fs$ behaves as $\kgen$. \item $\prover_\fs$ behaves as $\prover$ except after sending message $a_i$, $i \in \range{1}{\mu}$, the prover does not wait for the message from the verifier but computes it locally setting $b_i = \ro(\zkproof[0..i])$, where $\zkproof[0..j] = (\inp, a_1, b_1, \ldots, a_{j - 1}, b_{j - 1}, a_j)$. (Importantly, $\zkproof[0..\mu + 1] = (\inp, \zkproof)$). \item $\verifier_\fs$ behaves as $\verifier$ but does not provide challenges to the prover's proof. Instead it computes the challenges locally as $\prover_\fs$ does. Then it verifies the resulting transcript $\zkproof$ as the verifier $\verifier$ would. \item $\simulator_\fs$ behaves as $\simulator$, except when $\simulator$ picks challenge $b_i$ before computing message $\zkproof[0, i]$, $\simulator_\fs$ programs the random oracle to output $b_i$ on $\zkproof[0, i]$. \end{compactitem} \noindent The Fiat--Shamir heuristic states that $\proofsystem_\fs$ is a zero-knowledge non-interactive proof system for $\REL \in \RELGEN(\secparam)$. \subsection{Non-malleability definitions for NIZKs} \label{sec:simext_def} Real life applications often require a NIZK proof system to be non-malleable. 
That is, no adversary seeing a proof $\zkproof$ for a statement $\inp$ should be able to provide a new proof $\zkproof'$ related to $\zkproof$. \emph{Simulation extractability} formalizes a strong version of non-malleability by requiring that no adversary can produce a valid proof without knowing the corresponding witness. This must hold even if the adversary is allowed to see polynomially many simulated proofs for any statements it wishes. %\chaya{remove reference to forking soundness. quantify for $\ext_\se$} \begin{definition}[Forking simulation-extractable NIZK, \cite{INDOCRYPT:FKMV12}] \label{def:simext} Let $\ps_\fs = (\kgen_\fs, \prover_\fs, \verifier_\fs, \simulator_\fs)$ be a HVZK proof system\hamid{$\ps_\fs$ is the Fiat-Shamir variant of the underlying proof system. So maybe we mean the underlying proof system is HVZK?}. We say that $\ps_\fs$ is \emph{forking simulation-extractable} with \emph{extraction error} $\nu$ if for any $\ppt$ adversary $\adv$ that is given oracle access to a random oracle $\ro$ and simulator $\simulator_\fs$, and produces an accepting transcript of $\ps$ with probability $\accProb$, where \[ \accProb = \Pr \left[ \begin{aligned} & \verifier_\fs(\srs, \inp_{\advse}, \zkproof_{\advse}) = 1,\\ & (\inp_{\advse}, \zkproof_{\advse}) \not\in Q \end{aligned} \, \left| \, \begin{aligned} & \srs \gets \kgen_\fs(\REL), r \sample \RND{\advse}, \\ & (\inp_{\advse}, \zkproof_{\advse}) \gets \advse^{\simulator_\fs, \ro} (\srs; r) \end{aligned} \right.\right]\,, \] there exists an extractor $\extse$ such that \[ \extProb = \Pr \left[ \begin{aligned} & \verifier_\fs(\srs, \inp_{\advse}, \zkproof_{\advse}) = 1,\\ & (\inp_{\advse}, \zkproof_{\advse}) \not\in Q,\\ & \REL(\inp_{\advse}, \wit_{\advse}) = 1 \end{aligned} \, \left| \, \begin{aligned} & \srs \gets \kgen_\fs(\REL), r \sample \RND{\advse},\\ & (\inp_{\advse}, \zkproof_{\advse}) \gets \advse^{\simulator_\fs, \ro} (\srs; r) \\ & \wit_{\advse} \gets \ext_\se (\srs, \advse, r, \inp_{\advse}, \zkproof_{\advse}, Q, Q_\ro) \end{aligned} \right.\right] \] is at at least \[ \extProb \geq \frac{1}{\poly} (\accProb - \nu)^d - \eps(\secpar)\,, \] for some polynomial $\poly$, constant $d$ and negligible $\eps(\secpar)$ whenever $\accProb \geq \nu$. List $Q$ contains all $(\inp, \zkproof)$ pairs where $\inp$ is an instance provided to the simulator by the adversary and $\zkproof$ is the simulator's answer. List $Q_\ro$ contains all $\advse$'s queries to $\ro$ and $\ro$'s answers. \end{definition} % Consider a sigma protocol $\sigmaprot = (\prover, \verifier, \simulator)$ that % is special-sound and has a unique response property. Let $\sigmaprot_\fs = % (\prover_\fs, \verifier_\fs, \simulator_\fs)$ be a NIZK obtained by applying the % Fiat--Shamir transform to $\sigmaprot$. Faust et al.~\cite{INDOCRYPT:FKMV12} % show that every such $\sigmaprot_\fs$ is forking simulation-extractable. This result is % presented in \cref{sec:forking_lemma} along with the instrumental forking lemma, % cf.~\cite{CCS:BelNev06}. \iffalse \noindent \textbf{Simulation sound NIZKs.} Another notion for non-malleable NIZKs is \emph{simulation soundness}. It allows the adversary to see simulated proof, however, in contrast to simulation extractability it does not require an extractor to provide a witness for the proven statement. Instead, it is only necessary, that an adversary who sees simulated proofs cannot make the verifier accept a proof of an incorrect statement. 
More precisely, \chaya{this definition will go} \begin{definition}[Simulation soundness] \label{def:simsnd} Let $\ps = (\kgen, \prover, \verifier, \simulator)$ be a NIZK proof and $\ps_\fs = (\kgen_\fs, \prover_\fs, \verifier_\fs, \simulator_\fs)$ be $\ps$ transformed by the Fiat--Shamir transform. We say that $\ps_\fs$ is \emph{simulation-sound} for any $\ppt$ adversary $\adv$ that is given oracle access to a random oracle $\ro$ and simulator $\simulator_\fs$, probability \[ \ssndProb = \Pr\left[ \begin{aligned} & \verifier_\fs(\srs, \inp_{\adv}, \zkproof_{\adv}) = 1,\\ & (\inp_{\advse}, \zkproof_{\advse}) \not\in Q,\\ & \neg \exists \wit_{\adv}: \REL(\inp_{\adv}, \wit_{\adv}) = 1 \end{aligned} \, \left| \, \vphantom{\begin{aligned} & \verifier_\fs(\srs, \inp_{\adv}, \zkproof_{\adv}) = 1,\\ & (\inp_{\advse}, \zkproof_{\advse}) \not\in Q,\\ & \neg \exists \wit_{\adv}: \REL(\inp_{\adv}, \wit_{\adv}) = 1 \end{aligned}} \begin{aligned} & \srs \gets \kgen(\REL), r \sample \RND{\advse},\\ & (\inp_{\advse}, \zkproof_{\advse}) \gets \advse^{\simulator_\fs, \ro} (\srs; r) \end{aligned} \right. \right] \] is at most negligible. List $Q$ contains all $(\inp, \zkproof)$ pairs where $\inp$ is an instance provided to the simulator by the adversary and $\zkproof$ is the simulator's answer. \end{definition} \label{rem:simext_to_simsnd} We note that the probability $\ssndProb$ \cref{def:simsnd} can be expressed in terms of simulation-extractability. More precisely, the condition $\neg \exists \wit: \REL(\inp_\adv, \wit_\adv) = 1$ can be substituted with $\REL(\inp_\adv, \wit_\adv) = 0$, where $\wit_\adv$, returned by a possibly unbounded extractor, is either a witness to $\inp_\adv$ (if there exists any) or $\bot$ (if there is none). More precisely, \[ \ssndProb = \Pr\left[ \begin{aligned} & \verifier_\fs(\srs, \inp_{\adv}, \zkproof_{\adv}) = 1,\\ & (\inp_{\advse}, \zkproof_{\advse}) \not\in Q,\\ & \REL(\inp_{\adv}, \wit_{\adv}) = 0 \end{aligned} \, \left| \, \begin{aligned} & \srs \gets \kgen(\REL), r \sample \RND{\advse},\\ & (\inp_{\advse}, \zkproof_{\advse}) \gets \advse^{\simulator_\fs, \ro} (\srs; r)\\ & \wit_{\adv} \gets \ext(\srs, \advse, r, \inp_{\advse}, \zkproof_{\advse}, Q, Q_\ro,) \end{aligned} \right. \right]. \] The only necessary input to the unbounded extractor $\ext$ is the instance $\inp_\adv$ (the rest is given for the consistency with the simulation extractability definition). % With the probabilities in \cref{def:simext} holding regardless of whether the extractor is unbounded or not, we obtain the following equality $ \ssndProb = \accProb - \extProb$. % In \cref{cor:simext_to_ssnd} we show that (under some mild conditions) this is enough % to conjecture that probability $\ssndProb$ is not only at most negligible, but % also, in some parameters, exponentially smaller than $(1 - \extProb)$ % (probability of extraction failure in \cref{def:simext}). \fi \section{Definitions and lemmas for multi-round SRS-based protocols} \label{sec:se_definitions} \chaya{move the USE definition to this section?} The result of Faust et al.\cite{INDOCRYPT:FKMV12} do not apply to our setting since the protocols we consider have an SRS, more than three messages, require more than just two transcripts for standard model extraction and are not special sound. We thus adapt special soundness to forking soundness, and generalize the forking lemma and the unique response property to make them compatible with multi-round SRS-based protocols. 
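To get a rough feel for why two transcripts cannot suffice, consider the following illustrative calculation (the concrete parameters are the ones established later for $\plonkprot$ in \cref{sec:plonk}; the calculation itself plays no formal role). There, the extracted witness is encoded in wire polynomials of degree at most $\noofc + 2$, and the extractor recovers them by Lagrange interpolation from their evaluations at the branching challenges. Writing $\chz_1, \ldots, \chz_{3\noofc + 1}$ for the (distinct) evaluation challenges in the $3\noofc + 1$ accepting transcripts, we have, e.g.,
\[
\p{a}(X) = \sum_{j = 1}^{3\noofc + 1} \p{a}(\chz_j) \prod_{l \neq j} \frac{X - \chz_l}{\chz_j - \chz_l}\,,
\]
which requires strictly more than $\noofc + 2$ evaluation points; the two related transcripts provided by the classical forking lemma are therefore far from enough.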
\subsection{Generalised forking lemma.} %\label{sec:forking_lemma} First of all, although dubbed ``general'', \cref{lem:forking_lemma} is not general enough for our purpose as it is useful only for protocols where witness can be extracted from just two transcripts. To be able to extract a witness from, say, an execution of $\plonkprot$ we need to obtain at least $(3 \numberofconstrains + 1)$ valid proofs, and $(\noofc + 1)$ for $\sonicprot$. Here we propose a generalisation of the general forking lemma that given probability of producing an accepting transcript, $\accProb$, lower-bounds the probability of generating a \emph{tree of accepting transcripts} $\tree$, which allows to extract a witness. \begin{definition}[Tree of accepting transcripts, cf.~{\cite{EC:BCCGP16}}] \label{def:tree_of_accepting_transcripts} Consider a $(2\mu + 1)$-message interactive proof system $\ps$. A $(n_1, \ldots, n_\mu)$-tree of accepting transcripts is a tree where each node on depth $i$, for $i \in \range{1}{\mu + 1}$, is an $i$-th prover's message in an accepting transcript; edges between the nodes are labeled with verifier's challenges, such that no two edges on the same depth have the same label; and each node on depth $i$ has $n_{i} - 1$ siblings and $n_{i + 1}$ children. The tree consists of $N = \prod_{i = 1}^\mu n_i$ branches, where $N$ is the number of accepting transcripts. We require $N = \poly$. \end{definition} \begin{lemma}[General forking lemma II] \label{lem:generalised_forking_lemma} Fix $q \in \ZZ$ and set $H$ of size $h \geq m$. Let $\zdv$ be a $\ppt$ algorithm that on input $y, h_1, \ldots, h_q$ returns $(i, s)$ where $i \in \range{0}{q}$ and $s$ is called a side output. Denote by $\ig$ a randomised instance generator. We denote by $\accProb$ the probability \[ \condprob{i \neq 0}{ y \gets \ig;\ h_1, \ldots, h_q \sample H;\ (i, s) \gets \zdv(y, h_1, \ldots, h_q)}\,. \] Let $\genforking_{\zdv}^{m}$ denote the algorithm described in \cref{fig:genforking_lemma} then the probability $\frkProb := \condprob{b = 1}{y \gets \ig;\ h_1, \ldots, h_{q} \sample H;\ (b, \vec{s}) \gets \genforking_{\zdv}^{m}(y, h_1, \ldots, h_q)}$ is at least \[ \frac{\accProb^m}{q^{m - 1}} - \accProb \cdot \left(1 - \frac{h!}{(h - m)! \cdot h^{m}}\right). \] \begin{figure}[t] \centering \fbox{ \procedure{$\genforking_{\zdv}^{m} (y,h_1^{1}, \ldots, h_{q}^{1})$} { \rho \sample \RND{\zdv}\\ (i, s_1) \gets \zdv(y, h_1^{1}, \ldots, h_{q}^{1}; \rho)\\ i_1 \gets i\\ \pcif i = 0\ \pcreturn (0, \bot)\\ \pcfor j \in \range{2}{m}\\ \pcind h_{1}^{j}, \ldots, h_{i - 1}^{j} \gets h_{1}^{j - 1}, \ldots, h_{i - 1}^{j - 1}\\ \pcind h_{i}^{j}, \ldots, h_{q}^{j} \sample H\\ \pcind (i_j, s_j) \gets \zdv(y, h_1^{j}, \ldots, h_{i - 1}^{j}, h_{i}^{j}, \ldots, h_{q}^{j}; \rho)\\ \pcind \pcif i_j = 0 \lor i_j \neq i\ \pcreturn (0, \bot)\\ \pcif \exists (j, j') \in \range{1}{m}^2, j \neq j' : (h_{i}^{j} = h_{i}^{j'})\ \pcreturn (0, \bot)\\ \pcelse \pcreturn (1, \vec{s}) }} \caption{Generalised forking algorithm $\genforking_{\zdv}^{m}$} \label{fig:genforking_lemma} \end{figure} \end{lemma} The proof goes similarly to \cite[Lemma 1]{CCS:BelNev06} with some modifications required by the fact that the protocol has more than 3 rounds and the number of transcripts required is larger. Due to page limit, the proof is presented in \cref{sec:forking_proof}. To highlight importance of the generalised forking lemma we describe how we use it in our forking simulation-extractability proof. 
Let $\proofsystem$ be a forking sound proof system where for an instance $\inp$ the corresponding witness can be extracted from an $(1, \ldots, 1, n_k, 1, \ldots, 1)$-tree of accepting transcripts. Let $\advse$ be the simulation-extractability adversary that outputs an accepting proof with probability at least $\accProb$. (Although we use the same $\accProb$ to denote probability of $\zdv$ outputting a non-zero $i$ and the probability of $\advse$ outputing an accepting proof, we claim that these probabilities are exactly the same by the way we define $\zdv$.) Let $\advse$ produce an accepting proof $\zkproof_{\advse}$ for instance $\inp_{\advse}$; $r$ be $\advse$'s randomness; $Q$ the list of queries submitted by $\advse$ along with simulator $\simulator$'s answers; and $Q_\ro$ be the list of all random oracle queries made by $\advse$. All of these are given to the extractor $\ext$ that internally runs the forking algorithm $\genforking_\zdv^{n_k}$. Algorithm $\zdv$ takes $(\srs, \advse, %\inp_\advse, %\zkproof_\advse, Q, r)$ as input $y$ and $Q_\ro$ as input $h_1^1, \ldots, h_q^1$. (For the sake of completeness, we allow $\genforking_\zdv^{n_k}$ to pick $h^1_{l + 1}, \ldots, h^1_q$ responses if $Q_\ro$ has only $l < q$ elements.) Next, $\zdv$ internally runs $\advse(\srs; r)$ and responds to its random oracle and simulator queries by using $Q_\ro$ and $Q$. Note that $\advse$ makes the same queries as it did before it output $(\inp_{\advse}, \zkproof_{\advse})$ as it is run on the same random tape and with the same answers from the simulator and random oracle. Once $\advse$ outputs $\zkproof_{\advse}$, algorithm $\zdv$ outputs $(i, \zkproof_{\advse})$, where $i$ is the index of a random oracle query submitted by $\advse$ to receive the challenge after the $k$-th message from the prover---a message where the tree of transcripts branches. Then, after the first run of $\advse$ is done, the extractor runs $\zdv$ again, but this time it provides fresh random oracle responses $h^2_i, \ldots, h^2_q$. Note that this is equivalent to rewinding $\advse$ to a point just before $\advse$ is about to ask its $i$-th random oracle query. The probability that the adversary produces an accepting transcript with the fresh random oracle responses is at least $\accProb$. This continues until the required number of transcripts is obtained. We note that in the original forking lemma, the forking algorithm $\forking$, cf.~\cref{fig:forking_lemma}, gets only as input $y$ and elements $h^1_1, \ldots, h^1_q$ are randomly picked from $H$ internally by $\forking$. However, assuming that $h^1_1, \ldots, h^1_q$ are random oracle responses, and thus random, makes the change only notational. We also note that the general forking lemma proposed in \cref{lem:generalised_forking_lemma} works for protocols with an extractor that can obtain the witness from a $(1, \ldots, 1, n_k, 1, \ldots, 1)$-tree of accepting transcripts. This limitation however does not affect the main result of this paper, i.e.~showing that both $\plonk$ and $\sonic$ are forking simulation extractable. \subsection{Unique-response protocols} Another technical hurdle is the assumption of unique response property of the transformed sigma protocol required by Faust et al. The original Fischlin's formulation, although suitable for applications presented in \cite{C:Fischlin05,INDOCRYPT:FKMV12}, does not suffice in our case. First, the property assumes that the protocol has three messages, with the second being the challenge from the verifier. 
That is not the case we consider here. Second, it is not entirely clear how to generalize the property. Should one require that after the first challenge from the verifier, the prover's responses are fixed? That does not work since the prover needs to answer differently on different verifier's challenges, as otherwise the protocol could have fewer rounds. Another problem is that the protocol could consist of a round other than the first one where the prover message is randomized. Unique response cannot hold in this case. Finally, the protocols we consider here are not in the standard model, but use an SRS what also complicates things considerably. We walk around these obstacles by providing a generalised notion of the unique response property. More precisely, we say that a $(2\mu + 1)$-message protocol has \emph{unique responses from $i$}, and call it an $\ur{i}$-protocol, if it follows the definition below: \begin{definition}[$\ur{i}$-protocol] \label{def:wiur} Let $\proofsystem$ be a $(2\mu + 1)$-message public coin proof system $\ps = (\kgen, \prover, \verifier, \simulator)$. Let $\proofsystem_\fs$ be $\proofsystem$ after the Fiat--Shamir transform and $\ro$ the random oracle. Denote by $a_1, \ldots, a_{\mu}, a_{\mu + 1}$ protocol messages output by the prover, We say that $\proofsystem$ has \emph{unique responses from $i$ on} if for any $\ppt$ adversary $\adv$: \[ \prob{ \begin{aligned} & \inp, \vec{a} = (a_1, \ldots, a_{\mu + 1}), \vec{a'} = (a'_1, \ldots, a'_{\mu + 1}) \gets \adv^\ro(\srs), \\ & \vec{a} \neq \vec{a'}, a_1, \ldots, a_{i} = a'_1, \ldots, a'_{i}, \\ & \verifier^\ro_\fs (\srs, \inp, \vec{a}) = \verifier^\ro_\fs(\srs, \inp, \vec{a'}) = 1 \end{aligned} \ \left|\ \vphantom{\begin{aligned} & \vec{a} = (a_0, b_0, \ldots, a_j, b_j, a_\mu), \vec{a'} = (a'_0, b'_0, \ldots, a'_j, b'_j a'_\mu) \gets \adv(\srs), \vec{a} \neq \vec{a'}, \\ & b_k = b'_k, k \in \range{1, \mu - 1},\\ a_l = a'_l, l \in \range{1}{j}, j > i \end{aligned}} \srs \gets \kgen_\fs(\REL) \right. } \] is upper-bounded by some negligible function $\negl$. % Let $\proofsystem$ be a $(2\mu + 1)$-message public coin proof system % $\ps = (\kgen, \prover, \verifier, \simulator)$ and let $r$ be verifier's % randomness which determines its challenges $r_1, \ldots, r_\mu$. Denote by % $\vec{a} = a_1, \ldots, a_{\mu}, a_{\mu + 1}$ protocol messages output by the prover and by $r_1, \ldots, r_\mu$ the challenges of the verifier, We % say that $\proofsystem$ has \emph{unique responses from $i$ on} if for any % $\ppt$ adversary $\adv$: % \[ % \prob{ % \begin{aligned} % & \vec{a} \neq \vec{a'}, a_1, \ldots, a_{i} = a'_1, % \ldots, a'_{i}, \\ % & \verifier (\srs, \inp, \vec{a}; r) = % \verifier(\srs, \inp, \vec{a'}; r) = 1 % \end{aligned} % \ \left|\ % \vphantom{\begin{aligned} % & \vec{a} = (a_0, b_0, \ldots, a_j, b_j, a_\mu), \vec{a'} = (a'_0, b'_0, \ldots, a'_j, % b'_j a'_\mu) \gets \adv(\srs), \vec{a} \neq \vec{a'}, \\ % & b_k = b'_k, k \in \range{1, \mu - 1},\\ a_l = a'_l, l \in % \range{1}{j}, j > i % \end{aligned}} % \begin{aligned} % &\srs \gets \kgen(\REL) \\ % &\inp, \vec{a}, \vec{a'} \gets \adv(\srs) \\ % & r_1, \ldots, r_i \gets H^i\\ % & r_{i+1}, \ldots r_\mu \gets H^{\mu-i}, r'_{i+1}, \ldots r'_\mu \gets H^{\mu-i} % \end{aligned} % \right. % } \leq \negl. % \] \end{definition} Intuitively, a protocol is $\ur{i}$ if it is infeasible for a $\ppt$ adversary to produce a pair of acceptable and different proofs $\zkproof$, $\zkproof'$ that are the same on first $i$ messages. 
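As a quick sanity check (this observation is only illustrative and is not used later), instantiate \cref{def:wiur} with $\mu = 1$ and $i = 1$, i.e.~with a Fiat--Shamir-compiled sigma protocol whose proofs have the form $(a_1, a_2)$. Two accepting proofs that agree on the first message necessarily share the challenge, since it is computed as $\ro(\inp, a_1)$, and can therefore differ only in the last message $a_2$; hence $\ur{1}$ coincides, in its computational form, with the unique response property of \cite{C:Fischlin05} recalled earlier.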
% after $i$-th prover's message, all % $\prover$'s further messages are determined by the witness it knows, the % messages it already send and received and the future challenges from the % verifier. We note that the definition above is also meaningful for protocols without an SRS. Intuitively in that case $\srs$ is the empty string. \iffalse \subsection{Forking soundness} Note that the special soundness property (as usually defined) holds for all---even computationally unbounded---adversaries. Unfortunately, since a simulation trapdoors for $\plonkprot$ and $\sonicprot$ exist, the protocols cannot be special sound in that regard. This is because an unbounded adversary could recover the trapdoor and build a number of simulated proofs for a fake statement. Hence, we provide a weaker, yet sufficient, definition of \emph{forking soundness}. More precisely, we state that an adversary that is able to answer correctly multiple challenges either knows the witness or can be used to break some computational assumption. \begin{definition}[Forking soundness] Let $\proofsystem = (\kgen, \prover, \verifier, \simulator)$ be an $(2 \mu + 1)$-message proof system for a relation $\REL$. We say that $\proofsystem$ is $(\epsss(\secpar), (n_1, \ldots, n_\mu))$-\emph{forking sound} if there exists an extractor $\extt$ that given an $(n_1, \ldots, n_\mu)$-tree of acceptable transcripts $\tree$ and instance $\inp$ output by some $\ppt$ adversary $\adv(\REL, \srs)$, for $\srs \sample \kgen(\REL)$, outputs $\wit$ such that $\REL(\inp, \wit) = 1$ with probability at least $1 - \epsss$. \end{definition} Since we do not utilise the classical special soundness (that holds for all, even unbounded, adversaries) all references to that property should be understood as references to its computational version. \fi \subsection{Forking soundness} Note that the special soundness property (as usually defined) holds for all---even computationally unbounded---adversaries. Unfortunately, since a simulation trapdoors for $\plonkprot$ and $\sonicprot$ exist, the protocols cannot be special sound in that regard. This is because an unbounded adversary can recover the trapdoor and build a number of simulated proofs for a fake statement. Hence, we provide a weaker, yet sufficient, definition of \emph{forking soundness}. More precisely, we state that an adversary that is able to answer correctly multiple challenges either knows the witness or can be used to break some computational assumption. \chaya{a notion of rewinding-based knowledge soundness has been used to mean exactly the above in prior works, like BBF19. we should clarify if forking soundness different from rewinding-based knowledge soundness? seems to me like the diference is just that here it is tailored for the NI version.} \chaya{I now see that the diff is the access to the simulator that the adversary gets. might be helpful to clarify this, either here or in a tech overview section. I am not sure why this needs to be defined this way by giving access to the simulator.} However, differently from the standard definition of special soundness, we do not require from the extractor to be able to extract the witness from \emph{any} tree of acceptable transcripts. We require that the tree be produced honestly, that is, all challenges are picked randomly---exactly as an honest verifier would pick. Intuitively, the tree is as it would be generated by a $\genforking$ algorithm from the generalized forking lemma. 
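For intuition, it may help to compare this notion with the classical setting (the following toy example plays no further role in the paper). For Schnorr's protocol \cite{C:Schnorr89} for the relation $\{(\inp, \wit) : \inp = g^{\wit}\}$, where $g$ generates a group of prime order, special soundness corresponds to $k = 1$ and $n = 2$: given two accepting transcripts $(a, b, z)$ and $(a, b', z')$ with $b \neq b'$, the tree extractor outputs
\[
\wit = \frac{z - z'}{b - b'}\,,
\]
with the arithmetic performed modulo the group order, which is correct because $g^{z} = a \cdot \inp^{b}$ and $g^{z'} = a \cdot \inp^{b'}$ imply $g^{z - z'} = \inp^{b - b'}$. Forking soundness asks for an extractor of exactly this kind, except that the tree is obtained by rewinding an adversary that may additionally request simulated proofs (with the branching challenges required to be fresh), and extraction only needs to succeed with overwhelming probability.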
% \begin{definition}[Forking soundness] % % Let $\proofsystem = (\kgen, \prover, \verifier, \simulator)$ be an % % $(2 \mu + 1)$-message proof system for a relation $\REL$. Let $\tree$ be an % % $(n_1, \ldots, n_\mu)$-tree of acceptable transcripts output by a $\ppt$ tree % % building algorithm $\tdv$ which plays the role of the verifier $\ps.\verifier$ % % against $\ppt$ adversary $\adv$ which it interacts with. % % % % We say that $\proofsystem$ is % % $(\epsss, (n_1, \ldots, n_\mu))$-\emph{computationally special sound} if there % % exists an extractor $\extt$ that given an $ (n_1, \ldots, n_\mu)$-tree of % % acceptable transcripts $\tree$ for an instance $\inp \in \LANG_\REL$ output by % % a $\ppt$ tree building adversary $\tdv$ some $\ppt$ adversary % % $\adv(\srs)$, for $\srs \sample \kgen(\REL)$, outputs $\wit$ such that % % $\REL(\inp, \wit) = 1$ with probability at least $1 - \epsss$. % Let $\proofsystem = (\kgen, \prover, \verifier, \simulator)$ be an % $(2 \mu + 1)$-message proof system for a relation $\REL$. Let $\tree$ be an % $(n_1, \ldots, n_\mu)$-tree of transcripts for an in valid instance % $\inp$. We say that $\ps$ is $(\epsss, (n_1, \ldots, n_\mu))$-forking sound if there is an extractor $\extt$ that given $\tree$ extracts $\wit$ % such that $\REL(\inp, \wit) = 1$ with probability at least $1 - \epsss.$ % \end{definition} % \begin{definition}[$k$-round forking soundness] % % Let $\proofsystem = (\kgen, \prover, \verifier, \simulator)$ be an % % $(2 \mu + 1)$-message proof system for a relation $\REL$. Let $\tree$ be an % % $(n_1, \ldots, n_\mu)$-tree of acceptable transcripts output by a $\ppt$ tree % % building algorithm $\tdv$ which plays the role of the verifier $\ps.\verifier$ % % against $\ppt$ adversary $\adv$ which it interacts with. % % % % We say that $\proofsystem$ is % % $(\epsss, (n_1, \ldots, n_\mu))$-\emph{computationally special sound} if there % % exists an extractor $\extt$ that given an $ (n_1, \ldots, n_\mu)$-tree of % % acceptable transcripts $\tree$ for an instance $\inp \in \LANG_\REL$ output by % % a $\ppt$ tree building adversary $\tdv$ some $\ppt$ adversary % % $\adv(\srs)$, for $\srs \sample \kgen(\REL)$, outputs $\wit$ such that % % $\REL(\inp, \wit) = 1$ with probability at least $1 - \epsss$. % Let $\proofsystem = (\kgen, \prover, \verifier, \simulator)$ be an % $(2 \mu + 1)$-message proof system for a relation $\REL$. Let $\tree$ be the % algorithm below that rewinds $\advse^{\simulator_\fs, % \ro} (\srs; r)$ to produce a $(1, n_k, 1)$-tree of % transcripts such that none of the challenges in round $k$ were used in % simulated proofs. We say that $\ps$ is $(\epsss, ((1, n_k, 1)))$-forking % special sound if there is an extractor $\extt$ that given a tree produced by % $\tree$ extracts $\wit$ such that $\REL(\inp, \wit) = 1$ with probability at % least $1 - \epsss.$ % \end{definition} % Since we do not utilise the classical special soundness (that holds for all, % even unbounded, adversaries) all references to that property should be % understood as references to its computational version. \begin{definition}[$(\eps(\secpar), k,n)$-forking soundness] Let $\proofsystem = (\kgen, \prover, \verifier, \simulator)$ be an $(2 \mu + 1)$-message proof system for a relation $\REL$. For any $\ppt$ adversary $\advse^{\simulator_\fs, \ro} (\srs; r)$ we consider the procedure $\zdv$ that provided the transcript $(\srs, \adv, r, Q, Q_{H})$ and $h_1, \ldots, h_q$ runs $\adv$ by providing it with random oracle queries and simulated proofs. 
While $Q_{H}$ is consistent with $h_1, \ldots, h_q$, it replays the proofs of $Q$. % $\zdv$ returns the index $i$ of the random oracle query made for challenge $k$ and the proof $\adv$ returns Consider the algorithm $\genforking_{\zdv}^{n}$ that rewinds $\zdv$ to produce a $(1,\dots, n,\dots, 1)$-tree of transcripts such that none of the $n$ challenges in round $k$ were used in simulated proofs. We say that $\ps$ is $(\eps(\secpar), k,n)$-forking sound if for any PPT adversary the probability that \begin{align*} \Pr\left[ \REL(\inp, \wit) = 0 \,\Biggl|\, \begin{aligned} & \srs \sample \kgen(\REL), r \sample \RND{\advse}, (\inp_{\advse}, \zkproof_{\advse}) \gets \advse^{\simulator_\fs,\ro} (\srs; r), \\ & (1, \tree) \gets \genforking_{\zdv}^{m}((\srs,\adv,r,Q, Q_{H}),Q_{H}), \wit \gets \extt(\tree) \end{aligned} \right] \leq \eps(\secpar). \end{align*} List $Q$ contains all $(\inp, \zkproof)$ pairs where $\inp$ is an instance provided to the simulator by the adversary and $\zkproof$ is the simulator's answer. List $Q_\ro$ contains all $\advse$'s queries to $\ro$ and $\ro$'s answers. \end{definition} \begin{definition}[$(\eps(\secpar), k,n)$-forking soundness] Let $\proofsystem = (\kgen, \prover, \verifier, \simulator)$ be an $(2 \mu + 1)$-message proof system for a relation $\REL$. Let $\tdv$, called tree creator, be the algorithm below that rewinds the $\ppt$ adversary $\advse^{\simulator_\fs, \ro} (\srs; r)$ to produce a $(1,\dots, n,\dots, 1)$-tree of transcripts such that none of the $n$ challenges in round $k$ were used in simulated proofs. $\tdv$ has oracle access to $\adv$ and provides it with (oracle) access to random oracle $\ro$ and simulator $\simulator_\fs$ -- more precisely $\tdv$ has an internal procedure $\bdv$ that provided $\srs$ and random oracle queries' responses $h_1, \ldots, h_q$ gives $\adv$ access to the random oracle and simulates proof for it. In the end, $\bdv$ returns the index $i$ of the random oracle query made for challenge $k$, the set $Q$ of simulator random oracle indexes, the instance $\inp$, and the proof $\adv$ returns. Eventually, $\tdv$ returns a $(1, \ldots, n, \dots, 1)$ tree of acceptable transcripts~$\tree$. \begin{figure} \centering \fbox{ \procedure{$\tdv(\adv, \srs \sample \kgen(\REL))$} {h_1^{1}, \ldots, h_{q}^1 \sample H \\ (i, Q, \inp, \zkproof_1) \gets \bdv(\adv, \srs, h_1^{1}, \ldots, h_{q}^{1})\\ % i_1 \gets i\\ \pcif i\in Q \lor \verifier(\srs, \inp, \zkproof_1) = 0\ \pcreturn (0, \bot)\\ \pcfor j \in \range{2}{m}\\ \pcind h_{1}^{j}, \ldots, h_{i - 1}^{j} \gets h_{1}^{j - 1}, \ldots, h_{i - 1}^{j - 1}\\ \pcind h_{i}^{j}, \ldots, h_{q}^{j} \sample H\\ \pcind (i_j, Q_j, \inp_j, \zkproof_j) \gets \bdv(\adv, \srs, h_1^{j}, \ldots, h_{i - 1}^{j}, h_{i}^{j}, \ldots, h_{q}^{j})\\ \pcind \pcif i \neq i_j \lor i_j \in Q_j \lor \inp \neq \inp_j \lor \verifier(\srs, \inp_j, \zkproof_i) = 0\ \pcreturn (0, \bot)\\ %\pcind \pcif i_j = 0 \lor i_j \neq i\ \pcreturn (0, \bot)\\ % \pcif \exists (j, j') \in \range{1}{m}^2, j \neq j' : (h_{i}^{j} = % h_{i}^{j'})\ % % pcreturn (0, \bot)\\ \pcelse \pcreturn (1, \tree = (\inp, \pmb{\pi}))} } \end{figure} % We say that $\ps$ is $(\eps(\secpar), k,n)$-forking sound if for any PPT adversary the probability that \[ \Pr\left[ % \begin{aligned} % & \forall_{\zkproof \in \tree} \verifier(\srs, \inp, \zkproof) = 1, \\ \wit \gets \extt(\tree), \REL(\inp, \wit) = 0 % \end{aligned} \,\left|\, %\begin{aligned} \srs \sample \kgen(\REL), (1, \tree) \gets \tdv(\adv, \srs) % \end{aligned} \right.\right] \leq \eps(\secpar). 
\]
\end{definition}

\section{Simulation soundness and forking simulation-extractability---the general result}
\label{sec:general}
Equipped with the definitional framework of \cref{sec:se_definitions} we are ready to present the main result of this paper---a proof of simulation soundness and forking simulation extractability of Fiat--Shamir NIZKs based on multi-round protocols.

The proofs go by game hopping. The games are controlled by an environment $\env$ that internally runs a simulation extractability adversary $\advse$, provides it with access to a random oracle and simulator, and, when necessary, rewinds it. The games differ by various breaking points, i.e.~points where the environment decides to abort the game.

Denote by $\zkproof_{\advse}, \zkproof_{\simulator}$ proofs returned by the adversary and the simulator respectively. We use $\zkproof[i]$ to denote the prover's message in the $i$-th round of the proof (counting from 1), i.e.~the $(2i - 1)$-th message exchanged in the protocol. $\zkproof[i].\ch$ denotes the challenge that is given to the prover after $\zkproof[i]$, and $\zkproof[i..j]$ denotes all messages of the proof, including challenges, between rounds $i$ and $j$, but not the challenge $\zkproof[j].\ch$. When it is not explicitly stated, we denote the proven instance $\inp$ by $\zkproof[0]$ (however, there is no following challenge $\zkproof[0].\ch$).

Without loss of generality, we assume that whenever the accepting proof contains a response to a challenge from a random oracle, then the adversary queried the oracle to get it. It is straightforward to transform any adversary that violates this condition into an adversary that makes these additional queries to the random oracle and wins with the same probability.

\begin{theorem}[Simulation soundness]
\label{thm:simsnd}
Assume that $\ps$ is $k$-programmable HVZK in the standard model, that it is $\epss(\secpar)$-sound and $\ur{k}$ with security $\epsur(\secpar)$. Then, the probability that a $\ppt$ adversary $\adv$ breaks simulation soundness of $\ps_{\fs}$ is upper-bounded by
\(
\epsur(\secpar) + q^\mu \epss(\secpar)\,,
\)
where $q$ is the total number of queries made by the adversary $\adv$ to a random oracle $\ro\colon \bin^{*} \to \bin^{\secpar}$.
\end{theorem}
\begin{proof}
\ngame{0} This is a simulation soundness game played between an adversary $\adv$ who is given access to a random oracle $\ro$ and simulator $\psfs.\simulator$. $\adv$ wins if it manages to produce an accepting proof for a false statement. In the following game hops, we upper-bound the probability that this happens.

\ngame{1} This is identical to $\game{0}$ except that the game is aborted if there is a simulated proof $\zkproof_\simulator$ for $\inp_{\adv}$ such that $(\inp_{\adv}, \zkproof_\simulator[1..k]) = (\inp_{\adv}, \zkproof_{\adv}[1..k])$. That is, the adversary in its final proof reuses the first $k$ messages of a simulated proof it saw before and the proof is accepting. Denote this event by $\event{\errur}$.

\ncase{Game 0 to Game 1} We have
\(
\prob{\game{0} \land \nevent{\errur}} = \prob{\game{1} \land \nevent{\errur}}
\)
and, from the difference lemma, cf.~\cref{lem:difference_lemma}, $ \abs{\prob{\game{0}} - \prob{\game{1}}} \leq \prob{\event{\errur}}\,$. Thus, to show that the transition from one game to another introduces only a minor change in the probability of $\adv$ winning, it suffices to show that $\prob{\event{\errur}}$ is small. We can assume that $\adv$ queried the simulator on the instance it wishes to output, i.e.~$\inp_{\adv}$.
We show a reduction $\rdvur$ that utilises $\adv$ to break the $\ur{k}$ property of $\ps$. Let $\rdvur$ run $\advse$ internally as a black-box: \begin{compactitem} \item The reduction answers both queries to the simulator $\psfs.\simulator$ and to the random oracle. It also keeps lists $Q$, for the simulated proofs, and $Q_\ro$ for the random oracle queries. \item When $\adv$ makes a fake proof $\zkproof_{\adv}$ for $\inp_{\adv}$, $\rdvur$ looks through lists $Q$ and $Q_\ro$ until it finds $\zkproof_{\simulator}[0..k]$ such that $\zkproof_{\adv}[0..k] = \zkproof_{\simulator}[0..k]$ and a random oracle query $\zkproof_{\simulator}[k].\ch$ on $\zkproof_{\simulator}[0..k]$. \item $\rdvur$ returns two proofs for $\inp_{\adv}$: \begin{align*} \zkproof_1 = (\zkproof_{\simulator}[1..k], \zkproof_{\simulator}[k].\ch, \zkproof_{\simulator}[k + 1..\mu + 1])\\ \zkproof_2 = (\zkproof_{\simulator}[1..k], \zkproof_{\simulator}[k].\ch, \zkproof_{\adv}[k + 1..\mu + 1]) \end{align*} \end{compactitem} If $\zkproof_1 = \zkproof_2$, then $\adv$ fails to break simulation soundness, as $\zkproof_2 \in Q$. On the other hand, if the proofs are not equal, then $\rdvur$ breaks $\ur{k}$-ness of $\ps$. This happens only with negligible probability $\epsur(\secpar)$, hence \( \prob{\event{\errur}} \leq \epsur(\secpar)\,. \) \ngame{2} This is identical to $\game{1}$ except that now the environment aborts if the instance the adversary proves is not in the language. \ncase{Game 1 to Game 2} % REDUCTION TO INTERACTIVE SOUNDNESS: We show that $\abs{\prob{\game{1}} - \prob{\game{2}}} \leq q^{\mu} \cdot \epss(\secpar)$, where $\epss(\secpar)$ is the probability of breaking soundness of the underlying \emph{interactive} protocol $\ps$. Note that $\abs{\prob{\game{1}} - \prob{\game{2}}}$ is the probability that $\adv$ outputs an acceptable proof for a false statement which does not break the unique response property (such proofs have been excluded by $\game{1}$). Consider a soundness adversary $\adv'$ who initiates a proof with $\ps$'s verifier $\ps.\verifier$, internally runs $\adv$ and proceeds as follows: \begin{compactitem} \item It guesses indices $i_1, \ldots, i_\mu$ such that random oracle queries $h_{i_1}, \ldots, h_{i_\mu}$ are the queries used in the $\zkproof_\adv$ proof eventually output by $\adv$. This is done with probability at least $1/q^\mu$ (since there are $\mu$ challenges from the verifier in $\ps$). \item On input $h$ for the $i$-th, $i \not\in \smallset{{i_1}, \ldots, {i_\mu}}$, random oracle query, $\adv'$ returns randomly picked $y$, sets $\ro(h) = y $ and stores $(h, y)$ in $Q_\ro$ if $h$ is sent to $\ro$ the first time. If that is not the case, $\adv$ finds $h$ in $Q_\ro$ and returns the corresponding $y$. \item On input $h_{i_j}$ for the $i_j$-th, $i_j \in \smallset{{i_1}, \ldots, {i_\mu}}$, random oracle query, $\adv'$ parses $h_{i_j}$ as a partial proof transcript $\zkproof_\adv[1..j]$ and runs $\ps$ using $\zkproof_\adv[j]$ as a $\ps.\prover$'s $j$-th message to $\ps.\verifier$. The verifier responds with a challenge $\zkproof_\adv[j].\ch$. $\adv'$ sets $\ro(h_{i_j}) = \zkproof_\adv[j].\ch$. If we guessed the indices correctly we have that $h_{i_{j'}}$, for $j' \leq j$, parsed as $\zkproof_\adv[1..j']$ is a prefix of $\zkproof_\adv[1..j]$. \item On query $\inp_\simulator$ to $\simulator$, $\adv'$ runs the simulator $\ps.\simulator$ internally. Note that we require a simulator that only programs the random oracle for $j \geq k$. 
If the simulator makes a previously unanswered random oracle query with input $\zkproof_\simulator[1..j]$, $1 \leq j < k$, and this is the $i_j$-th query, it generates $\zkproof_\simulator[j].\ch$ by invoking $\ps.\verifier$ on $\zkproof_\simulator[j]$ and programs $\ro(h_{i_j}) = \zkproof_\simulator[j].\ch$. It returns $\zkproof_\simulator$.
\item It answers $\ps.\verifier$'s final challenge $\zkproof_\adv[\mu].\ch$ using the answer given by $\adv$, i.e.~$\zkproof_\adv[\mu + 1]$.
\end{compactitem}
That is, $\adv'$ manages to break soundness of $\ps$ if $\adv$ manages to break simulation soundness without breaking the unique response property and $\adv'$ correctly guesses the indices of $\adv$'s random oracle queries. Thus, $\adv'$ breaks soundness with probability at least $\abs{\prob{\game{1}} - \prob{\game{2}}} \cdot \infrac{1}{q^{\mu}}$, while this probability is upper-bounded by $\epss(\secpar)$. Hence $\abs{\prob{\game{1}} - \prob{\game{2}}} \leq q^{\mu} \cdot \epss(\secpar)$.

Note that in $\game{2}$ the adversary cannot win. Thus the probability that $\advss$ is successful is upper-bounded by $\epsur(\secpar) + q^{\mu} \cdot \epss(\secpar)$. \qed
\end{proof}

We conjecture that, based on the recent results on state restoration soundness~\cite{cryptoeprint:2020:1351}, which effectively allows querying the verifier multiple times on different overlapping transcripts, the $q^{\mu}$ loss could be avoided. However, this would reduce the class of protocols covered by our results.

\begin{theorem}[Forking simulation-extractable multi-message protocols]
\label{thm:se}
Let $\ps = (\kgen, \prover, \verifier, \simulator)$ be an interactive $(2 \mu + 1)$-message proof system for $\RELGEN(\secparam)$ that is honest verifier zero-knowledge in the standard model\hamid{Is this different from Trapdoor-less simulatable proof system (def. 1)?}\footnote{Crucially, we require that one can provide an indistinguishable simulated proof without any additional knowledge, as e.g.~knowledge of an SRS trapdoor.}, has the $\ur{k}$ property with security $\epsur(\secpar)$, and is $(\epss(\secpar), k, n)$-forking sound.
% for
% $n_i = 1, i \in \range{1}{\mu} \setminus \smallset{k}$ and $n_k = n$.
%
Let $\ro\colon \bin^{*} \to \bin^{\secpar}$ be a random oracle. Then $\psfs$ is forking simulation-extractable with extraction error $\epsur(\secpar)$ against $\ppt$ algebraic adversaries that make up to $q$ random oracle queries and return an acceptable proof with probability at least $\accProb$. The extraction probability $\extProb$ is at least
\(
\extProb \geq \frac{1}{q^{n - 1}} (\accProb - \epsur(\secpar))^{n} - \eps(\secpar)\,,
\)
for some negligible $\eps(\secpar)$.
\end{theorem}

\begin{proof}
\ngame{0} This is a simulation extraction game played between an adversary $\advse$ who is given access to a random oracle $\ro$ and simulator $\psfs.\simulator$. There is also an extractor $\ext$ that, from a proof $\zkproof_{\advse}$ for instance $\inp_{\advse}$ output by the adversary and from the transcript of $\advse$'s operations, is tasked to extract a witness $\wit_{\advse}$ such that $\REL(\inp_{\advse}, \wit_{\advse})$ holds. $\advse$ wins if it manages to produce an acceptable proof and the extractor fails to reveal the corresponding witness. In the following game hops we upper-bound the probability that this happens.

\ngame{1} This is identical to $\game{0}$ except that now the game is aborted if there is a simulated proof $\zkproof_\simulator$ for $\inp_{\advse}$ such that $(\inp_{\advse}, \zkproof_\simulator[1..k]) = (\inp_{\advse}, \zkproof_{\advse}[1..k])$.
That is, the adversary in its final proof reuses at least $k$ messages from a simulated proof it saw before and the proof is acceptable. Denote that event by $\event{\errur}$. \ncase{Game 0 to Game 1} $\prob{\event{\errur}} \leq \epsur(\secpar)$. The proof goes exactly as in \cref{thm:simsnd}. \COMMENT{We have, \( \prob{\game{0} \land \nevent{\errur}} = \prob{\game{1} \land \nevent{\errur}} \) and, from the difference lemma, cf.~\cref{lem:difference_lemma}, \[ \abs{\prob{\game{0}} - \prob{\game{1}}} \leq \prob{\event{\errur}}\,. \] Thus, to show that the transition from one game to another introduces only minor change in probability of $\advse$ winning it should be shown that $\prob{\event{\errur}}$ is small. We can assume that $\advse$ queried the simulator on the instance it wishes to output---$\inp_{\advse}$. We show a reduction $\rdvur$ that utilises $\advse$, who outputs a valid proof for $\inp_{\advse}$, to break the $\ur{k}$ property of $\ps$. Let $\rdvur$ run $\advse$ internally as a black-box: \begin{itemize} \item The reduction answers both queries to the simulator $\psfs.\simulator$ and to the random oracle. It also keeps lists $Q$, for the simulated proofs, and $Q_\ro$ for the random oracle queries. \item When $\advse$ makes a fake proof $\zkproof_{\advse}$ for $\inp_{\advse}$, $\rdvur$ looks through lists $Q$ and $Q_\ro$ until it finds $\zkproof_{\simulator}[0..k]$ such that $\zkproof_{\advse}[0..k] = \zkproof_{\simulator}[0..k]$ and a random oracle query $\zkproof_{\simulator}[k].\ch$ on $\zkproof_{\simulator}[0..k]$. \item $\rdvur$ returns two proofs for $\inp_{\advse}$: \begin{align*} \zkproof_1 = (\zkproof_{\simulator}[1..k], \zkproof_{\simulator}[k].\ch, \zkproof_{\simulator}[k + 1..\mu + 1])\\ \zkproof_2 = (\zkproof_{\simulator}[1..k], \zkproof_{\simulator}[k].\ch, \zkproof_{\advse}[k + 1..\mu + 1]) \end{align*} \end{itemize} If $\zkproof_1 = \zkproof_2$, then $\advse$ fails to break simulation extractability, as $\zkproof_2 \in Q$. On the other hand, if the proofs are not equal, then $\rdvur$ breaks $\ur{k}$-ness of $\ps$. This happens only with negligible probability $\epsur(\secpar)$, hence \( \prob{\event{\errur}} \leq \epsur(\secpar)\,. \) } \ngame{2} This is identical to $\game{1}$ except that now the environment aborts also when it fails to build a $(1, \ldots, 1, n, 1, \ldots, 1)$-tree of accepting transcripts $\tree$ by rewinding $\advse$. Denote that event by $\event{\errfrk}$. \ncase{Game 1 to Game 2} Note that for every acceptable proof $\zkproof_{\advse}$, we may assume that whenever $\advse$ outputs in Round $k$ message $\zkproof_{\advse}[k]$, then the $(\inp_{\advse}, \zkproof_{\advse}[1..k])$ random oracle query was made by the adversary, not the simulator\footnote{\cite{INDOCRYPT:FKMV12} calls these queries \emph{fresh}.}, i.e.~there is no simulated proof $\zkproof_\simulator$ on $\inp_\simulator$ such that $(\inp_{\advse}, \zkproof_{\advse} [1..k]) = (\inp_\simulator, \zkproof_\simulator[1..k])$. Otherwise, the game would be already interrupted by the error event in Game $\game{1}$. As previously, \( \abs{\prob{\game{1}} - \prob{\game{2}}} \leq \prob{\event{\errfrk}}\,. \) We describe our extractor $\ext$ here. The extractor takes as input relation $\REL$, SRS $\srs$, $\advse$'s code, its randomness $r$, the output instance $\inp_{\advse}$ and proof $\zkproof_{\advse}$, as well as the list $Q$ of simulated proofs (and their instances) and the list of random oracle queries and responses $Q_\ro$. 
Then, $\ext$ starts a forking algorithm $\genforking^{n}_\zdv(y,h_1, \ldots, h_q)$ for $y = (\srs, \advse, r, \inp_{\advse}, \zkproof_{\advse}, Q)$ where we set $h_1, \ldots, h_q$ to be the consecutive queries from list $Q_\ro$. We run $\advse$ internally in $\zdv$.% which returns the proof $\zkproof$ and index $i$ %of the random oracle query that $\advse$ used to answer $\zkproof$'s $k$-th challenge. To assure that in the first execution of $\zdv$ the adversary $\advse$ produce the same $(\inp_{\advse}, \zkproof_{\advse})$ as in the extraction game, $\zdv$ provides $\advse$ with the same randomness $r$ and answers queries to the random oracle and simulator with pre-recorded responses in $Q_\ro$ and $Q$. % Note, that since the view of the adversary when run inside $\zdv$ is the same as its view with access to the real random oracle and simulator, it produces exactly the same output. After the first run, $\zdv$ outputs the index $i$ of a random oracle query that was used by $\advse$ to compute the challenge $\zkproof[k].\ch = \ro(\zkproof_{\advse}[0..k])$ it had to answer in the $(k + 1)$-th round and adversary's transcript, denoted by $s_1$ in $\genforking$'s description. If no such query took place $\zdv$ outputs $i = 0$. Then new random oracle responses are picked for queries indexed by $i, \ldots, q$ and the adversary is rewound to the point just prior to when it gets the response to RO query $\zkproof_{\advse}[0..k]$. The adversary gets a random oracle response from a new set of responses $h^2_i, \ldots, h^2_q$. If the adversary requests a simulated proof after seeing $h^2_i$ then $\zdv$ computes the simulated proof on its own. Eventually, $\zdv$ outputs index $i'$ of a query that was used by the adversary to compute $\ro(\zkproof_{\advse}[0..k])$, and a new transcript $s_2$. $\zdv$ is run $n$ times with different random oracle responses. If a tree $\tree$ of $n$ transcripts is built then $\ext$ runs internally the tree extractor $\extt(\tree)$ and outputs what it returns. We emphasize here the importance of the unique response property. If it does not hold then in some $j$-th execution of $\zdv$ the adversary could reuse a challenge that it learned from observing proofs in $Q$. In that case, $\zdv$ would output $i = 0$, making the extractor fail. Fortunately, the case that the adversary breaks the unique response property has already been covered by the abort condition in $\game{1}$. Denote by $\waccProb$ the probability that $\advse$ outputs a proof that is accepted and does not break $\ur{k}$-ness of $\ps$. Denote by $\waccProb'$ the probability that algorithm $\zdv$, defined in the lemma, produces an accepting proof with a fresh challenge after Round $k$. Given the discussion above, we can state that $\waccProb = \waccProb'$. Next, from the generalised forking lemma, cf.~\cref{lem:generalised_forking_lemma}, we get that \begin{equation} \begin{split} \prob{\event{\errfrk}} \leq 1 - \waccProb \cdot \left(\infrac{\waccProb^{n - 1}}{q^{n - 1}} + \infrac{(2^\secpar) !}{((2^\secpar - n)! \cdot (2^\secpar)^{n})} - 1\right). % = \\ % 1 - \left(\frac{\waccProb^{n}}{q^{n - 1}} + % \waccProb \cdot \frac{(2^\secpar) !}{(2^\secpar - n)! \cdot % (2^\secpar)^{n}} - \waccProb\right)\,. \end{split} \end{equation} % \ngame{3} This game is identical to $\game{2}$ except that the environment % aborts it when the adversary manages to break simulation soundness in one the % transcripts in tree $\tree$. 
% \ncase{$\game{2} \mapsto \game{3}$} From \cref{thm:simsnd} we have that
% probability that $\adv$ breaks simulation soundness, while not breaking the
% unique response property, is upper-bounded by $q_\ro^{\mu} \epss$. Since all the
% branches of the tree are either for a valid or invalid statement, then the
% probability that $\adv$ breaks simulation soundness in one of the tree branches
% is upper-bounded by $q_{\ro}^{\mu} \epss$ as well.

\ngame{3} This game is identical to $\game{2}$ except that it aborts if $\extt(\tree)$ run by $\ext$ fails to extract the witness.

\ncase{Game 2 to Game 3} Since $\ps$ is forking sound, the probability that $\extt(\tree)$ fails is upper-bounded by $\epsss(\secpar)$.

Since Game $\game{3}$ is aborted when it is impossible to extract the correct witness from $\tree$, the adversary $\advse$ cannot win. Thus, by the game-hopping argument,
\[
\abs{\prob{\game{0}} - \prob{\game{3}}} \leq 1 - \left(\frac{\waccProb^{n}}{q^{n - 1}} +
\waccProb \cdot \frac{(2^\secpar) !}{(2^\secpar - n)! \cdot
(2^\secpar)^{n}} - \waccProb\right) + \epsur(\secpar) +
%q_{\ro}^{\mu} \epss +
\epsss(\secpar)\,.
\]
Thus the probability that extractor $\extss$ succeeds is at least
\[
\frac{\waccProb^{n}}{q^{n - 1}} +
\waccProb \cdot \frac{(2^\secpar) !}{(2^\secpar - n)! \cdot
(2^\secpar)^{n}} - \waccProb - \epsur(\secpar)
%- q_{\ro}^{\mu} \epss
- \epsss(\secpar)\,.
\]
Since $\waccProb$ is the probability that $\advse$ outputs an acceptable transcript that does not break $\ur{k}$-ness of $\ps$, we have $\waccProb \geq \accProb - \epsur(\secpar)$, where $\accProb$ is the probability of $\advse$ outputting an acceptable proof as defined in \cref{def:simext}. It thus holds that
\begin{equation}
\label{eq:frk}
\extProb \geq \frac{(\accProb - \epsur(\secpar))^{n}}{q^{n - 1}} -
\underbrace{\left((\accProb - \epsur(\secpar)) \cdot \left( 1 -
\frac{(2^\secpar) !}{(2^\secpar - n)! \cdot
(2^\secpar)^{n}}\right) + \epsur(\secpar) +
% q_{\ro}^{\mu} \epss +
\epsss(\secpar)\right)}_{\eps(\secpar)}\,.
\end{equation}
Note that the part of \cref{eq:frk} denoted by $\eps(\secpar)$ is negligible as $\epsur(\secpar), \epsss(\secpar)$ are negligible, and $\frac{(2^\secpar) !}{(2^\secpar - n)! \cdot (2^\secpar)^{n}} \geq \left(\infrac{(2^\secpar - n)}{2^\secpar}\right)^{n}$ is overwhelming. Thus,
\[
\extProb \geq q^{-(n - 1)} (\accProb - \epsur(\secpar))^{n} - \eps(\secpar)
\]
and $\psfs$ is forking simulation-extractable with extraction error $\epsur(\secpar)$. \qed
\end{proof}

% \ourpar{Inefficient simulation extractability gives efficient simulation soundness.}
% As noted in \cref{sec:simext_def}, simulation soundness can be expressed in
% terms of simulation extractability with an unbounded extractor. Holds then,
% \begin{corollary}[Simulation extractability to simulation soundness.]
% \label{cor:simext_to_ssnd}
% Let $\ps$, as defined in \cref{thm:se} be simulation-extractable with
% \[
% \extProb \geq \frac{(\accProb - \epsur)^{n}}{q^{n - 1}} -
% (\accProb - \epsur) \cdot \left( 1 -
% \frac{(2^\secpar) !}{(2^\secpar - n)! \cdot
% (2^\secpar)^{n}}\right)
% - \epsur - \epsss,
% \]
% then it is simulation sound with
% $\ssndProb \leq 2\epsur + \epsss 2^{-\secpar}(\accProb - \epsur)$
% \end{corollary}
% \begin{proof}
% Since according to the discussion in \cref{rem:simext_to_simsnd} $\ssndProb =
% \accProb - \extProb$ it holds:
% \begin{align*}
% \ssndProb \leq \accProb - \left(\frac{(\accProb - \epsur)^{n}}{q^{n - 1}} -
% (\accProb - \epsur) \cdot \left( 1 -
% \frac{(2^\secpar) !}{(2^\secpar - n)!
\cdot % (2^\secpar)^{n}}\right) % - \epsur - \epsss,\right). % \end{align*} % Furthermore, as the extractor $\ext$ from \cref{rem:simext_to_simsnd} is unbounded, we can % assume that it is able to extract the witness from a single run of a % simulation-extractability adversary $\adv$. hence $n = 1$. Thus, % \begin{equation*} % \begin{split} % \ssndProb & \leq \accProb - \left(\accProb - \epsur - % (\accProb - \epsur) \cdot \left( 1 - % 1)\right) % - \epsur - \epsss\right) \\ % & = 2 \epsur % + \epsss \\ % \end{split} % \end{equation*} % \qed % \end{proof} \section{Non-Malleability of $\plonkprotfs$} \label{sec:plonk} In this section, we show that $\plonkprotfs$ is simulation-sound and forking simulation-extractable. To that end, we proceed as follows. First, we show that the version of the KZG polynomial commitment scheme that is proposed in the \plonk{} paper has the unique opening property, cf.~\cref{sec:poly_com} and \cref{lem:pcomp_op}. This is then used to show that $\plonkprot$ has the $\ur{2}$ property, cf.~\cref{lem:plonkprot_ur}. Next, we show that $\plonkprot$ is forking-sound. That is, given a number of accepting transcripts which match on the first 3 rounds of the protocol we can either recover a correct witness for the proven statement or use one of the transcripts to break the $\dlog$ assumption. This result is shown in the AGM, cf.~\cref{lem:plonkprot_ss}. Given forking-soundness of $\plonkprot$, we use the fact that it is also $\ur{2}$ and show, in a similar fashion to \cite{INDOCRYPT:FKMV12}, that it is simulation-extractable. That is, we build reductions that given a simulation extractability adversary $\advse$ either break the protocol's unique response property or based on forking soundness break the $\dlog$ assumption, if extracting a valid witness from a tree of transcripts is impossible. See \cref{thm:plonkprotfs_se}. % Due to page limit, we omit description of \plonk{} here and refer to % \cref{sec:plonk_explained}. Unfortunately, we also have to move some of the % proofs to the Supplementary Materials as well, cf.~\cref{sec:plonk_supp_mat} \subsection{Unique opening property of $\PCOMp$} \begin{lemma} \label{lem:pcomp_op} Let $\PCOMp$ be a batched version of a KZG polynomial commitment, cf.~\cref{fig:pcomp}, then $\PCOMp$ has the unique opening property in the AGM with security $\epsop(\secpar) \leq 2 \epsdlog(\secpar) + \infrac{1}{\abs{\FF_p}}$, where $\epsdlog(\secpar)$ is security of the $(\noofc + 2, 1)$-dlog assumption and $\FF_p$ is the field used in $\PCOMp$.\end{lemma} \begin{proof} Let $\vec{z} = (z, z') \in \FF_p^2$ be the two points the polynomials are evaluated at, $k \in \NN$ be the number of the committed polynomials to be evaluated at $z$, and $k' \in \NN$ be the number of the committed polynomials to be evaluated at $z'$, $\vec{c} \in \GRP^k, \vec{c'} \in \GRP^{k'}$ be the commitments, $\vec{s} \in \FF_p^k, \vec{s'} \in \FF_p^{k'}$ the evaluations, and $\vec{o} = (o, o') \in \FF_p^2$ be the commitment openings. We need to show that the probability a $\ppt$ $\adv$ opens the same commitment in two different ways is at most $\epsop(\secpar)$, even when the commitment openings are verified in batches. 
The idealised verifier checks whether the following equality, for $\gamma, r'$ picked at random, holds:
\begin{multline}
\label{eq:ver_eq_poly}
\left(\sum_{i = 1}^{k} \gamma^{i - 1} \cdot \p{f}_i(X) - \sum_{i = 1}^{k} \gamma^{i - 1} \cdot s_i\right) + r' \left(\sum_{i = 1}^{k'} \gamma'^{i - 1} \cdot \p{f'}_i(X) - \sum_{i = 1}^{k'} \gamma'^{i - 1} \cdot s'_i \right)\\
\equiv \p{o}(X)(X - z) + r' \p{o}'(X)(X- z').
\end{multline}
Since $r'$ has been picked at random from $\FF_p$, the probability that \cref{eq:ver_eq_poly} holds while either
\[
\sum_{i = 1}^{k} \gamma^{i - 1} \cdot \p{f}_i(X) - \sum_{i = 1}^{k} \gamma^{i - 1} \cdot s_i \not\equiv \p{o}(X)(X - z) \text{, or}
\]
\[
\sum_{i = 1}^{k'} \gamma'^{i - 1} \cdot \p{f'}_i(X) - \sum_{i = 1}^{k'} \gamma'^{i - 1} \cdot s'_i \not\equiv \p{o'}(X)(X - z')
\]
is $\infrac{1}{\abs{\FF_p}}$, cf.~\cite{EPRINT:GabWilCio19}. When
\(
\sum_{i = 1}^{k} \gamma^{i - 1} \cdot \p{f}_i(X) - \sum_{i = 1}^{k} \gamma^{i - 1} \cdot s_i = \p{o}(X)(X - z)
\)
holds, the polynomial $\p{o}(X)$ is uniquely determined by the uniqueness of polynomial division. Similarly, $\p{o'}(X)$ is uniquely determined as well. Since any discrepancy between the idealised verifier rejection and real verifier acceptance allows one to break the discrete logarithm problem, the probability that the real verifier accepts in one of the cases above is upper-bounded by $2 \epsdlog + \infrac{1}{\abs{\FF_p}}$.
\qed
\end{proof}

\subsection{Unique response property}

\begin{lemma}
\label{lem:plonkprot_ur}
Let $\PCOMp$ be a commitment of knowledge with security $\epsk(\secpar)$ that is $\epsbind(\secpar)$-binding and has the unique opening property with security $\epsop(\secpar)$. Then the probability that a $\ppt$ adversary $\adv$ breaks $\plonkprotfs$'s $\ur{2}$ property is at most $\epsop + 9 \cdot (\epsbind + \infrac{2}{\abs{\FF_p}}) + \epss + \epsro$, where $\epsro$ is the probability that a $\ppt$ adversary finds a collision in the random oracle.
\end{lemma}
\begin{proof}
Let $\adv(\REL,\srs = (\gone{1, \chi, \ldots, \chi^{\noofc + 2}}, \gtwo{\chi}))$ be an algebraic adversary tasked to break the $\ur{2}$-ness of $\plonkprotfs$. We show that the first 2 rounds of the protocol determine, along with the verifier's challenges, the rest of it. This is done by game hops. In the games, the adversary outputs two proofs $\zkproof$ and $\zkproof'$ for the same statement. To distinguish polynomials and commitments which an honest prover sends in the proof from the polynomials and commitments computed by the adversary, we write the latter using indices $0$ and $1$ (two indices as we have two transcripts), e.g.~to describe the quotient polynomial provided by the adversary we write $\p{t}^0$ and $\p{t}^1$ instead of $\p{t}$ as in the description of the protocol.

\ngame{0} In this game, the adversary is given the SRS and wins if it provides two transcripts that match on all $5$ messages sent by the prover or finds a collision in the random oracle. Since two such transcripts cannot break the unique response property, the adversary wins this game with probability at most $\epsro$.

\ngame{1} This game is identical to Game $\game{0}$ except that now the adversary additionally wins if it provides two transcripts that match on the first four messages of the proof.

\ncase{Game 0 to Game 1} We show that the probability that $\adv$ wins in one game but does not in the other is negligible. Observe that in Round 5 of the proof, the adversary is given a challenge $v$ and has to open the previously computed commitments.
Since the transcripts match up to Round 4, the challenge is the same in both. Hence, to be able to give two different openings in Round 5, $\adv$ has to break the unique opening property of the KZG commitment scheme, which happens with probability at most $\epsop$.
% Since
% there are two commitments that the adversary opens, by the union bound
% probability that $\adv$ wins in one game but not the other is upper-bounded
% by
% $2 \cdot \epsop$.

\ngame{2} This game is identical to Game $\game{1}$ except that now the adversary additionally wins if it provides two transcripts that match on the first three messages of the proof.

\ncase{Game 1 to Game 2} In Round 4 of the protocol the adversary has to provide evaluations $a_\chz = \p{a}(\chz), b_\chz = \p{b}(\chz), c_\chz = \p{c}(\chz), t_\chz = \p{t}(\chz), S_{1, \chz} = \p{S_{\sigma 1}}(\chz), s_{2, \chz} = \p{S_{\sigma 2}}(\chz), z_\chz = \p{z}(\chz \omega)$ of previously committed polynomials, and to compute and evaluate a linearization polynomial $\p{r}$. As before, the adversary cannot provide two different evaluations for the committed polynomials, since that would require breaking the evaluation binding property, which happens (by the union bound) with probability at most $7 \cdot (\epsbind + \infrac{2}{\abs{\FF_p}})$. The latter terms appear since the adversary does not provide an opening for each of the commitments separately, but only in a batched way. That comes with $\infrac{1}{\abs{\FF_p}}$ of security loss. Another $\infrac{1}{\abs{\FF_p}}$ security loss comes from the fact that the verification of the commitment openings is batched as well.

The adversary can also not provide two different evaluations $r^0_\chz$ and $r^1_\chz$ of the linearization polynomials $\p{r^0}$ and $\p{r^1}$, as the linearization polynomial is determined by values known to the verifier, who can also compute a commitment to $\p{r}(X)$ equal to $\gone{\p{r}(\chi)}$ on its own. The evaluation of $\p{r}$ provided by the adversary is later checked, as $\adv$ opens the commitment in Round 5. Hence, the probability that the adversary manages to build two convincing proofs that differ in evaluations $r_\chz$ and $r'_\chz$ is at most $\epsbind + \infrac{2}{\abs{\FF_p}}$.

Hence, the probability that the adversary wins in one game but does not in the other is upper-bounded by $8 \cdot (\epsbind + \infrac{2}{\abs{\FF_p}})$.

\ngame{3} This game is identical to Game $\game{2}$ except that now the adversary additionally wins if it provides two transcripts that match on the first two messages of the proof.

\ncase{Game 2 to Game 3} In Round 3 the adversary computes the quotient polynomial $\pt(X)$ and provides its commitment, which consists of three separate commitments $\gone{\p{t_{lo}}(\chi), \p{t_{mid}}(\chi), \p{t_{hi}}(\chi)}$. Let $\gone{\p{t^0_{lo}}(\chi), \p{t^0_{mid}}(\chi), \p{t^0_{hi}}(\chi)}$ be the commitments output by the adversary in one transcript, and $\gone{\p{t^1_{lo}}(\chi), \p{t^1_{mid}}(\chi), \p{t^1_{hi}}(\chi)}$ the commitments provided in the other.
% Since the commitment scheme is deterministic, the adversary cannot come up with two different valid commitments for the same polynomial.
If the adversary picks two different polynomials $\p{t^0}(X)$, committed as $\gone{\p{t^0_{lo}}(\chi), \p{t^0_{mid}}(\chi), \p{t^0_{hi}}(\chi)}$, and $\p{t^1}(X)$, committed as $\gone{\p{t^1_{lo}}(\chi), \p{t^1_{mid}}(\chi), \p{t^1_{hi}}(\chi)}$, then one of them has to be computed incorrectly. Importantly, the polynomial $\p{t}(X)$ ensures that the constraints of the system hold.
Hence, the probability that one of $\p{t^0}(X)$, $\p{t^1}(X)$ is computed incorrectly, yet the adversary commits to it, opens the commitment acceptably, and the proof is acceptable, is upper-bounded by the soundness error $\epss$ of the proof system. Alternatively, $\adv$ may compute a commitment to an invalid $\p{t^0}(X)$ (or $\p{t^1}(X)$) and later open the commitment at $\chz$ to $\p{t}(\chz)$. That is, it gives an evaluation of the correct polynomial $\p{t}(X)$. Since the commitment scheme is evaluation binding, the probability of such an event is upper-bounded by $\epsbind + \infrac{2}{\abs{\FF_p}}$.

\ncase{Conclusion} Taking all the games together, the probability that $\adv$ wins in $\game{3}$ is upper-bounded by
\[
\epsop + 9 \cdot (\epsbind + \infrac{2}{\abs{\FF_p}}) + \epsro + \epss.
\]
\qed
\end{proof}

\subsection{Forking soundness}

\begin{lemma}
\label{lem:plonkprot_ss}
Let $\plonkprot$'s idealised verifier fail with probability at most $\epsid(\secpar)$, and let the $(\noofc + 2, 1)$-$\dlog$ problem be $\epsdlog(\secpar)$-hard. Then $\plonkprot$ is $(\epsid(\secpar) + \epsdlog(\secpar), 3, 3 \noofc + 1)$-forking sound against algebraic adversaries.
\end{lemma}
\begin{proof}
The main idea of the proof is to show that an adversary who breaks forking soundness can be used either to contradict the knowledge soundness of the idealised verifier or to solve a $\dlog$ problem instance. The proof goes by game hops.

Let $\tree$ be the tree produced by $\tdv$ by rewinding $\adv$. Note that since the tree branches after Round 3, the instance $\inp$, commitments $\gone{\p{a} (\chi), \p{b} (\chi), \p{c} (\chi), \p{z} (\chi), \p{t_{lo}} (\chi), \p{t_{mid}} (\chi), \p{t_{hi}} (\chi)}$, and challenges $\alpha, \beta, \gamma$ are the same. The tree branches after the third round of the protocol, where the challenge $\chz$ is presented, thus the tree $\tree$ is built using different values of $\chz$.
%
We consider the following games.

\ncase{Game 0} In this game the adversary wins if
% \begin{inparaenum}[(1)]
% \item
all the transcripts it produced are acceptable by the ideal verifier, i.e.~$\vereq_{\inp, \zkproof}(X) = 0$, cf.~\cref{eq:ver_eq}, and
% \item
none of the commitments $\gone{\p{a} (\chi), \p{b} (\chi), \p{c} (\chi), \p{z} (\chi), \p{t_{lo}} (\chi), \p{t_{mid}} (\chi), \p{t_{hi}} (\chi)}$ use elements from a simulated proof, and
% \item
the extractor fails to extract a valid witness out of the proof.
%\end{inparaenum}

\ncase{Probability that $\adv$ wins Game 0 is negligible} The probability of $\adv$ winning this game is at most $\epsid(\secpar)$, as the protocol $\plonkprot$, instantiated with the idealised verification equation, is perfectly knowledge sound except with negligible probability $\epsid(\secpar)$ of the idealised verifier failing. Hence for a valid proof $\zkproof$ for a statement $\inp$ there exists a witness $\wit$ such that $\REL(\inp, \wit)$ holds. Note that since $\tdv$ produces $(3 \noofc + 1)$ acceptable transcripts for different challenges $\chz$, it obtains the same number of different evaluations of the polynomials $\p{a} (X), \p{b} (X), \p{c} (X), \p{z} (X), \p{t} (X)$. Since the transcripts are acceptable by an idealised verifier, the equality between the polynomial $\p{t} (X)$ and the combination of polynomials $\p{a} (X), \p{b} (X), \p{c} (X), \p{z} (X)$ described in Round 3 of the protocol holds. Hence, $\p{a} (X), \p{b} (X), \p{c} (X)$ encode the valid witness for the proven statement.
Since $\p{a} (X), \p{b} (X), \p{c} (X)$ are of degree at most $(\noofc + 2)$ and more than $(\noofc + 2)$ of their evaluations are known, $\extt$ can recreate the polynomials' coefficients by interpolation and reveal the witness with probability $1$. Hence, the probability that extraction fails in that case is upper-bounded by the probability $\epsid(\secpar)$ of the idealised verifier failing, which is negligible.

\ncase{Game 1} In this game the adversary additionally wins if (1) it produces a transcript in $\tree$ such that $\vereq_{\inp, \zkproof}(\chi) = 0$, but $\vereq_{\inp, \zkproof}(X) \neq 0$, and (2) none of the commitments $\gone{\p{a} (\chi), \p{b} (\chi), \p{c} (\chi), \p{z} (\chi), \p{t_{lo}} (\chi), \p{t_{mid}} (\chi), \p{t_{hi}} (\chi)}$ uses elements from a simulated proof. The first condition means that the ideal verifier does not accept the proof, but the real verifier does.

\ncase{Game 0 to Game 1} Assume the adversary wins in Game 1, but does not win in Game 0. We show that such an adversary may be used to break the $\dlog$ assumption. More precisely, let $\tdv$ be an algorithm that for relation $\REL$ and randomly picked $\srs \sample \kgen(\REL)$ produces a tree of acceptable transcripts such that the winning condition of the game holds. Let $\rdvdlog$ be a reduction that gets as input an $(\noofc + 2, 1)$-$\dlog$ instance $\gone{1, \ldots, \chi^{\noofc + 2}}, \gtwo{\chi}$ and is tasked to output $\chi$. The reduction $\rdvdlog$ proceeds as follows.
\begin{enumerate}
\item Build $\plonkprot$'s SRS $\srs$ using the input $\dlog$ instance and start $\tdv(\adv, \srs)$;
\item Let $(1, \tree)$ be the output returned by $\tdv$. Let $\inp$ be the statement proven in $\tree$. Consider a transcript $\zkproof \in \tree$ such that $\vereq_{\inp, \zkproof}(X) \neq 0$, but $\vereq_{\inp, \zkproof}(\chi) = 0$. Since $\adv$ is algebraic, all group elements included in $\tree$ are extended by their representation as a combination of the input $\GRP_1$-elements. Hence, all coefficients of the verification equation polynomial $\vereq_{\inp, \zkproof}(X)$ are known.
\item Find the zero points of $\vereq_{\inp, \zkproof}(X)$ and identify $\chi$ among them.
\item Return $\chi$.
\end{enumerate}
Hence, the probability that the adversary wins Game 1 is upper-bounded by $\epsdlog(\secpar)$.
\end{proof}
\iffalse
\begin{proof}
Let $\srs$ be $\plonkprot$'s SRS and denote by $\srs_1$ all SRS's $\GRP_1$-elements; that is, $\srs_1 = \gone{1, \chi, \ldots, \chi^{\noofc + 2}}$. Let $\tdv$ be an algebraic adversary that produces a statement $\inp$ and a $(1, 1, 3\noofc + 1, 1)$-tree of acceptable transcripts $\tree$. Note that in all transcripts the instance $\inp$, proof elements $\gone{\p{a}(\chi), \p{b}(\chi), \p{c}(\chi), \p{z}(\chi), \p{t}(\chi)}$ and challenges $\alpha, \beta, \gamma$ are common as the transcripts share the first three rounds. The tree branches after the third round of the protocol where the challenge $\chz$ is presented, thus tree $\tree$ is build using different values of $\chz$. We consider two games.
\ncase{Game 0} In this game the adversary wins if all the transcripts it produced are acceptable by the ideal verifier, i.e.~$\vereq_{\inp, \zkproof}(X) = 0$, cf.~\cref{eq:ver_eq}, yet the extractor fails to extract a valid witness out of them.
Probability of $\tdv$ winning this game is $\epsid(\secpar)$ as the protocol $\plonkprot$, instantiated with the idealised verification equation, is perfectly sound except with negligible probability of the idealised verifier failure $\epsid(\secpar)$. Hence for a valid proof $\zkproof$ for a statement $\inp$ there exists a witness $\wit$, such that $\REL(\inp, \wit)$ holds. Note that since the $\tdv$ produces $(3 \noofc + 1)$ acceptable transcripts for different challenges $\chz$, it obtains the same number of different evaluations of polynomials $\p{a}, \p{b}, \p{c}, \p{z}, \p{t}$. Since the transcripts are acceptable by an idealised verifier, the equality between polynomial $\p{t}$ and combination of polynomials $\p{a}, \p{b}, \p{c}, \p{z}$ described in Round 3 of the protocol holds. Hence, $\p{a}, \p{b}, \p{c}$ encodes the valid witness for the proven statement. Since $\p{a}, \p{b}, \p{c}$ are of degree at most $(\noofc + 2)$ and there is more than $(\noofc + 2)$ their evaluations known, $\extt$ can recreate their coefficients by interpolation and reveal the witness with probability $1$. Hence, the probability that extraction fails in that case is upper-bounded by probability of an idealised verifier failing $\epsid(\secpar)$, which is negligible. \ncase{Game 1} In this game the adversary additionally wins if it produces a transcript in $\tree$ such that $\vereq_{\inp, \zkproof}(\chi) = 0$, but $\vereq_{\inp, \zkproof}(X) \neq 0$. That is, the ideal verifier does not accept the proof, but the real verifier does. \ncase{Game 0 to Game 1} Assume the adversary wins in Game 1, but does not win in Game 0. We show that such adversary may be used to break the $\dlog$ assumption. More precisely, let $\tdv$ be an adversary that for relation $\REL$ and randomly picked $\srs \sample \kgen(\REL)$ produces a tree of acceptable transcripts such that the winning condition of the game holds. Let $\rdvdlog$ be a reduction that gets as input an $(\noofc + 2, 1)$-dlog instance $\gone{1, \ldots, \chi^{\noofc}}, \gtwo{\chi}$ and is tasked to output $\chi$. The reduction proceeds as follows---it gives the input instance to the adversary as the SRS. Let $(1, \tree)$ be the output returned by $\adv$. Let $\inp$ be a relation proven in $\tree$. Consider a transcript $\zkproof \in \tree$ such that $\vereq_{\inp, \zkproof}(X) \neq 0$, but $\vereq_{\inp, \zkproof}(\chi) = 0$. Since the adversary is algebraic, all group elements included in $\tree$ are extended by their representation as a combination of the input $\GRP_1$-elements. Hence all coefficients of the verification equation polynomial $\vereq_{\inp, \zkproof}(X)$ are known and $\rdvdlog$ can find its zero points. Since $\vereq_{\inp, \zkproof}(\chi) = 0$, the targeted discrete log value $\chi$ is among them. Hence, the probability that this event happens is upper-bounded by $\epsdlog(\secpar)$. \qed \end{proof} \fi \subsection{Honest verifier zero-knowledge} \begin{lemma} \label{lem:plonk_hvzk} Let $\plonkprot$ be zero knowledge with security $\epszk(\secpar)$. Let $(\pR, \pS, \pT, \pf, 1)$-uber assumption for $\pR, \pS, \pT, \pf$ as defined in \cref{eq:uber} hold with security $\epsuber(\secpar)$. Then $\plonkprot$ is computationally honest verifier zero-knowledge with simulator $\simulator$ that does not require a SRS trapdoor with security $\epszk(\secpar) + \epsuber(\secpar)$.\footnote{The simulator works as a simulator for proofs that are zero-knowledge in the standard model. 
However, we do not say that $\plonk$ is HVZK in the standard model as proof of that \emph{requires} the SRS simulator.} \end{lemma} \begin{proof} The proof goes by game-hopping. The environment that controls the games provides the adversary with a SRS $\srs$, then the adversary outputs an instance--witness pair $(\inp, \wit)$ and, depending on the game, is provided with either real or simulated proof for it. In the end of the game the adversary outputs either $0$ if it believes that the proof it saw was provided by the simulator and $1$ in the other case. \ngame{0} In this game $\adv(\srs)$ picks an instance--witness pair $(\inp, \wit)$ and gets a real proof $\zkproof$ for it. \ngame{1} In this game for $\adv(\srs)$ picks an instance--witness pair $(\inp, \wit)$ and gets a proof $\zkproof$ that is simulated by a simulator $\simulator_\chi$ which utilises for the simulation the SRS trapdoor and proceeds as described in \cref{sec:plonk_explained}. \COMMENT{follows. In the first round the simulator $\simulator_\chi$ picks randomisers $b_1, \ldots b_9$, sets $\wit_i = 0$, for $i \in \range{1}{3 \noofc}$, computes polynomials $\pa(X), \pb(X), \pc(X)$ and outputs $\gone{\pa(\chi), \pb(\chi), \pc(\chi)}$. Then it picks Round 1 challenges $\beta, \gamma$ honestly. In Round 2 $\simulator_\chi$ computes the polynomial $\pz(X)$ and outputs $\gone{\pz(\chi)}$. Then it picks randomly Round 2 challenge $\alpha$. In Round 3 the simulator computes polynomial $\pt(X)$ and evaluates it at $\chi$, then outputs $\gone{\ptlo(\chi), \ptmid(\chi), \pthi(\chi)}$. Note that this evaluation is feasible (in the polynomial time with non-negligible probability) only since $\simulator_\chi$ knows the trapdoor. In the last two rounds the simulator proceeds as an honest prover would proceed and picks corresponding challenges at random as an honest verifier would. } \ncase{Game 0 to Game 1} Since $\plonk$ is zero-knowledge, probability that $\adv$ outputs a different bit in both games is negligible. Hence \( \abs{\prob{\game{0}} - \prob{\game{1}}} \leq \epszk(\secpar). \) \ngame{2} In this game $\adv(\srs)$ picks an instance--witness pair $(\inp, \wit)$ and gets a proof $\zkproof$ simulated by the simulator $\simulator$ which proceeds as follows. In Round 1 the simulator picks randomly both the randomisers $b_1, \ldots, b_6$ and sets $\wit_i = 0$ for $i \in \range{1}{3\noofc}$. Then $\simulator$ outputs $\gone{\p{a}(\chi), \p{b}(\chi), \p{c}(\chi)}$. For the first round challenge, the simulator picks permutation argument challenges $\beta, \gamma$ randomly. In Round 2, the simulator computes $\p{z}(X)$ from the newly picked randomisers $b_7, b_8, b_9$ and coefficients of polynomials $\p{a}(X), \p{b}(X), \p{c}(X)$. Then it evaluates $\p{z}(X)$ honestly and outputs $\gone{\p{z}(\chi)}$. Challenge $\alpha$ that should be sent by the verifier after Round 2 is picked by the simulator at random. In Round 3 the simulator starts by picking at random a challenge $\chz$, which in the real proof comes as a challenge from the verifier sent \emph{after} Round 3. Then $\simulator$ computes evaluations \(\p{a}(\chz), \p{b}(\chz), \p{c}(\chz), \p{S_{\sigma 1}}(\chz), \p{S_{\sigma 2}}(\chz), \pubinppoly(\chz), \lag_1(\chz), \p{Z_H}(\chz),\allowbreak \p{z}(\chz\omega)\) and computes $\p{t}(X)$ honestly. 
Since for a random $\p{a}(X), \p{b}(X), \p{c}(X), \p{z}(X)$ the constraint system is (with overwhelming probability) not satisfied and the constraints-related polynomials are not divisible by $\p{Z_H}(X)$, hence $\p{t}(X)$ is a rational function rather than a polynomial. Then, the simulator evaluates $\p{t}(X)$ at $\chz$ and picks randomly a degree-$(3 \noofc - 1)$ polynomial $\p{\tilde{t}}(X)$ such that $\p{t}(\chz) = \p{\tilde{t}}(\chz)$ and publishes a commitment $\gone{\p{\tilde{t}_{lo}}(\chi), \p{\tilde{t}_{mid}}(\chi), \p{\tilde{t}_{hi}}(\chi)}$. After this round the simulator outputs $\chz$ as a challenge. In the next round, the simulator computes polynomial $\p{r}(X)$ as an honest prover would, cf.~\cref{sec:plonk_explained} and evaluates $\p{r}(X)$ at $\chz$. The rest of the evaluations are already computed, thus $\simulator$ simply outputs \( \p{a}(\chz), \p{b}(\chz), \p{c}(\chz), \p{S_{\sigma 1}}(\chz), \p{S_{\sigma 2}}(\chz), \p{t}(\chz), \p{z}(\chz \omega)\,. \) After that it picks randomly the challenge $v$, proceeds in the last round as an honest prover would proceed and outputs the final challenge, $u$, by picking it at random as well. \ncase{Game 1 to Game 2} We now describe the reduction $\rdv$ which relies on the $(\pR, \pS, \pT, \pF, 1)$-uber assumption, cf.~\cref{sec:uber_assumption} where $\pR, \pS, \pT, \pF$ are polynomials over variables $\vB = B_1, \ldots, B_9$ and are defined as follows. Let $E = \smallset{\smallset{2}, \smallset{3, 4}, \smallset{5, 6}, \smallset{7, 8, 9}}$ and $E' = E \setminus \smallset{2}$. Let \begin{align} \label{eq:uber} \pF(\vB) & = \smallset{B_1} \cup \smallset{B_1B_i \mid i \in A,\ A \in E'} \cup \smallset{B_1B_iB_j \mid i \in A, j \in B,\ A, B \in E', B \neq A} \cup \notag\\ & \smallset{B_1B_iB_jB_k \mid i \in A, j \in B, k \in C,\ A, B, C \in E', A \neq B \neq C \neq A}\notag\,,\\ \pR(\vB) & = \smallset{B_i \mid i \in A,\ A \in E} \cup \smallset{B_i B_j \mid i \in A, j \in B,\ A \neq B, A, B \in E} \cup \\ & \smallset{B_i B_j B_k \mid i \in A,\ j \in B,\ k \in C,\ A, B, C \text{ all different and in } E} \cup \notag \\ & \smallset{B_i B_j B_k B_l \mid i \in A,\ j \in B,\ k \in C,\ l \in D,\ A, B, C, D \text{ all different and in } E} \notag \\ & \setminus \pF(\vB)\,,\notag \\ \pS(\vB) & = \emptyset, \qquad \pT(\vB) = \emptyset. \end{align} That is, the elements of $\pR$ are all singletons, pairs, triplets and quadruplets of $B_i$ variables that occur in polynomial $\pt(\vB)$ except the challenge element $\pf(\vB)$ which are all elements that depends on a variable $B_1$. Variables $\vB$ are evaluated to randomly picked $\vb = b_1, \ldots, b_9$. The reduction $\rdv$ learns $\gone{\pR}$ and challenge $\gone{\vec{w}} = \gone{w_1, \ldots, w_{12}}$ where $\vec{w}$ is either a vector of evaluations $\pF(\vb)$ or a sequence of random values $y_1, \ldots, y_{12}$, for the sake of concreteness we state $w_1 = b_1$ or $w_1 = y_1$ (depending on the chosen random bit). Then it picks $\chi$, $\chz$ and computes the SRS $\srs$ from $\chi$. Elements $b_i$ are interpreted as polynomials in $X$ that are evaluated at $\chi$, i.e. $b_i = b_i(\chi)$. Next, $\rdv$ sets for $\xi_i, \zeta_i \sample \FF_p$ \( \gone{\p{\tb}_1(X)} = (X - \chz)(X - \ochz) \gone{w_1}(X) + \xi_i (X - \chz) \gone{1} + \zeta_i (X - \ochz) \gone{1}, % \text{ for } i \in % \range{1}{9}, u_1 \), and \( \gone{\p{\tb}_i(X)} = (X - \chz)(X - \ochz) \gone{b_i}(X) + \xi_i (X - \chz) \gone{1} + \zeta_i (X - \ochz) \gone{1}, % \text{ for } i \in % \range{1}{9}, u_1 \) for $i \in \range{2}{9}$. 
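The role of the $(X - \chz)(X - \ochz)$ factor and of the fresh randomisers $\xi_i, \zeta_i$ is worth spelling out (a short check of ours, reading $b_i$ and $w_1$ as the polynomials they stand for): for every $i$,
\[
\p{\tb}_i(\chz) = \zeta_i (\chz - \ochz) \qquad \text{and} \qquad \p{\tb}_i(\ochz) = \xi_i (\ochz - \chz),
\]
i.e.~the evaluations of $\p{\tb}_i$ at $\chz$ and $\ochz$ are scalars known to $\rdv$ and independent of $b_i$ (and of $w_1$). This is what will allow $\rdv$ to answer the Round 4 evaluations and Round 5 openings below without knowing the underlying field elements.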
Denote by $\tb_i$ the evaluation of $\p{\tb}_i$ at $\chi$. The reduction computes all $\gone{\tb_i \tb_j}, \gone{\tb_i \tb_j \tb_k}, \gone{\tb_i \tb_j \tb_k \tb_l}$ such that $\gone{B_i B_j, B_i B_j B_k, B_i B_j B_k B_l} \in \pR$. This is possible since $\rdv$ knows all singletons $\gone{w_1, b_2, \ldots, b_9}$ and pairs $\gone{b_i b_j} \in \pR$, which can be used to compute all required pairs $\gone{\tb_i \tb_j}$:
\begin{align*}
\gone{\tb_i \tb_j} & = ((\chi - \chz)(\chi - \ochz)\gone{b_i} + \xi_i (\chi - \chz)\gone{1} + \zeta_i (\chi - \ochz) \gone{1}) \cdot \\
& \quad ((\chi - \chz)(\chi - \ochz)\gone{b_j} + \xi_j (\chi - \chz)\gone{1} + \zeta_j (\chi - \ochz) \gone{1}) = \\
& ((\chi - \chz)(\chi - \ochz))^2 \gone{b_i b_j} + (\chi - \chz)(\chi - \ochz)(\xi_j (\chi - \chz) + \zeta_j (\chi - \ochz)) \gone{b_i} + \\
& (\chi - \chz)(\chi - \ochz)(\xi_i (\chi - \chz) + \zeta_i (\chi - \ochz)) \gone{b_j} + \psi \gone{1},
\end{align*}
where the scalar $\psi$ is composed of $\xi_i, \xi_j, \zeta_i, \zeta_j, \chz, \ochz, \chi$, which are all known to $\rdv$, and involves neither $b_i$ nor $b_j$. The triplets and quadruplets, as well as the elements dependent on~$\vec{w}$, are computed analogously.

Next the reduction runs the adversary $\adv(\srs)$ and obtains from $\adv$ an instance--witness pair $(\inp, \wit)$. $\rdv$ now prepares a simulated proof as follows:
\begin{compactdesc}
\item[Round 1] $\rdv$ computes $\gone{\pa(\chi)}$ using $\gone{\tb_1}, \gone{\tb_2}$ as randomisers and setting $\wit_i = 0$, for $i \in \range{1}{3 \noofc}$. Similarly it computes $\gone{\pb(\chi)}, \gone{\pc(\chi)}$. $\rdv$ publishes the obtained values and picks the Round 1 challenges $\beta, \gamma$ at random. Note that regardless of whether $w_1 = b_1$ or $w_1$ is a random element, $\gone{\pa(\chi)}$ is random. Thus $\rdv$'s output has the same distribution as the output of a real prover.
\item[Round 2] $\rdv$ computes $\gone{\pz(\chi)}$ using $\tb_7, \tb_8, \tb_9$ and publishes it. Then it picks the challenge $\alpha$ at random. This round's output is independent of $b_1$, thus $\rdv$'s output is indistinguishable from the prover's.
\item[Round 3] The reduction computes $\p{t_{lo}}(\chi), \p{t_{mid}}(\chi), \p{t_{hi}}(\chi)$, which all depend on $b_1$. To that end $\gone{\tb_1}$ is used. Note that if $\vec{w}$ is a vector of $\pF(b_1, \ldots, b_9)$ evaluations, then $\gone{\p{t_{lo}}(\chi), \p{t_{mid}}(\chi), \p{t_{hi}}(\chi)}$ is the same as the real prover's. Alternatively, if $\vec{w}$ is a vector of random values, then $\p{t_{lo}}, \p{t_{mid}}, \p{t_{hi}}$ are random polynomials which evaluate at $\chz$ to the same values as the polynomials computed by the real prover. That is, in that case $\p{t_{lo}}(\chi), \p{t_{mid}}(\chi), \p{t_{hi}}(\chi)$ are as the simulator $\simulator$ would compute. Eventually, $\rdv$ outputs $\chz$.
\item[Round 4] The reduction outputs $\pa(\chz), \pb(\chz), \pc(\chz), \p{S_{\sigma 1}}(\chz), \p{S_{\sigma 2}}(\chz), \pt(\chz), \pz(\ochz)$. For the sake of concreteness, denote $S = \smallset{\pa, \pb, \pc, \pt, \pz}$. Although for a polynomial $\p{p} \in S$ the reduction $\rdv$ may know neither $\p{p}(\chi)$ nor even all the coefficients of $\p{p}$, the polynomials in $S$ were computed such that the reduction always knows their evaluations at $\chz$ and $\ochz$.
\item[Round 5] $\rdv$ computes the openings of the polynomial commitments, assuring that the evaluations at $\chz$ it provided were computed honestly.
\end{compactdesc}
If the adversary $\adv$'s output distribution differs between Games $\game{1}$ and $\game{2}$, then the reduction uses it to distinguish between $\vec{w} = \pF(b_1, \ldots, b_9)$ and $\vec{w}$ being random, thus
\(
\abs{\prob{\game{1}} - \prob{\game{2}}} \leq \epsuber(\secpar).
\)
Eventually,
\(
\abs{\prob{\game{0}} - \prob{\game{2}}} \leq \epszk(\secpar) + \epsuber(\secpar).
\)
\qed
\end{proof}

\subsection{Simulation soundness and simulation extractability of~$\plonkprotfs$}

Since \cref{lem:plonkprot_ur,lem:plonkprot_ss} hold, $\plonkprot$ is $\ur{2}$ and forking sound. We now make use of \cref{thm:simsnd} and \cref{thm:se} and show that $\plonkprot_\fs$ is simulation sound and forking simulation-extractable as defined in \cref{sec:simext_def}.

\begin{corollary}[Forking simulation extractability of $\plonkprot_\fs$]
\label{thm:plonkprotfs_se}
Assume that an idealised $\plonkprot$ verifier fails with probability at most $\epsid(\secpar)$, that the discrete logarithm advantage is bounded by $\epsdlog(\secpar)$, and that $\PCOMp$ is a commitment of knowledge with security $\epsk(\secpar)$, binding security $\epsbind(\secpar)$, and has the unique opening property with security $\epsop(\secpar)$. Let $\ro\colon \bin^* \to \bin^\secpar$ be a random oracle. Let $\advse$ be an algebraic adversary that can make up to $q$ random oracle queries, up to $S$ simulation oracle queries, and outputs an acceptable proof for $\plonkprotfs$ with probability at least $\accProb$. Then $\plonkprotfs$ is forking simulation-extractable with extraction error $\eta = \epsur(\secpar)$. The extraction probability $\extProb$ is at least
\[
\extProb \geq \frac{1}{q^{3 (\epsid(\secpar)+\epsdlog(\secpar))}} (\accProb - \epsk(\secpar) - 2\cdot\epsbind(\secpar) - \epsop(\secpar))^{3\noofc + 1} -\eps(\secpar)\,,
\]
for some negligible $\eps(\secpar)$, where $\noofc$ is the number of constraints in the proven circuit.
\end{corollary}

% \begin{corollary}[Simulation soundness of $\plonkprot_\fs$]
% \label{thm:simsnd}
% Assume that $\plonkprot$ is $2$-programmable HVZK in the standard model, that
% is $\epss(\secpar)$-sound and the $\PCOMp$ is a commitment of knowledge with
% security $\epsk(\secpar)$, binding security $\epsbind(\secpar)$ and has unique
% opening property with security $\epsop(\secpar)$. Then the probability that a
% $\ppt$ adversary $\adv$ breaks simulation soundness of $\ps_{\fs}$ is
% upper-bounded by
% \( \epsk(\secpar) + 2\cdot\epsbind(\secpar) + \epsop(\secpar) + q_\ro^4
% \epss(\secpar)\,, \) where $q$ is the total number of queries made by the
% adversary $\adv$ to a random oracle $\ro\colon \bin^{*} \to \bin^{\secpar}$.
% \end{corollary}

\section{Non-malleability of $\sonicprotfs$}
\label{sec:sonic}

\subsection{\sonic{} protocol rolled out}

In this section we present $\sonic$'s constraint system and algorithms. Readers familiar with them may jump directly to the next section.

\oursubsub{The constraint system}
\label{sec:sonic_constraint_system}

\sonic's system of constraints consists of three $\multconstr$-long vectors $\va, \vb, \vc$, which correspond to the left inputs, right inputs, and outputs of the multiplication gates; hence $\va \cdot \vb = \vc$ holds. There are also $\linconstr$ linear constraints of the form
\[
\va \vec{u_q} + \vb \vec{v_q} + \vc \vec{w_q} = k_q,
\]
where $\vec{u_q}, \vec{v_q}, \vec{w_q}$ are vectors for the $q$-th linear constraint with instance value $k_q \in \FF_p$.
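As a minimal illustration of the constraint format (the example is ours and not part of \sonic's description), with a single multiplication gate, i.e.~$\multconstr = 1$, one can bind the gate's output to a public constant $k_1$ by the linear constraint with $\vec{u_1} = (0)$, $\vec{v_1} = (0)$, $\vec{w_1} = (1)$, which instantiates the equation above as
\[
a_1 \cdot 0 + b_1 \cdot 0 + c_1 \cdot 1 = k_1\,.
\]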
Furthermore define polynomials \begin{equation} \begin{split} \p{u_i}(Y) & = \sum_{q = 1}^\linconstr Y^{q + \multconstr} u_{q, i}\,,\\ \p{v_i}(Y) & = \sum_{q = 1}^\linconstr Y^{q + \multconstr} v_{q, i}\,,\\ \end{split} \qquad \begin{split} \p{w_i}(Y) & = -Y^i - Y^{-i} + \sum_{q = 1}^\linconstr Y^{q + \multconstr} w_{q, i}\,,\\ \p{k}(Y) & = \sum_{q = 1}^\linconstr Y^{q + \multconstr} k_{q}. \end{split} \end{equation} $\sonic$ constraint system requires that \begin{align} \label{eq:sonic_constraint} \vec{a}^\top \cdot \vec{\p{u}} (Y) + \vec{b}^\top \cdot \vec{\p{v}} (Y) + \vec{c}^\top \cdot \vec{\p{w}} (Y) + \sum_{i = 1}^{\multconstr} a_i b_i (Y^i + Y^{-i}) - \p{k} (Y) = 0. \end{align} In \sonic{} we will use commitments to the following polynomials. \begin{align*} \pr(X, Y) & = \sum_{i = 1}^{\multconstr} \left(a_i X^i Y^i + b_i X^{-i} Y^{-i} + c_i X^{-i - \multconstr} Y^{-i - \multconstr}\right) \\ \p{s}(X, Y) & = \sum_{i = 1}^{\multconstr} \left( u_i (Y) X^{-i} + v_i(Y) X^i + w_i(Y) X^{i + \multconstr}\right)\\ \pt(X, Y) & = \pr(X, 1) (\pr(X, Y) + \p{s}(X, Y)) - \p{k}(Y)\,. \end{align*} Polynomials $\p{r} (X, Y), \p{s} (X, Y), \p{t} (X, Y)$ are designed such that $\p{t} (0, Y) = \vec{a}^\top \cdot \vec{\p{u}} (Y) + \vec{b}^\top \cdot \vec{\p{v}} (Y) + \vec{c}^\top \cdot \vec{\p{w}} (Y) + \sum_{i = 1}^{\multconstr} a_i b_i (Y^i + Y^{-i}) - \p{k} (Y) $. That is, the prover is asked to show that $\p{t} (0, Y) = 0$, cf.~\cref{eq:sonic_constraint}. Furthermore, the commitment system in $\sonic$ is designed such that it is infeasible for a $\ppt$ algorithm to commit to a polynomial with non-zero constant term. \oursubsub{Algorithms rolled out} \ourpar{$\sonic$ SRS generation $\kgen(\REL)$.} The SRS generating algorithm picks randomly $\alpha, \chi \sample \FF_p$ and outputs \[ \srs = \left( \gone{\smallset{\chi^i}_{i = -\dconst}^{\dconst}, \smallset{\alpha \chi^i}_{i = -\dconst, i \neq 0}^{\dconst}}, \gtwo{\smallset{\chi^i, \alpha \chi^i}_{i = - \dconst}^{\dconst}}, \gtar{\alpha} \right) \] \ourpar{$\sonic$ prover $\prover(\srs, \inp, \wit=\va, \vb, \vc)$.} \begin{description} \item[Round 1] The prover picks randomly randomisers $c_{\multconstr + 1}, c_{\multconstr + 2}, c_{\multconstr + 3}, c_{\multconstr + 4} \sample \FF_p$. Sets $\pr(X, Y) \gets \pr(X, Y) + \sum_{i = 1}^4 c_{\multconstr + i} X^{- 2 \multconstr - i}$. Commits to $\pr(X, 1)$ and outputs $\gone{r} \gets \com(\srs, \multconstr, \pr(X, 1))$. Then it gets challenge $y$ from the verifier. \item[Round 2] $\prover$ commits to $\pt(X, y)$ and outputs $\gone{t} \gets \com(\srs, \dconst, \pt(X, y))$. Then it gets a challenge $z$ from the verifier. \item[Round 3] The prover computes commitment openings. That is, it outputs \begin{align*} \gone{o_a} & = \open(\srs, z, \pr(z, 1), \pr(X, 1)) \\ \gone{o_b} & = \open(\srs, yz, \pr(yz, 1), \pr(X, 1)) \\ \gone{o_t} & = \open(\srs, z, \pt(z, y), \pt(X, y)) \end{align*} along with evaluations $a' = \pr(z, 1), b' = \pr(y, z), t' = \pt(z, y)$. Then it engages in the signature of correct computation playing the role of the helper, i.e.~it commits to $\p{s}(X, y)$ and sends the commitment $\gone{s}$, commitment opening \begin{align*} \gone{o_s} & = \open(\srs, z, \p{s}(z, y), \p{s}(X, y)), \\ \end{align*} and $s'=\p{s}(z, y)$. % Then it obtains a challenge $u$ from the verifier. 
\item[Round 4] In the next round the prover computes $\gone{c} \gets \com(\srs, \dconst, \p{s}(u, Y))$ and computes commitments' openings \begin{align*} \gone{w} & = \open(\srs, u, \p{s}(u, y), \p{s}(X, y)), \\ \gone{q_y} & = \open(\srs, y,\p{s}(u, y), \p{s}(u, Y)), \end{align*} and returns $\gone{w}, \gone{q_y}, s = \p{s}(u, y)$. Eventually the prover gets the last challenge from the verifier---$z'$. \item[Round 5] In the final round, $\prover$ computes opening $\gone{q_{z'}} = \open(\srs, z', \p{s}(u, z'), \p{s}(u, X))$ and outputs $\gone{q_{z'}}$. \end{description} \ourpar{$\sonic$ verifier $\verifier(\srs, \inp, \zkproof)$.} The verifier in \sonic{} runs as subroutines the verifier for the polynomial commitment. That is it sets $t' = a'(b' + s') - \p{k}(y)$ and checks the following: \begin{equation*} \begin{split} &\PCOMs.\verifier(\srs, \multconstr, \gone{r}, z, a', \gone{o_a}), \\ &\PCOMs.\verifier(\srs, \multconstr, \gone{r}, yz, b', \gone{o_b}),\\ &\PCOMs.\verifier(\srs, \dconst, \gone{t}, z, t', \gone{o_t}),\\ &\PCOMs.\verifier(\srs, \dconst, \gone{s}, z, s', \gone{o_s}),\\ \end{split} \qquad \begin{split} &\PCOMs.\verifier(\srs, \dconst, \gone{s}, u, s, \gone{w}),\\ &\PCOMs.\verifier(\srs, \dconst, \gone{c}, y, s, \gone{q_y}),\\ &\PCOMs.\verifier(\srs, \dconst, \gone{c}, z', \p{s}(u, z'), \gone{q_{z'}}), \end{split} \end{equation*} and accepts the proof iff all the checks holds. Note that the value $\p{s}(u, z')$ that is recomputed by the verifier uses separate challenges $u$ and $z'$. This enables the batching of many proof and outsourcing of this part of the proof to an untrusted helper. \subsection{Unique opening property of $\PCOMs$} \begin{lemma} \label{lem:pcoms_unique_op} $\PCOMs$ has the unique opening property in the AGM. \end{lemma} \begin{proof} Let $z \in \FF_p$ be the attribute the polynomial is evaluated at, $\gone{c} \in \GRP$ be the commitment, $s \in \FF_p$ the evaluation value, and $o \in \GRP$ be the commitment opening. We need to show that for every $\ppt$ adversary $\adv$ probability \[ \Pr \left[ \begin{aligned} & \verify(\srs, \gone{c}, z, s, \gone{o}) = 1, \\ & \verify(\srs, \gone{c}, z, \tilde{s}, \gone{\tilde{o}}) = 1 \end{aligned} \,\left|\, \vphantom{\begin{aligned} & \verify(\srs, \gone{c}, z, s, \gone{o}),\\ & \verify(\srs, \gone{c}, z, \tilde{s}, \gone{\tilde{o}}) \\ &o \neq \tilde{o}) \end{aligned}} \begin{aligned} & \srs \gets \kgen(\secparam, \maxdeg), \\ & (\gone{c}, z, s, \tilde{s}, \gone{o}, \gone{\tilde{o}}) \gets \adv(\srs) \end{aligned} \right.\right] % \leq \negl. \] is at most negligible. As noted in \cite[Lemma 2.2]{EPRINT:GabWilCio19} it is enough to upper bound the probability of the adversary succeeding using the idealised verification equation---which considers equality between polynomials---instead of the real verification equation---which considers equality of the polynomials' evaluations. For a polynomial $f$, its degree upper bound $\maxconst$, evaluation point $z$, evaluation result $s$, and opening $\gone{o(X)}$ the idealised check verifies that \begin{equation} \alpha (X^{\dconst - \maxconst}f(X) \cdot X^{-\dconst + \maxconst} - s) \equiv \alpha \cdot o(X) (X - z)\,, \end{equation} what is equivalent to \begin{equation} f(X) - s \equiv o(X) (X - z)\,. \label{eq:pcoms_idealised_check} \end{equation} Since $o(X)(X - z) \in \FF_p[X]$ then from the uniqueness of polynomial composition, there is only one $o(X)$ that fulfils the equation above. 
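Spelled out, the last step uses only that $\FF_p[X]$ is an integral domain: if two openings $o(X)$ and $\tilde{o}(X)$ both satisfied \cref{eq:pcoms_idealised_check}, then
\[
(o(X) - \tilde{o}(X)) (X - z) \equiv 0\,,
\]
and since $X - z$ is a non-zero polynomial, necessarily $o(X) = \tilde{o}(X)$.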
\qed
\end{proof}

\subsection{Unique response property}

The unique response property of $\sonicprot$ follows from the unique opening property of the underlying polynomial commitment scheme $\PCOMs$.
\begin{lemma}
\label{lem:sonicprot_ur}
If a polynomial commitment scheme $\PCOMs$ is evaluation binding with parameter $\epsbind(\secpar)$ and has the unique opening property with parameter $\epsop(\secpar)$, then $\sonicprot$ is $\ur{1}$ with parameter $\epsur(\secpar) \leq \epsbind(\secpar) + \epsop(\secpar)$.
\end{lemma}
\begin{proof}
Let $\adv$ be an adversary that breaks the $\ur{1}$-ness of $\sonicprot$. We consider two cases, depending on the round in which $\adv$ is able to provide at least two different outputs such that the resulting transcripts are acceptable. For the first case we show that $\adv$ can be used to break the evaluation binding property of $\PCOMs$, while for the second case we show that it can be used to break the unique opening property of $\PCOMs$. The proof goes similarly to the proof of \cref{lem:plonkprot_ur}, thus we provide only a draft of it here. In each Round $i$, for $i > 1$, the prover either commits to some well-defined polynomials (deterministically), evaluates these at randomly picked points, or shows that the evaluations were performed correctly. Obviously, for a committed polynomial $\p{p}$ evaluated at a point $x$ only one value $y = \p{p}(x)$ is correct. If the adversary was able to provide two different values $y$ and $\tilde{y}$ that would be accepted as an evaluation of $\p{p}$ at $x$, then $\PCOMs$'s evaluation binding property would be broken. Alternatively, if $\adv$ was able to provide two openings $\p{W}$ and $\p{\tilde{W}}$ for $y = \p{p}(x)$, then the unique opening property would be broken. Hence the probability that $\adv$ breaks the $\ur{1}$ property of $\sonicprot$ is upper-bounded by $\epsbind(\secpar) + \epsop(\secpar)$.
\qed
\end{proof}

\subsection{Forking soundness}

\begin{lemma}
\label{lem:sonicprot_ss}
$\sonicprot$ is $(\epsss(\secpar), 2, \noofc + 1)$-forking sound against algebraic adversaries with
\[
\epsss(\secpar) \leq \epsid(\secpar) + \epsldlog(\secpar) \,,
\]
where $\epsid(\secpar)$ is the soundness error of the idealized verifier, and $\epsldlog(\secpar)$ is the security of the $(\dconst, \dconst)$-$\ldlog$ assumption.
\end{lemma}
\begin{proof}
As in the case of $\plonk$, the main idea of the proof is to show that an adversary who breaks forking soundness can be used to break a $\dlog$ problem instance. The proof goes by game hops. Let $\tree$ be the tree produced by $\tdv$ by rewinding $\adv$. Note that since the tree branches after Round 2, the instance $\inp$, commitments $\gone{\p{r} (\chi, 1), \p{r} (\chi, y), \p{s} (\chi, y), \p{t} (\chi, y)}$, and challenge $y$ are the same. The tree branches after the second round of the protocol, where the challenge $z$ is presented, thus the tree $\tree$ is built using different values of $z$. We consider the following games.

\ncase{Game 0} In this game the adversary wins if all the transcripts it produced are acceptable by the ideal verifier, i.e.~$\vereq_{\inp, \zkproof}(X) = 0$, cf.~\cref{eq:ver_eq}, none of the commitments $\gone{\p{r} (\chi, 1), \p{r} (\chi, y), \p{s} (\chi, y), \p{t} (\chi, y)}$ uses elements from a simulated proof, and the extractor fails to extract a valid witness out of the proof.
\ncase{Probability that $\adv$ wins Game 0 is negligible} Probability of $\adv$ winning this game is $\epsid(\secpar)$ as the protocol $\sonicprot$, instantiated with the idealised verification equation, is perfectly knowledge sound except with negligible probability of the idealised verifier failure $\epsid(\secpar)$. Hence for a valid proof $\zkproof$ for a statement $\inp$ there exists a witness $\wit$, such that $\REL(\inp, \wit)$ holds. Note that since the $\tdv$ produces $(\noofc + 1)$ acceptable transcripts for different challenges $z$. As noted in \cite{CCS:MBKM19} this assures that the correct witness is encoded in $\p{r} (X, Y)$. Hence $\extt$ can recreate polynomials' coefficients by interpolation and reveal the witness with probability $1$. Moreover, the probability that extraction fails in that case is upper-bounded by probability of an idealised verifier failing $\epsid(\secpar)$, which is negligible. \ncase{Game 1} In this game the adversary additionally wins if it produces a transcript in $\tree$ such that $\vereq_{\inp, \zkproof}(\chi) = 0$, but $\vereq_{\inp, \zkproof}(X) \neq 0$, and none of commitments $\gone{\p{r} (\chi, 1), \p{r} (\chi, y), \p{s} (\chi, y), \p{t} (\chi, y)}$ use elements from a simulated proof. The first condition means that the ideal verifier does not accept the proof, but the real verifier does. \ncase{Game 0 to Game 1} Assume the adversary wins in Game 1, but does not win in Game 0. We show that such adversary may be used to break an instance of a $\ldlog$ assumption. More precisely, let $\tdv$ be an algorithm that for relation $\REL$ and randomly picked $\srs \sample \kgen(\REL)$ produces a tree of acceptable transcripts such that the winning condition of the game holds. Let $\rdvdlog$ be a reduction that gets as input an $(\dconst, \dconst)$-ldlog instance $\gone{\chi^{-\dconst}, \ldots, \chi^{\dconst}}, \gtwo{\chi^{-\dconst}, \ldots, \chi^{\dconst}}$ and is tasked to output $\chi$. The reduction $\rdvdlog$ proceeds as follows. \begin{enumerate} \item Build $\sonicprot$'s SRS $\srs$: pick a random $\alpha$ and compute $\gone{\alpha \chi^{- \dconst}, \ldots, \alpha \chi^{-1}, \alpha \chi, \ldots, \alpha \chi^{\dconst}}$, $\gtwo{\alpha \chi^{- \dconst}, \ldots, \alpha \chi^{-1}, \alpha \chi, \ldots, \alpha \chi^{\dconst}}$. Compose the SRS. \item Let $(1, \tree)$ be the output returned by $\tdv$. Let $\inp$ be a relation proven in $\tree$. Consider a transcript $\zkproof \in \tree$ such that $\vereq_{\inp, \zkproof}(X) \neq 0$, but $\vereq_{\inp, \zkproof}(\chi) = 0$. Since $\adv$ is algebraic, all group elements included in $\tree$ are extended by their representation as a combination of the input $\GRP_1$-elements. Hence, all coefficients of the verification equation polynomial $\vereq_{\inp, \zkproof}(X)$ are known. \item Find $\vereq_{\inp, \zkproof}(X)$ zero points and find $\chi$ among them. \item Return $\chi$. \end{enumerate} Hence, the probability that the adversary wins Game 1 is upper-bounded by $\epsldlog(\secpar)$. \end{proof} \subsection{Honest verifier zero-knowledge} \begin{lemma} \label{lem:sonic_hvzk} $\sonic$ is honest verifier zero-knowledge. \end{lemma} \begin{proof} The simulator proceeds as follows. \begin{enumerate} \item Pick randomly vectors $\vec{a}$, $\vec{b}$ and set \begin{equation} \label{eq:ab_eq_c} \vec{c} = \vec{a} \cdot \vec{b}. 
\end{equation} \item Pick randomisers $c_{\multconstr + 1}, \ldots, c_{\multconstr + 4}$, honestly compute polynomials $\p{r}(X, Y), \p{r'}(X, Y), \p{s}(X, Y)$ and pick randomly challenges $y$, $z$. \item Output commitment $\gone{r} \gets \com(\srs, \multconstr, \p{r} (X, 1))$ and challenge $y$. \item Compute \begin{align*} & a' = \p{r}(z, 1),\\ & b' = \p{r}(z, y),\\ & s' = \p{s}(z, y). \end{align*} \item Pick polynomial $\p{t}(X, Y)$ such that \begin{align*} & \p{t} (X, y) = \p{r} (X, 1) (\p{r}(X, y) + \p{s} (X, y)) - \p{k} (Y)\\ & \p{t} (0, y) = 0 \end{align*} \item Output commitment $\gone{t} = \com (\srs, \dconst, \p{t} (X, y))$ and challenge $z$. \item Continue following the protocol. \end{enumerate} We note that the simulation is perfect. This comes since, except polynomial $\p{t} (X, Y)$ all polynomials are computed following the protocol. For polynomial $\p{t} (X, Y)$ we observe that in a case of both real and simulated proof the verifier only learns commitment $\gone{t} = \p{t} (\chi, y)$ and evaluation $t' = \p{t} (z, y)$. Since the simulator picks $\p{t} (X, Y)$ such that \begin{align*} \p{t} (X, y) = \p{r} (X, 1) (\p{r}(X, y) + \p{s} (X, y)) - \p{k} (Y) \end{align*} Values of $\gone{t}$ are equal in both proofs. Furthermore, the simulator picks its polynomial such that $\p{t}(0, y) = 0$, hence it does not need the trapdoor to commit to it. (Note that the proof system's SRS does not allow to commit to polynomials which have non-zero constant term). \qed \end{proof} \begin{remark} As noted in \cite{CCS:MBKM19}, $\sonic$ is statistically subversion-zero knowledge (Sub-ZK). As noted in \cite{AC:ABLZ17}, one way to achieve subversion zero knowledge is to utilise an extractor that extracts a SRS trapdoor from a SRS-generator. Unfortunately, a NIZK made subversion zero-knowledge by this approach cannot achieve perfect Sub-ZK as one has to count in the probability of extraction failure. However, with the simulation presented in \cref{lem:sonic_hvzk}, the trapdoor is not required for the simulator as it is able to simulate the execution of the protocol just by picking appropriate (honest) verifier's challenges. This result transfers to $\sonicprotfs$, where the simulator can program the random oracle to provide challenges that fits it. \end{remark} \subsection{From forking soundness and unique response property to forking simulation extractability of $\sonicprotfs$} Since \cref{lem:sonicprot_ur,lem:sonicprot_ss} hold, $\sonicprot$ is $\ur{1}$ and forking sound. We now make use of \cref{thm:se} and show that $\sonicprotfs$ is forking simulation-extractable as defined in \cref{def:simext}. \begin{corollary}[Forking simulation extractability of $\sonicprotfs$] \label{thm:sonicprotfs_se} Assume that $\sonicprot$ is $\ur{1}$ with security $\epsur(\secpar) = \epsbind(\secpar) + \epsop(\secpar)$ -- where $\epsbind (\secpar)$ is polynomial commitment's binding security, $\epsop$ is polynomial commitment unique opening security -- and forking-sound with security $\epsss(\secpar)$. Let $\ro\colon \bin^* \to \bin^\secpar$ be a random oracle. Let $\advse$ be an algebraic adversary that can make up to $q$ random oracle queries, up to $S$ simulation oracle queries, and outputs an acceptable proof for $\sonicprotfs$ with probability at least $\accProb$. Then $\sonicprotfs$ is forking simulation-extractable with extraction error $\eta = \epsur(\secpar)$. 
The extraction probability $\extProb$ is at least
\[
\extProb \geq \frac{1}{q^{\noofc}} (\accProb - \epsur(\secpar))^{\multconstr + \linconstr + 1} - \eps(\secpar)\,,
\]
for some negligible $\eps(\secpar)$, with $\multconstr$ and $\linconstr$ being, respectively, the number of multiplicative and linear constraints of the system.
\end{corollary}

\section{Non-malleability of $\marlinprotfs$}

We show that $\marlin$ is forking simulation-extractable. To that end, we show that $\marlin$ has all the required properties: it has the unique response property, it is forking sound, and its simulator can provide indistinguishable proofs without a trapdoor, just by programming the random oracle.

\subsection{$\marlin$ protocol rolled-out}

$\marlin$ uses R1CS as its arithmetization method. That is, given an instance $\inp$, a witness $\wit$, and $|\HHH| \times |\HHH|$ matrices $\vec{A}, \vec{B}, \vec{C}$, the prover shows that $\vec{A} (\inp^\top, \wit^\top)^\top \circ \vec{B} (\inp^\top, \wit^\top)^\top = \vec{C} (\inp^\top, \wit^\top)^\top$. (Here $\circ$ denotes the entry-wise product.) We assume that the matrices have at most $|\KKK|$ non-zero entries. Obviously, $|\KKK| \leq |\HHH|^2$. Let $b = 3$ be the upper bound on the number of evaluations the prover has to provide for each of the sent polynomials. Denote by $\dconst$ an upper bound for $\smallset{|\HHH| + 2b -1, 2 |\HHH| + b - 1, 6 |\KKK| - 6}$.

The idea of showing that the constraint system is fulfilled is as follows. Denote $\vec{z} = (\inp, \wit)$. The prover computes polynomials $\p{z_A} (X), \p{z_B} (X), \p{z_C} (X)$ which encode the vectors $\vec{A} \vec{z}, \vec{B} \vec{z}, \vec{C} \vec{z}$ and have degree $< |\HHH|$. Importantly, when the constraints are fulfilled, $\p{z_A} (X) \p{z_B} (X) - \p{z_C} (X) = \p{h_0} (X) \ZERO_\HHH (X)$ for some $\p{h_0} (X)$, where $\ZERO_\HHH (X)$ is the vanishing polynomial of $\HHH$. The prover sends commitments to these polynomials and shows that they have been computed correctly. More precisely, it shows that
\begin{equation}
\label{eq:marlin_eq_2}
\forall \vec{M} \in \smallset{\vec{A}, \vec{B}, \vec{C}}, \forall \kappa \in \HHH, \quad \p{z_M} (\kappa) = \sum_{\iota \in \HHH} \vec{M}[\kappa, \iota] \p{z}(\iota).
\end{equation}
The ideal verifier checks the following equalities:
\begin{equation}
\label{eq:marlin_ver_eq}
\begin{aligned}
\p{h_3} (\beta_3) \ZERO_\KKK (\beta_3) & = \p{a} (\beta_3) - \p{b} (\beta_3) (\beta_3 \p{g_3} (\beta_3) + \sigma_3 / |\KKK|)\\
\p{r}(\alpha, \beta_2) \sigma_3 & = \p{h_2} (\beta_2) \ZERO_\HHH (\beta_2) + \beta_2 \p{g_2} (\beta_2) + \sigma_2/|\HHH|\\
\p{s}(\beta_1) + \p{r}(\alpha, \beta_1) (\sum_M \eta_M \p{z_M} (\beta_1)) - \sigma_2 \p{z} (\beta_1) & = \p{h_1} (\beta_1) \ZERO_\HHH (\beta_1) + \beta_1 \p{g_1} (\beta_1) + \sigma_1/|\HHH| \\
\p{z_A} (\beta_1) \p{z_B} (\beta_1) - \p{z_C} (\beta_1) & = \p{h_0} (\beta_1) \ZERO_\HHH (\beta_1)
\end{aligned}
\end{equation}
where $\p{g_i} (X), \p{h_i} (X)$, $i \in \range{1}{3}$, $\p{a} (X), \p{b} (X), \sigma_1, \sigma_2, \sigma_3$ are polynomials and variables required by the sumcheck protocol, which allows the verifier to efficiently verify that \cref{eq:marlin_eq_2} holds.

\subsection{Unique response property}

\begin{lemma}
Let $\PCOM$ be a commitment of knowledge with security $\epsk(\secpar)$, evaluation binding with security $\epsbind(\secpar)$, and with the unique opening property with security $\epsop(\secpar)$.
Then the probability that a $\ppt$ adversary $\adv$ breaks the $\ur{1}$ property of $\marlinprotfs$ is at most $6 \cdot (\epsbind + \epsop + \epsk)$.
\michals{8.9}{Do we need to add probability that the idealized verifier fails $\epsid$?}
\end{lemma}
\begin{proof}
As in previous proofs, we show the property by game hops. Let $N = \smallset{\p{g_1}, \p{h_1}, \p{g_2}, \p{h_2}, \p{g_3}, \p{h_3}}$. That is, $N$ is the set of all polynomials whose commitments are sent during the protocol after Round 1.

\ncase{Game 0} In this game the adversary wins if it breaks evaluation binding, the unique opening property, or knowledge soundness of one of the commitments to polynomials in $N$. The probability that a $\ppt$ adversary wins in Game 0 is upper bounded by $6 \cdot (\epsbind + \epsop + \epsk)$.

\ncase{Game 1} In this game the adversary additionally wins if it breaks the $\ur{1}$ property of the protocol.

\ncase{Game 0 to Game 1} The probability that the adversary wins in Game 1 but not in Game 0 is $0$. This is because the polynomials in $N$ are uniquely determined. W.l.o.g.~we analyse the probability that the adversary is able to produce two different pairs of polynomials $(\p{h_2}, \p{g_2})$ and $(\p{h'_2}, \p{g'_2})$ such that
\begin{align*}
\p{h_2} (X) \ZERO_{\HHH} (X) + X \p{g_2} (X) & = \p{h'_2} (X) \ZERO_{\HHH} (X) + X \p{g'_2} (X)\,,\\
(\p{h_2} (X) - \p{h'_2} (X)) \ZERO_{\HHH} (X) & = X (\p{g'_2} (X) - \p{g_2} (X)).
\end{align*}
Since $\p{h_2}, \p{g_2} \in \FF^{< |\HHH| - 1} [X]$ and $\ZERO_{\HHH} \in \FF^{|\HHH|} [X]$, the LHS has a degree different from the RHS unless both sides are the zero polynomial. This happens only when $\p{h_2} (X) = \p{h'_2} (X)$ and $\p{g_2} (X) = \p{g'_2} (X)$.
\end{proof}

\subsection{Forking soundness}

\begin{lemma}
Assume that an idealised $\marlinprot$ verifier fails with probability at most $\epsid(\secpar)$ and that the probability that a $\ppt$ adversary breaks $\dlog$ is bounded by $\epsdlog(\secpar)$. Then $\marlinprot$ is $(\epsid (\secpar) + \epsdlog (\secpar), 2, \dconst + 1)$-forking sound.
\end{lemma}
\begin{proof}
% \michals{8.9}{Need to check the degrees}
The proof goes similarly to the respective proofs for $\plonk$ and $\sonic$. That is, let $\srs$ be $\marlinprot$'s SRS and denote by $\srs_1$ all of the SRS's $\GRP_1$-elements. Let $\tdv$ be an algebraic adversary that produces a statement $\inp$ and a $(1, \dconst + 1, 1, 1)$-tree of acceptable transcripts $\tree$. Note that in all transcripts the instance $\inp$, proof elements $\sigma_1, \gone{\p{w}(\chi), \p{z_A}(\chi), \p{z_B}(\chi), \p{z_C}(\chi), \p{h_0}(\chi), \p{s}(\chi)}, \gone{\p{g_1}(\chi), \p{h_1}(\chi)}$ and challenges $\alpha, \eta_1, \eta_2, \eta_3$ are common, as the transcripts share the first $3$ messages. The tree branches after the third message of the protocol, where the challenge $\beta_1$ is presented, thus the tree $\tree$ is built using different values of $\beta_1$. We consider the following games.

\ncase{Game 0} In this game the adversary wins if all the transcripts it produced are acceptable by the ideal verifier, i.e.~$\vereq_{\inp, \zkproof}(X) = 0$, cf.~\cref{eq:marlin_ver_eq}, yet the extractor fails to extract a valid witness out of them. The probability of $\tdv$ winning this game is $\epsid(\secpar)$, as the protocol $\marlinprot$, instantiated with the idealised verification equation, is perfectly sound except with negligible probability $\epsid(\secpar)$ of the idealised verifier failure. Hence for a valid proof $\zkproof$ for a statement $\inp$ there exists a witness $\wit$ such that $\REL(\inp, \wit)$ holds.
Note that since the $\tdv$ produces $(\dconst + 1)$ acceptable transcripts for different challenges $\beta_1$, it obtains the same number of different evaluations of polynomials $\p{z_A}, \p{z_B}, \p{z_C}$. Since the transcripts are acceptable by an idealised verifier, the equality $\p{z_A} (X) \p{z_B} (X) - \p{z_C} (X) = \p{h_0} (X) \ZERO_\HHH (X)$ holds and each of $\p{z}_M$, $M \in \smallset{A, B, C}$, has been computed correctly. Hence, $\p{z_A}, \p{z_B}, \p{z_C}$ encodes the valid witness for the proven statement. Since $\p{z_A}, \p{z_B}, \p{z_C}$ are of degree at most $\dconst$ and there is more than $(\dconst + 1)$ their evaluations known, $\extt$ can recreate their coefficients by interpolation and reveal the witness with probability $1$. Hence, the probability that extraction fails in that case is upper-bounded by probability of an idealised verifier failing $\epsid(\secpar)$, which is negligible. \ncase{Game 1} In this game the adversary additionally wins if it produces a transcript in $\tree$ such that $\vereq_{\inp, \zkproof}(\chi) = 0$, but $\vereq_{\inp, \zkproof}(X) \neq 0$. That is, the ideal verifier does not accept the proof, but the real verifier does. \ncase{Game 0 to Game 1} Assume the adversary wins in Game 1, but does not win in Game 0. We show that such adversary may be used to break the $\dlog$ assumption. More precisely, let $\tdv$ be an adversary that for relation $\REL$ and randomly picked $\srs \sample \kgen(\REL)$ produces a tree of acceptable transcripts such that the winning condition of the game holds. Let $\rdvdlog$ be a reduction that gets as input an $(\dconst, 1)$-dlog instance $\gone{1, \ldots, \chi^\dconst}, \gtwo{1, \chi}$ and is tasked to output $\chi$. The reduction proceeds as follows---it gives the input instance to the adversary as the SRS. Let $(1, \tree)$ be the output returned by $\adv$. Let $\inp$ be a relation proven in $\tree$. Consider a transcript $\zkproof \in \tree$ such that $\vereq_{\inp, \zkproof}(X) \neq 0$, but $\vereq_{\inp, \zkproof}(\chi) = 0$. Since the adversary is algebraic, all group elements included in $\tree$ are extended by their representation as a combination of the input $\GRP_1$-elements. Hence all coefficients of the verification equation polynomial $\vereq_{\inp, \zkproof}(X)$ are known and $\rdvdlog$ can find its zero points. Since $\vereq_{\inp, \zkproof}(\chi) = 0$, the targeted discrete log value $\chi$ is among them. Hence, the probability that this event happens is upper-bounded by $\epsdlog(\secpar)$. \end{proof} \subsection{Honest-verifier zero knowledge} \begin{lemma} \label{lem:marlin_hvzk} $\marlinprot$ is honest verifier zero-knowledge. \end{lemma} \begin{proof} The simulator follows the protocol except it picks the challenges $\alpha, \eta_A, \eta_B, \eta_C, \beta_1, \beta_2, \beta_3$ before it picks polynomials it sends. First, it picks $\p{\tilde{z}}_A (X)$, $\p{\tilde{z}}_B (X)$ at random and $\p{\tilde{z}}_C (X)$ such that $\p{\tilde{z}}_A (\beta_1) \p{\tilde{z}}_B (\beta_1) = \p{\tilde{z}}_C (\beta_1)$. Given the challenges and polynomials $\p{\tilde{z}}_A (X)$, $\p{\tilde{z}}_B (X)$, $\p{\tilde{z}}_C (X)$ the simulator computes $\sigma_1 \gets \sum_{\kappa \in \HHH} \p{s}(\kappa) + \p{r}(\alpha, X) (\sum_{M \in \smallset{A, B, C}}\eta_M \p{\tilde{z}}_M(X)) - \sum_{M \in \smallset{A, B, C}} \eta_M \p{r}_M (\alpha, X) \p{\tilde{z}} (X)$. 
Then the simulator starts the protocol and follows it, except it programs the random oracle that on partial transcripts it returns the challenges picked by $\simulator$. \end{proof} \subsection{From forking soundness and unique response property to forking simulation extractability of $\marlinprotfs$} \begin{corollary} Assume that $\marlinprot$ is $\ur{1}$ with security $\epsur(\secpar) = 6 \cdot (\epsbind + \epsop + \epsk)$, and forking-sound with security $\epsfor (\secpar)$. Let $\ro\colon \bin^* \to \bin^\secpar$ be a random oracle. Let $\advse$ be an algebraic adversary that can make up to $q$ random oracle queries, up to $S$ simulation oracle queries, and outputs an acceptable proof for $\marlinprotfs$ with probability at least $\accProb$. Then $\marlinprotfs$ is forking simulation-extractable with extraction error $\eta = \epsur(\secpar)$. The extraction probability $\extProb$ is at least \[ \extProb \geq \frac{1}{q^{\dconst}} (\accProb - \epsur(\secpar))^{\dconst + 1} - \eps(\secpar). \] for some negligible $\eps(\secpar)$, $\dconst$ being, the upper bound of constrains of the system. \[ \extProb \geq q^{-(\dconst - 1)} (\accProb - 6 \cdot (\epsbind + \epsop + \epsk))^{\dconst} -\eps(\secpar)\,. \] \end{corollary} \section{Further work} We identify a number of problems which we left as further work. First of all, the generalised version of the forking lemma presented in this paper can be generalised even further to include protocols where forking soundness holds for protocols where $\extt$ extracts a witness from a $(n_1, \ldots, n_\mu)$-tree of acceptable transcripts, where more than one $n_j > 1$. I.e.~to include protocols that for witness extraction require transcripts that branch at more than one point. Although we picked $\plonk$ and $\sonic$ as examples for our framework, it is not limited to SRS-based NIZKs. Thus, it would be interesting to apply it to known so-called transparent zkSNARKs like Bulletproofs \cite{SP:BBBPWM18}, Aurora \cite{EC:BCRSVW19} or AuroraLight \cite{EPRINT:Gabizon19a}. Since the rewinding technique and the forking lemma used to show simulation extractability of $\plonkprotfs$ and $\sonicprotfs$ come with security loss, it would be interesting to show SE of these protocols directly in the algebraic group model. Although we focused here only on zkSNARKs, it is worth to investigating other protocols that may benefit from our framework, like e.g.~identification schemes. Last, but not least, this paper would benefit greatly if a more tight version of the generalised forking lemma was provided. However, we have to note here that some of the inequalities used in the proof are already tight, i.e.~for specific adversaries, some of the inequalities are already equalities. % \section*{Acknowledgement} % The second author thanks Antoine Rondelet for helpful discussions. %\begin{spacing}{0.92} \bibliographystyle{abbrv} \bibliography{cryptobib/abbrev3,cryptobib/crypto,additional_bib} %\end{spacing} % \clearpage \appendix %{\Huge{Supplementary Material}} \section{Omitted protocols descriptions} \subsection{Polynomial commitment schemes} \label{sec:pcom} \cref{fig:pcomp,fig:pcoms} present variants of KZG polynomial commitment schemes used in \plonk{} and \sonic{}. The key generation algorithm $\kgen$ takes as input a security parameter $\secparam$ and a parameter $\maxdeg$ which determines the maximal degree of the committed polynomial. We assume that $\maxdeg$ can be read from the output SRS. 
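As a toy illustration of the quotient-based opening used by both variants (the concrete numbers are ours, not part of the schemes' descriptions): to open a commitment to $\p{f}(X) = X^2 + 1$ at $z = 2$, the prover claims the value $s = \p{f}(2) = 5$ and computes the opening polynomial
\[
\p{o}(X) = \frac{\p{f}(X) - s}{X - z} = \frac{X^2 - 4}{X - 2} = X + 2\,,
\]
sending the single group element $\gone{\p{o}(\chi)}$. The respective $\verify$ algorithms of \cref{fig:pcomp,fig:pcoms} then use a pairing to check (a batched, respectively degree-shifted, version of) the identity $\p{f}(\chi) - s = \p{o}(\chi)(\chi - z)$ ``in the exponent''.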
We emphasize the following properties of a secure polynomial commitment $\PCOM$: \begin{description} \item[Evaluation binding:] A $\ppt$ adversary $\adv$ which outputs a commitment $\vec{c}$ and evaluation points $\vec{z}$ has at most negligible chances to open the commitment to two different evaluations $\vec{s}, \vec{s'}$. That is, let $k \in \NN$ be the number of committed polynomials, $l \in \NN$ number of evaluation points, $\vec{c} \in \GRP^k$ be the commitments, $\vec{z} \in \FF_p^l$ be the arguments the polynomials are evaluated at, $\vec{s},\vec{s}' \in \FF_p^k$ the evaluations, and $\vec{o},\vec{o}' \in \FF_p^l$ be the commitment openings. Then for every $\ppt$ adversary $\adv$ \[ \Pr \left[ \begin{aligned} & \verify(\srs, \vec{c}, \vec{z}, \vec{s}, \vec{o}) = 1, \\ & \verify(\srs, \vec{c}, \vec{z}, \vec{s}', \vec{o}') = 1, \\ & \vec{s} \neq \vec{s}' \end{aligned} \,\left|\,\vphantom{\begin{aligned} & \\ & \\ & \end{aligned}} \begin{aligned} & \srs \gets \kgen(\secparam, \maxdeg),\\ & (\vec{c}, \vec{z}, \vec{s}, \vec{s}', \vec{o}, \vec{o}') \gets \adv(\srs) \end{aligned} \right.\right] \leq \negl\,. \] \end{description} We say that $\PCOM$ has the unique opening property if the following holds: \begin{description} \item[Opening uniqueness:] Let $k \in \NN$ be the number of committed polynomials, $l \in \NN$ number of evaluation points, $\vec{c} \in \GRP^k$ be the commitments, $\vec{z} \in \FF_p^l$ be the arguments the polynomials are evaluated at, $\vec{s} \in \FF_p^k$ the evaluations, and $\vec{o} \in \FF_p^l$ be the commitment openings. Then for every $\ppt$ adversary $\adv$ \[ \Pr \left[ \begin{aligned} & \verify(\srs, \vec{c}, \vec{z}, \vec{s}, \vec{o}) = 1, \\ & \verify(\srs, \vec{c}, \vec{z}, \vec{s}, \vec{o'}) = 1, \\ & \vec{o} \neq \vec{o'} \end{aligned} \,\left|\, \vphantom{\begin{aligned} & \\ & \\ & \end{aligned}} \begin{aligned} & \srs \gets \kgen(\secparam, \maxdeg),\\ & (\vec{c}, \vec{z}, \vec{s}, \vec{o}, \vec{o'}) \gets \adv(\srs) \end{aligned} \right.\right] \leq \negl\,. \] \end{description} Intuitively, opening uniqueness assures that there is only one valid opening for the committed polynomial and given evaluation point. This property is crucial in showing forking simulation-extractability of $\plonk$ and $\sonic$. We show that the $\plonk$'s and $\sonic$'s polynomial commitment schemes satisfy this requirement in \cref{lem:pcomp_op} and \cref{lem:pcoms_unique_op} respectively. We also formalize notion of $k$-hiding property of a polynomial commitment scheme \begin{description} \item[Hiding] Let $\HHH$ be a set of size $\maxdeg + 1$ and $\ZERO_\HHH$ its vanishing polynomial. 
We say that a polynomial scheme is \emph{hiding} with security $\epsh(\secpar)$ if for every $\ppt$ adversary $\adv$, $k \in \NN$, probability \begin{align*} \Pr\left[ \begin{aligned} & b' = b \end{aligned} \,\left|\, \begin{aligned} & (\srs, \maxdeg) \sample \kgen(\secparam), (f_0, f_1, c, k, b') \gets \adv^{\oraclec}(\srs), f_0, f_1 \in \FF^{\maxdeg} [X] \end{aligned} \right.\right] \leq \frac{1}{2} + \eps(\secpar) \end{align*} Here, $\oraclec$ is a challenge oracle that \begin{compactenum} \item takes polynomials $f_0, f_1$ provided by the adversary and parameter $k$, \item samples bit $b$, \item samples vector $\vec{a} \in \FF^k$, \item computes polynomial, $f'_b (X) = f_b + \ZERO_\HHH (X) (a_0 + a_1 X + \ldots a_{k - 1} X^{k - 1})$, \item outputs polynomial commitment $c = f'_b (\chi)$, \item on adversary's evaluation query $x$ it adds $x$ to initially empty set $Q_x$ and if $|Q_x| \leq k$, it provides $f'_b (x)$. \end{compactenum} \end{description} \begin{description} \item[Commitment of knowledge] For every $\ppt$ adversary $\adv$ who produces commitment $c$, evaluation $s$ and opening $o$ there exists a $\ppt$ extractor $\ext$ such that \[ \Pr \left[ \begin{aligned} & \deg \p{f} \leq \maxdeg\\ & c = \com(\srs, \p{f}),\\ & \verify(\srs, c, z, s, o) = 1 \end{aligned} \,\left|\, \vphantom{ \begin{aligned} & \\ & \\ & \end{aligned} } \begin{aligned} & \srs \gets \kgen(\secparam, \maxdeg),\\ & c \gets \adv(\srs), z \sample \FF_p \\ & (s, o) \gets \adv(\srs, c, z), \\ & \p{f} = \ext_\adv(\srs, c)\\ \end{aligned} \right.\right] \geq 1 - \epsk(\secpar). \] In that case we say that $\PCOM$ is $\epsk(\secpar)$-knowledge. \end{description} Intuitively when a commitment scheme is ``of knowledge'' then if an adversary produces a (valid) commitment $c$, which it can open, then it also knows the underlying polynomial $\p{f}$ which commits to that value. \cite{CCS:MBKM19} shows, using AGM, that $\PCOMs$ is a commitment of knowledge. The same reasoning could be used to show that property for $\PCOMp$. 
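For intuition, the AGM argument referred to above can be sketched as follows (this is our paraphrase, not a verbatim account of the cited proof): an algebraic adversary that outputs a commitment $c \in \GRP_1$ must also output coefficients expressing it in terms of the $\GRP_1$-elements of the SRS, e.g.
\[
c = \sum_{i} \gamma_i \cdot \gone{\chi^i}\,,
\]
so the extractor $\ext_\adv$ can simply read off the candidate polynomial $\p{f}(X) = \sum_i \gamma_i X^i$ (for $\PCOMs$, after accounting for the public $\alpha X^{\dconst - \maxconst}$ shift applied by $\com$); the degree bound follows from the powers of $\chi$ available in the SRS, and consistency of the adversary's later evaluations with $\p{f}$ is enforced by evaluation binding.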
% We require $\PCOM$ to have the following properties: \begin{figure} \centering \begin{pcvstack}[center,boxed] \begin{pchstack} \procedure{$\kgen(\secparam, \maxdeg)$} { \chi \sample \FF^2_p \\ [\myskip] \pcreturn \gone{1, \ldots, \chi^{\numberofconstrains + 2}}, \gtwo{\chi}\\ [\myskip] \hphantom{\hspace*{5.5cm}} %\hphantom{\pcind \p{o}_i(X) \gets \sum_{j = 1}^{t_i} \gamma_i^{j - 1} %\frac{\p{f}_{i,j}(X) - \p{f}_{i, j}(z_i)}{X - z_i}} } \pchspace \procedure{$\com(\srs, \vec{\p{f}}(X))$} { \pcreturn \gone{\vec{c}} = \gone{\vec{\p{f}}(\chi)}\\ [\myskip] \hphantom{\pcind \pcif \sum_{i = 1}^{\abs{\vec{z}}} r_i \cdot \gone{\sum_{j = 1}^{t_j} \gamma_i^{j - 1} c_{i, j} - \sum{j = 1}^{t_j} s_{i, j}} \bullet \gtwo{1} + } } \end{pchstack} % \pcvspace \begin{pchstack} \procedure{$\open(\srs, \vec{\gamma}, \vec{z}, \vec{s}, \vec{\p{f}}(X))$} { \pcfor i \in \range{1}{\abs{\vec{z}}} \pcdo\\ [\myskip] \pcind \p{o}_i(X) \gets \sum_{j = 1}^{t_i} \gamma_i^{j - 1} \frac{\p{f}_{i,j}(X) - \p{f}_{i, j}(z_i)}{X - z_i}\\ [\myskip] \pcreturn \vec{o} = \gone{\vec{\p{o}}(\chi)}\\ [\myskip] \hphantom{\hspace*{5.5cm}} } \pchspace \procedure{$\verify(\srs, \gone{c}, \vec{z}, \vec{s}, \gone{\p{o}(\chi)})$} { \vec{r} \gets \FF_p^{\abs{\vec{z}}}\\ [\myskip] \pcfor i \in \range{1}{\abs{\vec{z}}} \pcdo \\ [\myskip] \pcind \pcif \sum_{i = 1}^{\abs{\vec{z}}} r_i \cdot \gone{\sum_{j = 1}^{t_j} \gamma_i^{j - 1} c_{i, j} - \sum{j = 1}^{t_j} s_{i, j}} \bullet \gtwo{1} + \\ [\myskip] \pcind \sum_{i = 1}^{\abs{\vec{z}}} r_i z_i o_i \bullet \gtwo{1} \neq \gone{- \sum_{i = 1}^{\abs{\vec{z}}} r_i o_i } \bullet \gtwo{\chi} \pcthen \\ \pcind \pcreturn 0\\ [\myskip] \pcreturn 1. } \end{pchstack} \end{pcvstack} \caption{$\PCOMp$ polynomial commitment scheme.} \label{fig:pcomp} \end{figure} \begin{figure} \centering \begin{pcvstack}[center,boxed] \begin{pchstack} \procedure{$\kgen(\secparam, \maxdeg)$} { \alpha, \chi \sample \FF^2_p \\ [\myskip] \pcreturn \gone{\smallset{\chi^i}_{i = -\multconstr}^{\multconstr}, \smallset{\alpha \chi^i}_{i = -\multconstr, i \neq 0}^{\multconstr}},\\ \pcind \gtwo{\smallset{\chi^i, \alpha \chi^i}_{i = -\multconstr}^{\multconstr}}, \gtar{\alpha}\\ %\markulf{03.11.2020}{} \\ % \hphantom{\pcind \p{o}_i(X) \gets \sum_{j = 1}^{t_i} \gamma_i^{j - 1} \frac{\p{f}_{i,j}(X) - \p{f}_{i, j}(z_i)}{X - z_i}} \hphantom{\hspace*{5.5cm}} } \pchspace \procedure{$\com(\srs, \maxconst, \p{f}(X))$} { \p{c}(X) \gets \alpha \cdot X^{\dconst - \maxconst} \p{f}(X) \\ [\myskip] \pcreturn \gone{c} = \gone{\p{c}(\chi)}\\ [\myskip] \hphantom{\pcind \pcif \sum_{i = 1}^{\abs{\vec{z}}} r_i \cdot \gone{\sum_{j = 1}^{t_j} \gamma_i^{j - 1} c_{i, j} - \sum_{j = 1}^{t_j} s_{i, j}} \bullet \gtwo{1} + } } \end{pchstack} % \pcvspace \begin{pchstack} \procedure{$\open(\srs, z, s, f(X))$} { \p{o}(X) \gets \frac{\p{f}(X) - \p{f}(z)}{X - z}\\ [\myskip] \pcreturn \gone{\p{o}(\chi)}\\ [\myskip] \hphantom{\hspace*{5.5cm}} } \pchspace \procedure{$\verify(\srs, \maxconst, \gone{c}, z, s, \gone{\p{o}(\chi)})$} { \pcif \gone{\p{o}(\chi)} \bullet \gtwo{\alpha \chi} + \gone{s - z \p{o}(\chi)} \bullet \gtwo{\alpha} = \\ [\myskip] \pcind \gone{c} \bullet \gtwo{\chi^{- \dconst + \maxconst}} \pcthen \pcreturn 1\\ [\myskip] \rlap{\pcelse \pcreturn 0.} \hphantom{\pcind \pcif \sum_{i = 1}^{\abs{\vec{z}}} r_i \cdot \gone{\sum_{j = 1}^{t_j} \gamma_i^{j - 1} c_{i, j} - \sum{j = 1}^{t_j} s_{i, j}} \bullet \gtwo{1} + } } \end{pchstack} \end{pcvstack} \caption{$\PCOMs$ polynomial commitment scheme.} \label{fig:pcoms} \end{figure} \section{Non-malleability of \plonk{}, 
omitted proofs and descriptions}
\label{sec:plonk_supp_mat}

\subsection{$\plonk$ protocol rolled out}
\label{sec:plonk_explained}

\newcommand{\vql}{\vec{q_{L}}}
\newcommand{\vqr}{\vec{q_{R}}}
\newcommand{\vqm}{\vec{q_{M}}}
\newcommand{\vqo}{\vec{q_{O}}}
\newcommand{\vx}{\vec{x}}
\newcommand{\vqc}{\vec{q_{C}}}

\oursubsub{The constraint system}
Assume $\CRKT$ is a fan-in-two arithmetic circuit whose fan-out is unlimited and which has $\numberofconstrains$ gates and $\noofw$ wires ($\numberofconstrains \leq \noofw \leq 2\numberofconstrains$).
\plonk's constraint system is defined as follows:
\begin{itemize}
\item Let $\vec{V} = (\va, \vb, \vc)$, where $\va, \vb, \vc \in \range{1}{\noofw}^\numberofconstrains$. Entries $\va_i, \vb_i, \vc_i$ are the indices of the left, right, and output wires of the circuit's $i$-th gate.
\item Vectors $\vec{Q} = (\vql, \vqr, \vqo, \vqm, \vqc) \in (\FF^\numberofconstrains)^5$ are called \emph{selector vectors}:
\begin{itemize}
\item If the $i$-th gate is a multiplication gate, then $\vql_i = \vqr_i = 0$, $\vqm_i = 1$, and $\vqo_i = -1$.
\item If the $i$-th gate is an addition gate, then $\vql_i = \vqr_i = 1$, $\vqm_i = 0$, and $\vqo_i = -1$.
\item $\vqc_i = 0$ always.
\end{itemize}
\end{itemize}
We say that a vector $\vx \in \FF^\noofw$ satisfies the constraint system if for all $i \in \range{1}{\numberofconstrains}$
\[
\vql_i \cdot \vx_{\va_i} + \vqr_i \cdot \vx_{\vb_i} + \vqo_i \cdot \vx_{\vc_i} + \vqm_i \cdot (\vx_{\va_i} \vx_{\vb_i}) + \vqc_i = 0.
\]

\oursubsub{Algorithms rolled out}
The \plonk{} argument system is universal. That is, a single SRS allows one to verify the computation of any arithmetic circuit with at most $\numberofconstrains$ gates. However, to make the computation efficient, each circuit may undergo a preprocessing phase which extends the SRS with circuit-related polynomial evaluations.
For the sake of simplicity of the security reductions presented in this paper, we include in the SRS only those elements that cannot be computed without knowing the secret trapdoor $\chi$. The rest of the SRS---the preprocessed input---can be computed from these SRS elements; thus we leave it to be computed by the prover, verifier, and simulator.

\ourpar{$\plonk$ SRS generating algorithm $\kgen(\REL)$:} The SRS generating algorithm picks at random $\chi \sample \FF_p$, computes and outputs
\[
\srs = \left(\gone{\smallset{\chi^i}_{i = 0}^{\numberofconstrains + 2}}, \gtwo{\chi} \right).
\]

\ourpar{Preprocessing:} Let $H = \smallset{\omega^i}_{i = 1}^{\numberofconstrains}$ be a (multiplicative) $\numberofconstrains$-element subgroup of the field $\FF$ composed of the $\numberofconstrains$-th roots of unity in $\FF$. Let $\lag_i(X)$ be the $i$-th element of an $\numberofconstrains$-element Lagrange basis.
During the preprocessing phase the polynomials $\p{S_{id j}}, \p{S_{\sigma j}}$, for $j \in \range{1}{3}$, are computed:
\begin{equation*}
\begin{aligned}
\p{S_{id 1}}(X) & = X,\vphantom{\sum_{i = 1}^{\noofc} \sigma(i) \lag_i(X),}\\
\p{S_{id 2}}(X) & = k_1 \cdot X,\vphantom{\sum_{i = 1}^{\noofc} \sigma(i) \lag_i(X),}\\
\p{S_{id 3}}(X) & = k_2 \cdot X,\vphantom{\sum_{i = 1}^{\noofc} \sigma(i) \lag_i(X),}
\end{aligned}
\qquad
\begin{aligned}
\p{S_{\sigma 1}}(X) & = \sum_{i = 1}^{\noofc} \sigma(i) \lag_i(X),\\
\p{S_{\sigma 2}}(X) & = \sum_{i = 1}^{\noofc} \sigma(\noofc + i) \lag_i(X),\\
\p{S_{\sigma 3}}(X) & = \sum_{i = 1}^{\noofc} \sigma(2 \noofc + i) \lag_i(X).
\end{aligned}
\end{equation*}
The coefficients $k_1$, $k_2$ are chosen such that $H, k_1 \cdot H, k_2 \cdot H$ are distinct cosets in $\FF^*$, and thus they define $3 \cdot \noofc$ different elements. \cite{EPRINT:GabWilCio19} notes that it is enough to set $k_1$ to a quadratic residue and $k_2$ to a quadratic non-residue.
Furthermore, we define the polynomials $\p{q_L}, \p{q_R}, \p{q_O}, \p{q_M}, \p{q_C}$ such that
\begin{equation*}
\begin{aligned}
\p{q_L}(X) & = \sum_{i = 1}^{\noofc} \vql_i \lag_i(X), \\
\p{q_R}(X) & = \sum_{i = 1}^{\noofc} \vqr_i \lag_i(X), \\
\p{q_M}(X) & = \sum_{i = 1}^{\noofc} \vqm_i \lag_i(X),
\end{aligned}
\qquad
\begin{aligned}
\p{q_O}(X) & = \sum_{i = 1}^{\noofc} \vqo_i \lag_i(X), \\
\p{q_C}(X) & = \sum_{i = 1}^{\noofc} \vqc_i \lag_i(X). \\
\vphantom{\p{q_M}(X) = \sum_{i = 1}^{\noofc} \vqm_i \lag_i(X),}
\end{aligned}
\end{equation*}

\ourpar{$\plonk$ prover $\prover(\srs, \inp, \wit = (\wit_i)_{i \in \range{1}{3 \cdot \noofc}})$.}
\begin{description}
\item[Round 1] Sample $b_1, \ldots, b_9 \sample \FF_p$; compute $\p{a}(X), \p{b}(X), \p{c}(X)$ as
\begin{align*}
\p{a}(X) &= (b_1 X + b_2)\p{Z_H}(X) + \sum_{i = 1}^{\noofc} \wit_i \lag_i(X) \\
\p{b}(X) &= (b_3 X + b_4)\p{Z_H}(X) + \sum_{i = 1}^{\noofc} \wit_{\noofc + i} \lag_i(X) \\
\p{c}(X) &= (b_5 X + b_6)\p{Z_H}(X) + \sum_{i = 1}^{\noofc} \wit_{2 \cdot \noofc + i} \lag_i(X)
\end{align*}
Output the polynomial commitments $\gone{\p{a}(\chi), \p{b}(\chi), \p{c}(\chi)}$.

\item[Round 2] Get the challenges $\beta, \gamma \in \FF_p$,
\[
\beta = \ro(\zkproof[0..1], 0)\,, \qquad
\gamma = \ro(\zkproof[0..1], 1)\,.
\]
Compute the permutation polynomial $\p{z}(X)$,
\begin{multline*}
\p{z}(X) = (b_7 X^2 + b_8 X + b_9)\p{Z_H}(X) + \lag_1(X) + \\
+ \sum_{i = 1}^{\noofc - 1} \left(\lag_{i + 1} (X) \prod_{j = 1}^{i} \frac{
(\wit_j +\beta \omega^{j - 1} + \gamma)(\wit_{\noofc + j} + \beta k_1 \omega^{j - 1} + \gamma)(\wit_{2 \noofc + j} +\beta k_2 \omega^{j- 1} + \gamma)}
{(\wit_j+\sigma(j) \beta + \gamma)(\wit_{\noofc + j} + \sigma(\noofc + j)\beta + \gamma)(\wit_{2 \noofc + j} + \sigma(2 \noofc + j)\beta + \gamma)}\right)
\end{multline*}
Output the polynomial commitment $\gone{\p{z}(\chi)}$.

\item[Round 3] Get the challenge $\alpha = \ro(\zkproof[0..2])$ and compute the quotient polynomial
\begin{align*}
& \p{t}(X) = \\
& (\p{a}(X) \p{b}(X) \selmulti(X) + \p{a}(X) \selleft(X) + \p{b}(X)\selright(X) + \p{c}(X)\seloutput(X) + \pubinppoly(X) + \selconst(X)) \frac{1}{\p{Z_H}(X)} \\
& + (\p{a}(X) + \beta X + \gamma) (\p{b}(X) + \beta k_1 X + \gamma)(\p{c}(X) + \beta k_2 X + \gamma)\p{z}(X) \frac{\alpha}{\p{Z_H}(X)} \\
& - (\p{a}(X) + \beta \p{S_{\sigma 1}}(X) + \gamma)(\p{b}(X) + \beta \p{S_{\sigma 2}}(X) + \gamma)(\p{c}(X) + \beta \p{S_{\sigma 3}}(X) + \gamma)\p{z}(X \omega) \frac{\alpha}{\p{Z_H}(X)} \\
& + (\p{z}(X) - 1) \lag_1(X) \frac{\alpha^2}{\p{Z_H}(X)}
\end{align*}
Split $\p{t}(X)$ into polynomials $\p{t_{lo}}(X), \p{t_{mid}}(X), \p{t_{hi}}(X)$ of degree less than $\noofc$, such that
\[
\p{t}(X) = \p{t_{lo}}(X) + X^{\noofc} \p{t_{mid}}(X) + X^{2 \noofc} \p{t_{hi}}(X)\,.
\]
Output $\gone{\p{t_{lo}}(\chi), \p{t_{mid}}(\chi), \p{t_{hi}}(\chi)}$.

\item[Round 4] Get the challenge $\chz \in \FF_p$, $\chz = \ro(\zkproof[0..3])$.
Compute the opening evaluations
\begin{align*}
\p{a}(\chz), \p{b}(\chz), \p{c}(\chz), \p{S_{\sigma 1}}(\chz), \p{S_{\sigma 2}}(\chz), \p{t}(\chz), \p{z}(\chz \omega).
\end{align*}
Compute the linearisation polynomial
\[
\p{r}(X) =
\begin{aligned}
& \p{a}(\chz) \p{b}(\chz) \selmulti(X) + \p{a}(\chz) \selleft(X) + \p{b}(\chz) \selright(X) + \p{c}(\chz) \seloutput(X) + \selconst(X) \\
& + \alpha \cdot \left( (\p{a}(\chz) + \beta \chz + \gamma) (\p{b}(\chz) + \beta k_1 \chz + \gamma)(\p{c}(\chz) + \beta k_2 \chz + \gamma) \cdot \p{z}(X)\right) \\
& - \alpha \cdot \left( (\p{a}(\chz) + \beta \p{S_{\sigma 1}}(\chz) + \gamma) (\p{b}(\chz) + \beta \p{S_{\sigma 2}}(\chz) + \gamma)\beta \p{z}(\chz\omega) \cdot \p{S_{\sigma 3}}(X)\right) \\
& + \alpha^2 \cdot \lag_1(\chz) \cdot \p{z}(X)
\end{aligned}
\]
Output $\p{a}(\chz), \p{b}(\chz), \p{c}(\chz), \p{S_{\sigma 1}}(\chz), \p{S_{\sigma 2}}(\chz), \p{t}(\chz), \p{z}(\chz \omega), \p{r}(\chz).$

\item[Round 5] Compute the opening challenge $v \in \FF_p$, $v = \ro(\zkproof[0..4])$.
Compute the openings for the polynomial commitment scheme
\begin{align*}
& \p{W_\chz}(X) = \frac{1}{X - \chz}
\left(
\begin{aligned}
& \p{t_{lo}}(X) + \chz^\noofc \p{t_{mid}}(X) + \chz^{2 \noofc} \p{t_{hi}}(X) - \p{t}(\chz)\\
& + v(\p{r}(X) - \p{r}(\chz)) \\
& + v^2 (\p{a}(X) - \p{a}(\chz))\\
& + v^3 (\p{b}(X) - \p{b}(\chz))\\
& + v^4 (\p{c}(X) - \p{c}(\chz))\\
& + v^5 (\p{S_{\sigma 1}}(X) - \p{S_{\sigma 1}}(\chz))\\
& + v^6 (\p{S_{\sigma 2}}(X) - \p{S_{\sigma 2}}(\chz))
\end{aligned}
\right)\\
& \p{W_{\chz \omega}}(X) = \frac{\p{z}(X) - \p{z}(\chz \omega)}{X - \chz \omega}
\end{align*}
Output $\gone{\p{W_{\chz}}(\chi), \p{W_{\chz \omega}}(\chi)}$.
\end{description}

\ncase{$\plonk$ verifier $\verifier(\srs, \inp, \zkproof)$}\ \newline
The \plonk{} verifier works as follows:
\begin{description}
\item[Step 1] Validate all obtained group elements.
\item[Step 2] Validate all obtained field elements.
\item[Step 3] Validate the instance $\inp = \smallset{\wit_i}_{i = 1}^\instsize$.
\item[Step 4] Compute the challenges $\beta, \gamma, \alpha, \chz, v, u$ from the transcript.
\item[Step 5] Compute the zero polynomial evaluation $\p{Z_H} (\chz) = \chz^\noofc - 1$.
\item[Step 6] Compute the Lagrange polynomial evaluation $\lag_1 (\chz) = \frac{\chz^\noofc -1}{\noofc (\chz - 1)}$.
\item[Step 7] Compute the public input polynomial evaluation $\pubinppoly (\chz) = \sum_{i \in \range{1}{\instsize}} \wit_i \lag_i(\chz)$.
\item[Step 8] Compute the quotient polynomial evaluation
\begin{multline*}
\p{t} (\chz) = \frac{1}{\p{Z_H}(\chz)} \Big( \p{r} (\chz) + \pubinppoly(\chz) - (\p{a}(\chz) + \beta \p{S_{\sigma 1}}(\chz) + \gamma) (\p{b}(\chz) + \beta \p{S_{\sigma 2}}(\chz) + \gamma) \\
(\p{c}(\chz) + \gamma)\p{z}(\chz \omega) \alpha - \lag_1 (\chz) \alpha^2 \Big) \,.
\end{multline*}
\item[Step 9] Compute the batched polynomial commitment $\gone{D} = v \gone{r} + u \gone {z}$, that is
\begin{align*}
\gone{D} & = v \left(
\begin{aligned}
& \p{a}(\chz)\p{b}(\chz) \cdot \gone{\selmulti} + \p{a}(\chz) \gone{\selleft} + \p{b}(\chz) \gone{\selright} + \p{c}(\chz) \gone{\seloutput} \\
& + \left( (\p{a}(\chz) + \beta \chz + \gamma) (\p{b}(\chz) + \beta k_1 \chz + \gamma) (\p{c}(\chz) + \beta k_2 \chz + \gamma) \alpha + \lag_1(\chz) \alpha^2 \right) \cdot \gone{\p{z}(\chi)} \\
& - (\p{a}(\chz) + \beta \p{S_{\sigma 1}}(\chz) + \gamma) (\p{b}(\chz) + \beta \p{S_{\sigma 2}}(\chz) + \gamma) \alpha \beta \p{z}(\chz \omega) \cdot \gone{\p{S_{\sigma 3}}(\chi)}
\end{aligned}
\right) \\
& + u \gone{\p{z}(\chi)}\,.
\end{align*}
\item[Step 10] Compute the full batched polynomial commitment $\gone{F}$:
\begin{align*}
\gone{F} & = \left(\gone{\p{t_{lo}}(\chi)} + \chz^\noofc \gone{\p{t_{mid}}(\chi)} + \chz^{2 \noofc} \gone{\p{t_{hi}}(\chi)}\right) + u \gone{\p{z}(\chi)} \\
& + v \left(
\begin{aligned}
& \p{a}(\chz)\p{b}(\chz) \cdot \gone{\selmulti} + \p{a}(\chz) \gone{\selleft} + \p{b}(\chz) \gone{\selright} + \p{c}(\chz) \gone{\seloutput} \\
& + \left( (\p{a}(\chz) + \beta \chz + \gamma) (\p{b}(\chz) + \beta k_1 \chz + \gamma) (\p{c}(\chz) + \beta k_2 \chz + \gamma) \alpha + \lag_1(\chz) \alpha^2 \right) \cdot \gone{\p{z}(\chi)} \\
& - (\p{a}(\chz) + \beta \p{S_{\sigma 1}}(\chz) + \gamma) (\p{b}(\chz) + \beta \p{S_{\sigma 2}}(\chz) + \gamma) \alpha \beta \p{z}(\chz \omega) \cdot \gone{\p{S_{\sigma 3}}(\chi)}
\end{aligned}
\right) \\
& + v^2 \gone{\p{a}(\chi)} + v^3 \gone{\p{b}(\chi)} + v^4 \gone{\p{c}(\chi)} + v^5 \gone{\p{S_{\sigma 1}(\chi)}} + v^6 \gone{\p{S_{\sigma 2}}(\chi)}\,.
\end{align*}
\item[Step 11] Compute the group-encoded batch evaluation $\gone{E}$
\begin{align*}
\gone{E} = \frac{1}{\p{Z_H}(\chz)} & \gone{
\begin{aligned}
& \p{r}(\chz) + \pubinppoly(\chz) - \alpha^2 \lag_1 (\chz) \\
& - \alpha \left( (\p{a}(\chz) + \beta \p{S_{\sigma 1}} (\chz) + \gamma) (\p{b}(\chz) + \beta \p{S_{\sigma 2}} (\chz) + \gamma) (\p{c}(\chz) + \gamma) \p{z}(\chz \omega) \right)
\end{aligned}
}\\
+ & \gone{v \p{r}(\chz) + v^2 \p{a}(\chz) + v^3 \p{b}(\chz) + v^4 \p{c}(\chz) + v^5 \p{S_{\sigma 1}}(\chz) + v^6 \p{S_{\sigma 2}}(\chz) + u \p{z}(\chz \omega) }\,.
\end{align*}
\item[Step 12] Check whether the verification
% $\vereq_\zkproof(\chi)$
equation holds
\begin{multline}
\label{eq:ver_eq}
\left( \gone{\p{W_{\chz}}(\chi)} + u \cdot \gone{\p{W_{\chz \omega}}(\chi)} \right) \bullet \gtwo{\chi} - %\\
\left( \chz \cdot \gone{\p{W_{\chz}}(\chi)} + u \chz \omega \cdot \gone{\p{W_{\chz \omega}}(\chi)} + \gone{F} - \gone{E} \right) \bullet \gtwo{1} = 0\,.
\end{multline}
The verification equation is a batched version of the verification equation from \cite{AC:KatZavGol10}, which allows the verifier to check openings of multiple polynomials at two points (instead of checking an opening of a single polynomial at a single point).
\end{description}

\ncase{$\plonk$ simulator $\simulator_\chi(\srs, \td= \chi, \inp)$}\ \newline
The \plonk{} simulator proceeds as an honest prover would, except that:
\begin{enumerate}
\item In the first round, it sets $\wit = (\wit_i)_{i \in \range{1}{3 \noofc}} = \vec{0}$ and picks $b_1, \ldots, b_9$ at random. Then it proceeds with that all-zero witness.
\item In Round 3, it computes the polynomial $\pt(X)$ honestly; however, it uses the trapdoor $\chi$ to compute the commitments $\p{t_{lo}}(\chi), \p{t_{mid}}(\chi), \p{t_{hi}}(\chi)$.
\end{enumerate}

\section{Additional preliminaries, lemmas and proofs}

\subsection{Dlog assumptions}
\label{sec:dlog_assumptions}

\begin{definition}[$(q_1, q_2)\mhyph\dlog$ assumption]
Let $\adv$ be a $\ppt$ adversary that gets as input $\gone{1, \chi, \ldots, \chi^{q_1}}, \gtwo{1, \chi, \ldots, \chi^{q_2}}$, for some randomly picked $\chi \in \FF_p$; then
\[
\condprob{\chi \gets \adv(\gone{1, \chi, \ldots, \chi^{q_1}}, \gtwo{1, \chi, \ldots, \chi^{q_2} })}{\chi \sample \FF_p} \leq \negl.
\]
\end{definition}

\begin{definition}[$(q_1, q_2)\mhyph\ldlog$ assumption]
Let $\adv$ be a $\ppt$ adversary that gets as input $\gone{\chi^{-q_1}, \ldots, 1, \chi, \ldots, \chi^{q_1}}, \gtwo{\chi^{-q_2}, \ldots, 1, \chi, \ldots, \chi^{q_2}}$, for some randomly picked $\chi \in \FF_p$; then
\[
\condprob{\chi \gets \adv(\gone{\chi^{-q_1}, \ldots, 1, \chi, \ldots, \chi^{q_1}}, \gtwo{\chi^{-q_2}, \ldots, 1, \chi, \ldots, \chi^{q_2} })}{\chi \sample \FF_p} \leq \negl.
\]
\end{definition}

\subsection{Uber assumption}
\label{sec:uber_assumption}

\ourpar{BBG uber assumption.} Also, to be able to show computational honest verifier zero knowledge of $\plonk$ in the standard model, which is required by our reduction, we rely on the \emph{uber assumption} introduced by Boneh et al.~\cite{EC:BonBoyGoh05}, as presented by Boyen in \cite{PAIRING:Boyen08}.

Let $r, s, t, c \in \NN \setminus \smallset{0}$ and consider vectors of polynomials $\pR \in \FF_p[X_1, \ldots, X_c]^r$, $\pS \in \FF_p[X_1, \ldots, X_c]^s$ and $\pT \in \FF_p[X_1, \ldots, X_c]^t$. Write $\pR = \left( \p{r}_1, \ldots, \p{r}_r \right)$, $\pS = \left( \p{s}_1, \ldots, \p{s}_s \right)$ and $\pT = \left( \p{t}_1, \ldots, \p{t}_t \right)$ for polynomials $\p{r}_i, \p{s}_j, \p{t}_k$. For a function $f$ and a vector $(x_1, \ldots, x_c)$ we write $f(\pR)$ to denote the application of $f$ to each element of $\pR$, i.e.
\(
f(\pR) = \left( f(\p{r}_1 (x_1, \ldots, x_c)), \ldots, f(\p{r}_r (x_1, \ldots, x_c)) \right).
\)
Similarly for applying $f$ to $\pS$ and $\pT$.

\begin{definition}[Independence of $\pR, \pS, \pT$]
\label{def:independence}
Let $\pR, \pS, \pT$ be defined as above. We say that a polynomial $\p{f} \in \FF_p[X_1, \ldots, X_c]$ is \emph{dependent} on $\pR, \pS, \pT$ if there exist $rs + t$ constants $a_{i, j}, b_k$ such that
$
\p{f} = \sum_{i = 1}^{r} \sum_{j = 1}^{s} a_{i, j} \p{r}_i \p{s}_j + \sum_{k = 1}^{t} b_k \p{t}_k.
$
We say that $\p{f}$ is \emph{independent} if it is not dependent.
\end{definition}

To show (standard-model) zero knowledge of $\plonk$ we utilize a generalization of Boneh--Boyen--Goh's \emph{uber assumption} \cite{EC:BonBoyGoh05}, stated as follows (the changed element has been put into a \dbox{dashbox}):
\begin{definition}[$(\pR, \pS, \pT, \p{F}, 1)$-uber assumption]
\label{def:uber_assumption}
Let $\pR, \pS, \pT$ be defined as above, let $(x_1, \ldots, x_c, y_1, \ldots, y_{d}) \sample \FF_p^{c + d}$, and let $\p{F}$ be a cardinality-$d$ set of pair-wise independent polynomials which are also independent of $(\pR, \pS, \pT)$, cf.~\cref{def:independence}. Then, for any $\ppt$ adversary $\adv$
\begin{multline*}
\Pr\left[\adv(\gone{\pR(x_1, \ldots, x_c)}, \gtwo{\pS(x_1, \ldots, x_c)}, \gtar{\pT(x_1, \ldots, x_c)}, \dboxed{\gone{\p{F}(x_1, \ldots, x_c)}}) = 1\right] \approx_\secpar \\
\Pr\left[\adv(\gone{\pR(x_1, \ldots, x_c)}, \gtwo{\pS(x_1, \ldots, x_c)}, \gtar{\pT(x_1, \ldots, x_c)}, \dboxed{\gone{y_1, \ldots, y_{d}}}) = 1\right].
\end{multline*}
\end{definition}

Compared to the original uber assumption, there are two major changes. First, we require elements of $\GRP_1$, rather than target-group $\GRP_T$ elements, to be indistinguishable. Second, Boneh et al.'s assumption works for distinguishers who are given only one challenge polynomial $\p{f}$, i.e.~$\abs{\p{F}} = 1$, whereas we allow a set $\p{F}$ of $d$ challenge polynomials.

We show security of our version of the uber assumption using the generic group model, as introduced by Shoup \cite{EC:Shoup97}, where all group elements are represented by random binary strings of length $\secpar$.
That is, there are random encodings $\xi_1, \xi_2, \xi_T$ which are injective functions from $\ZZ_p^+$ to $\bin^{\secpar}$. We write $\GRP_i = \smallset{\xi_i(x) \mid x \in \ZZ_p^+}$, for $i \in \smallset{1, 2, T}$. For the sake of clarity we denote by $\xi_{i, j}$ the $j$-th encoding in group $\GRP_i$. Let $\p{P}_i = \smallset{p_1, \ldots, p_{\tau_i}} \subset \FF_p[X_1, \ldots, X_n]$, for $i \in \smallset{1, 2, T}, \tau_i, n \in \NN$, be sets of multivariate polynomials. Denote by $\p{P}_i(x_1, \ldots, x_n)$ a set of evaluations of polynomials in $\p{P_i}$ at $(x_1, \ldots, x_n)$. Denote by $L_i = \smallset{(p_j, \xi_{i, j}) \mid j \leq \tau_i}$. Let $\adv$ be an algorithm that is given encodings $\xi_{i, j_i}$ of polynomials in $\p{P}_i$ for $i \in \smallset{1, 2, T}, j_i = \tau_i$. There is an oracle $\oracleo$ that allows to perform $\adv$ the following queries: \begin{description} \item[Group operations in $\GRP_1, \GRP_2, \GRP_T$:] On input $(\xi_{i, j}, \xi_{i, j'}, i, op)$, $j, j' \leq \tau_i$, $op \in \smallset{\msg{add}, \msg{sub}}$, $\oracleo$ sets $\tau'_i \gets \tau_i + 1$, computes $p_{i, \tau'_i} = p_{i, j}(x_1, \ldots, x_n) \pm p_{i, j'}(x_1, \ldots, x_n)$ respectively to $op$. If there is an element $p_{i, k} \in L_i$ such that $p_{i, k} = p_{\tau'_i}$, then the oracle returns encoding of $p_{i, k}$. Otherwise it sets the encoding $\xi_{i, \tau'_i}$ to a new unused random string, adds $(p_{i, \tau'_i}, \xi_{i, \tau'_i})$ to $L_i$, and returns $\xi_{i, \tau'_i}$. \item[Bilinear pairing:] On input $(\xi_{1, j}, \xi_{2, j'})$ the oracle sets $\tau' \gets \tau_T + 1$ and computes $r_{\tau'} \gets p_{i, j}(x_1, \ldots, x_n) \cdot p_{i, j'}(x_1, \ldots, x_n)$. If $r_{\tau'} \in L_T$ then return encoding found in the list $L_T$, else pick a new unused random string and set $\xi_{T, \tau'}$ to it. Return the encoding to the algorithm. \end{description} Given that, we are ready to show security of our variant of the Boneh et al.~uber assumption. The proof goes similarly to the original proof given in \cite{EC:BonBoyGoh05} with minor differences. \begin{theorem}[Security of the uber assumption] \label{thm:uber_assumption} Let $\p{P}_i \in \FF_p[X_1, \ldots, X_n]^{m_i}$, for $i \in \smallset{1, 2, T}$ be $\tau_i$ tuples of $n$-variate polynomials over $\FF_p$ and let $\p{F} \in \FF_p[X_1, \ldots, X_n]^m$. Let $\xi_0, \xi_1, \xi_T$, $\GRP_1, \GRP_2, \GRP_T$ be as defined above. If polynomials $f \in \p{F}$ are pair-wise independent and are independent of $\p{P}_1, \p{P}_2, \p{P}_T$, then for any $\adv$ that makes up to $q$ queries to the GGM oracle holds: \begin{equation*} \begin{split} \left|\, \Pr\left[ \adv\left( \begin{aligned} \xi_1(\p{P}_1(x_1, \ldots, x_n)), \\ \xi_2(\p{P}_2(x_1, \ldots, x_n)), \\ \xi_T(\p{P}_T(x_1, \ldots, x_n)), \\ \xi_{1}(\p{F}_0), \xi_{1}(\p{F}_1) \end{aligned} \right) = b \, \left|\, \begin{aligned} x_1, \ldots, x_n, y_1, \ldots, y_m \sample \FF_p,\\ b \sample \bin, \\ \p{F}_b \gets \p{F}(x_1, \ldots, x_n),\\ \p{F}_{1 - b} \gets (y_1, \ldots, y_m) \end{aligned} \right. \right] - \frac{1}{2} \, \right| \\ \leq \frac{d(q + m_1 + m_2 + m_T + m)^2 }{2p} \end{split} \end{equation*} \end{theorem} \begin{proof} Let $\cdv$ be a challenger that plays with $\adv$ in the following game. $\cdv$ maintains three lists \[ L_i = \smallset{(p_j, \xi_{i, j}) \mid j \in \range{1}{\tau_i}}, \] for $i \in \smallset{1, 2, T}$. Invariant $\tau$ states that $\tau_1 + \tau_2 + \tau_T = \tau + m_1 + m_2 + m$. Challenger $\cdv$ answers $\adv$'s oracle queries. 
However, it does it a bit differently that the oracle $\oracleo$ would: \begin{description} \item[Group operations in $\GRP_1, \GRP_2, \GRP_T$:] On input $(\xi_{i, j}, \xi_{i, j'}, i, op)$, $j, j' \leq \tau_i$, $op \in \smallset{\msg{add}, \msg{sub}}$, $\cdv$ sets $\tau' \gets \tau_i + 1$, computes $p_{i, \tau'}(X_1, \ldots, X_n) = p_{i, j}(X_1, \ldots, X_n) \pm p_{i, j'}(X_1, \ldots, X_n)$ respectively to $op$. If there is a polynomial $p_{i, k}(X_1, \ldots, X_n) \in L_i$ such that $p_{i, k}(X_1, \ldots, X_n) = p_{\tau'}(X_1, \ldots, X_n)$, then the challenger returns encoding of $p_{i, k}$. Otherwise it sets the encoding $\xi_{i, \tau'}$ to a new unused random string, adds $(p_{i, \tau'}, \xi_{i, \tau'})$ to $L_i$, and returns $\xi_{i, \tau'}$. \item[Bilinear pairing:] On input $(\xi_{1, j}, \xi_{2, j'})$ the challenger sets $\tau' \gets \tau_T + 1$ and computes $r_{\tau'}(X_1, \ldots, X_n) \gets p_{i, j}(X_1, \ldots, X_n) \cdot p_{i, j'}(X_1, \ldots, X_n)$. If $r_{\tau'}(X_1, \ldots, X_n) \in L_T$, $\cdv$ returns encoding found in the list $L_T$. Else it picks a new unused random string and set $\xi_{T, \tau'}$ to it. Finally it returns the encoding to the algorithm. \end{description} After at most $q$ queries to the oracle, the adversary returns a bit $b'$. At that point the challenger $\cdv$ chooses randomly $x_1, \ldots, x_n, y_1 \ldots, y_m$, random bit $b$, and sets $X_i = x_i$, for $i \in \range{1}{n}$, and $Y_i = y_i$, for $i \in \range{1}{m}$; furthermore, $\p{F}_b \gets \p{F}(x_1, \ldots, x_n)$ and $\p{F}_{1 - b} \gets (y_1, \ldots, y_m)$. Note that $\cdv$ simulates perfectly unless the chosen values $x_1, \ldots, x_n, y_1, \ldots, y_m$ result in equalities between polynomial evaluations that are not equalities between the polynomials. That is, the simulation is perfect unless for some $i, j, j'$ holds \[ p_{i, j}(x_1, \ldots, x_n) - p_{i, j'}(x_1, \ldots, x_n) = 0, \] for $p_{i, j}(X_1, \ldots, X_n) \neq p_{i, j'}(X_1, \ldots, X_n)$. Denote by $\bad$ an event that at least one of the three conditions holds. When $\bad$ happens, the answer $\cdv$ gives to $\adv$ differs from an answer that a real oracle would give. We bound the probability that $\bad$ occurs in two steps. First we set $\p{F}_b = \p{F}(X_1, \ldots, X_n)$. Note that symbolic substitutions do not introduce any new equalities in $\GRP_1$. That is, if for all $j, j'$ holds $p_{1, j} \neq p_{1, j'}$, then $p_{1, j} \neq p_{1, j'}$ even after setting $\p{F}_b = \p{F}(X_1, \ldots, X_n)$. This follows since all polynomials in $\p{F}$ are pairwise independent and $\p{F}$ independent on $\p{P}_1, \p{P}_2, \p{P}_T$. Indeed, $p_{1, j} - p_{1, j'}$ is a polynomial of the form \[ \sum_{j = 1}^{m_1}a_j p_{1, j} + \sum_{j = 1}^{m} b_j f_j (X_1, \ldots, X_n), \] for some constants $a_j, b_j$. If the polynomial is non-zero, but setting $\p{F}_b = \p{F}(X_1, \ldots, X_n)$ makes this polynomial vanish, then some $f_k$ must be dependent on some $\p{P}_1, \p{F} \setminus \smallset{f_k}$. Now we set $X_1 \ldots, X_n, \p{F}_{1 - b}$ and bound probability that for some $i$ and $j, j'$ holds $(p_{i, j}(x_1, \ldots, x_n) - p_{i, j'}(x_1, \ldots, x_n) = 0$ for $p_{i, j} \neq p_{i, j'}$. By the construction, the maximum total degree of these polynomials is $d = \max(d_{\p{P}_1}+ d_{\p{P}_2}, d_{\p{P}_T}, d_{\p{F}})$, where $d_f$ is the total degree of some polynomial $f$ and for a set of polynomials $F = \smallset{f_1, \ldots, f_k}$, we write $d_F = \smallset{d_{f_1}, \ldots, d_{f_k}}$. 
Thus, for given $j, j'$, the probability that a random assignment to $X_1, \ldots, X_n, Y_1, \ldots, Y_m$ is a root of $p_{i, j} - p_{i, j'}$ is, by the Schwartz--Zippel lemma, bounded by $\infrac{d}{p}$, which is negligible.
Since there are at most $2 \cdot {q + m_0 + m_1 + m \choose 2}$ such (ordered) pairs $p_{i, j}, p_{i, j'}$, we have
\[
\prob{\bad} \leq {q + m_0 + m_1 + m \choose 2} \cdot \frac{2d}{p} \leq (q + m_0 + m_1 + m)^2 \frac{d}{p}.
\]
As noted, if $\bad$ does not occur then the simulation is perfect. Also, the bit $b$ has been chosen independently of $\adv$'s view, thus $\condprob{b = b'}{\neg \bad} = \infrac{1}{2}$. Hence,
\[
\begin{aligned}
\prob{b = b'} & \leq \condprob{b = b'}{\neg \bad}(1 - \prob{\bad}) + \prob{\bad} = \frac{1}{2} + \frac{\prob{\bad}}{2} \\
\prob{b = b'} & \geq \condprob{b = b'}{\neg \bad}(1 - \prob{\bad}) = \frac{1}{2} - \frac{\prob{\bad}}{2}.
\end{aligned}
\]
Finally,
\[
\abs{\Pr[b = b'] - \frac{1}{2}} \leq \prob{\bad}/2 \leq (q + m_0 + m_1 + m)^2 \frac{d}{2p}
\]
as required.
\end{proof}

\subsection{Special simulation-extractability of sigma protocols and forking lemma}
\label{sec:forking_lemma}

\begin{theorem}[Special simulation extractability of the Fiat--Shamir transform \cite{INDOCRYPT:FKMV12}]
Let $\sigmaprot = (\prover, \verifier, \simulator)$ be a non-trivial sigma protocol with unique responses for a language $\LANG \in \npol$. In the random oracle model, the NIZK proof system $\sigmaprot_\fs = (\prover_\fs, \verifier_\fs, \simulator_{\fs})$ resulting from applying the Fiat--Shamir transform to $\sigmaprot$ is special simulation extractable with extraction error $\eta = q/h$ for the simulator $\simulator$. Here, $q$ is the number of random oracle queries and $h$ is the number of elements in the range of $\ro$.
\end{theorem}

The theorem relies on the following \emph{general forking lemma} \cite{JC:PoiSte00}.

\begin{lemma}[General forking lemma, cf.~\cite{INDOCRYPT:FKMV12,CCS:BelNev06}]
\label{lem:forking_lemma}
Fix $q \in \ZZ$ and a set $H$ of size $h > 2$. Let $\zdv$ be a $\ppt$ algorithm that on input $y, h_1, \ldots, h_q$ returns $(i, s)$, where $i \in\range{0}{q}$ and $s$ is called a \emph{side output}. Denote by $\ig$ a randomised instance generator. We denote by $\accProb$ the probability
\[
\condprob{i > 0}{y \gets \ig; h_1, \ldots, h_q \sample H; (i, s) \gets \zdv(y, h_1, \ldots, h_q)}\,.
\]
Let $\forking_\zdv(y)$ denote the algorithm described in \cref{fig:forking_lemma}; then for the probability $\frkProb$ defined as
$
\frkProb := \condprob{b = 1}{y \gets \ig; (b, s, s') \gets \forking_{\zdv}(y)}
$
it holds that
\[
\frkProb \geq \accProb \brak{\frac{\accProb}{q} - \frac{1}{h}}\,.
\]
%
\begin{figure}
\centering
\fbox{
\procedure{$\forking_\zdv (y)$}
{
\rho \sample \RND{\zdv}\\
h_1, \ldots, h_q \sample H\\
(i, s) \gets \zdv(y, h_1, \ldots, h_q; \rho)\\
\pcif i = 0\ \pcreturn (0, \bot, \bot)\\
h'_{i}, \ldots, h'_{q} \sample H\\
(i', s') \gets \zdv(y, h_1, \ldots, h_{i - 1}, h'_{i}, \ldots, h'_{q}; \rho)\\
\pcif (i = i') \land (h_{i} \neq h'_{i})\ \pcreturn (1, s, s')\\
\pcind \pcelse \pcreturn (0, \bot, \bot)
}}
\caption{Forking algorithm $\forking_\zdv$}
\label{fig:forking_lemma}
\end{figure}
\end{lemma}

\subsection{Proof of the generalized forking lemma (\cref{lem:generalised_forking_lemma})}
\label{sec:forking_proof}

\begin{proof}
First denote by $\accProb(y)$ and $\frkProb(y)$ the following probabilities:
\begin{align*}
\accProb(y) & = \condprob{i \neq 0}{h_1, \ldots, h_q \sample H;\ (i, s) \gets \zdv(y, h_1, \ldots, h_q)}\,,\\
\frkProb(y) & = \condprob{b = 1}{(b, \vec{s}) \gets \genforking_{\zdv}^{m}(y, h_1, \ldots, h_q)}\,.
\end{align*}
We start by claiming that for all $y$
\begin{equation}\label{eq:frkProb_y}
\frkProb(y) \geq \frac{\accProb(y)^m}{q^{m - 1}} - \accProb(y) \cdot \left(1 - \frac{h!}{(h - m)! \cdot h^{m}}\right)
\end{equation}
Then, with the expectation taken over $y \sample \ig$, we have
\begin{align}
\frkProb & = \expected{\frkProb(y)} \geq \expected{\frac{\accProb(y)^m}{q^{m - 1}} - \accProb(y) \cdot \left(1 - \frac{h!}{(h - m)! \cdot h^{m}}\right)} \label{eq:use_eq1}\\
& \geq \frac{\expected{\accProb(y)}^m}{q^{m - 1}} - \expected{\accProb(y)} \cdot \left(1 - \frac{h!}{(h - m)! \cdot h^{m}}\right) \label{eq:by_lemma_jensen}\\
& = \frac{\accProb^m}{q^{m - 1}} - \accProb \cdot \left(1 - \frac{h!}{(h - m)! \cdot h^{m}}\right)\label{eq:by_accProb}\,,
\end{align}
where \cref{eq:use_eq1} comes from \cref{eq:frkProb_y}; \cref{eq:by_lemma_jensen} comes from the linearity of expectation and \cref{lem:jensen}; and \cref{eq:by_accProb} holds by the fact that $\expected{\accProb(y)} = \accProb$.

We now show \cref{eq:frkProb_y}. Let $J = \range{1}{m}^2 \setminus \smallset{(j, j)}_{j \in \range{1}{m}}$.
For any input $y$, with probabilities taken over the coin tosses of $\genforking_{\zdv}^{m}$, we have
\begin{align*}
\frkProb (y) & = \prob{i_j = i_{j'} \land i_j \geq 1 \land h_{i_j}^{j} \neq h_{i_{j'}}^{j'} \text{ for } (j, j') \in J} \\
& \geq \prob{i_j = i_{j'} \land i_j \geq 1 \text{ for } (j, j') \in J} %\\
- \prob{i_j \geq 1 \land h_{i_j}^{j} = h_{i_{j'}}^{j'} \text{ for some } (j, j') \in J}\\
& = \prob{i_j = i_{j'} \land i_j \geq 1 \text{ for } (j, j') \in J} -
\prob{i_j \geq 1} \cdot \left(1 - \frac{h!}{(h - m)! \cdot h^{m}}\right) \\
& = \prob{i_j = i_{j'} \land i_j \geq 1 \text{ for } (j, j') \in J} - \accProb(y) \cdot \left(1 - \frac{h!}{(h - m)! \cdot h^{m}}\right)\,.
\end{align*}
The probability that $h_{i_j}^{j} \neq h_{i_{j'}}^{j'}$ for all $(j, j') \in J$ with $i_j = i_{j'}$ equals
\[
\frac{h \cdot (h - 1) \cdot \ldots \cdot (h - m + 1)}{h^m} = \frac{h!}{(h - m)! \cdot h^m}.
\]
That is, it equals the number of $m$-element strings with pairwise distinct entries divided by the number of all $m$-element strings over a set of size $h$.
It remains to show that $\prob{i_j = i_{j'} \land i_j \geq 1 \text{ for } (j, j') \in J} \geq \infrac{\accProb(y)^m}{q^{m - 1}}$. Let $\RND{\zdv}$ denote the set from which $\zdv$ picks its coins at random.
For each $\iota \in \range{1}{q}$ let $X_\iota \colon \RND{\zdv} \times H^{\iota - 1} \to [0, 1]$ be defined by setting $X_\iota(\rho, h_1, \ldots, h_{\iota - 1})$ to
\[
\condprob{i = \iota}{h_\iota, \ldots, h_q \sample H; (i, s) \gets \zdv(y, h_1, \ldots, h_q; \rho)}
\]
for all $\rho \in \RND{\zdv}$ and $h_1, \ldots, h_{\iota - 1} \in H$. Consider $X_\iota$ as a random variable over the uniform distribution on its domain. Then
\begin{align*}
& \prob{i_j = i_{j'} \land i_j \geq 1 \text{ for } (j, j') \in J} = \sum_{\iota = 1}^{q} \prob{i_1 = \iota \land \ldots \land i_m = \iota} \\
& = \sum_{\iota = 1}^{q} \prob{i_1 = \iota} \cdot \condprob{i_2 = \iota}{i_1 = \iota} \cdot \ldots \cdot \condprob{i_m = \iota}{i_1 = \ldots = i_{m - 1} = \iota} \\
& = \sum_{\iota = 1}^{q} \sum_{\rho, h_1, \ldots, h_{\iota - 1}} X_{\iota} (\rho, h_1, \ldots, h_{\iota - 1})^{m} \cdot \frac{1}{\abs{\RND{\zdv}} \cdot \abs{H}^{\iota - 1}} = \sum_{\iota = 1}^{q} \expected{X_\iota^m} \,.
\end{align*}
Importantly, $\sum_{\iota = 1}^q \expected{X_{\iota}} = \accProb(y)$. By \cref{lem:jensen} we get
\[
\sum_{\iota = 1}^{q} \expected{X_\iota^m} \geq \sum_{\iota = 1}^{q} \expected{X_\iota}^m\,.
\]
Note that, e.g., for $X_i = 1$, $i \in \range{1}{q}$, the inequality becomes an equality; that is, it is tight.
We now use the H\"older inequality, cf.~\cref{lem:holder}, with $x_i = \expected{X_i}$, $y_i = 1$, $p = m$, and $q = m/(m - 1)$, obtaining
\begin{gather}
\left(\sum_{i = 1}^{q} \expected{X_i}\right)^{m} \leq \left(\sum_{i = 1}^{q} \expected{X_i}^m\right) \cdot q^{m - 1}\\
\frac{1}{q^{m - 1}} \cdot \accProb(y)^{m} \leq \sum_{i = 1}^{q} \expected{X_i}^m\,.
\end{gather}
Finally, we get
\[
\frkProb(y) \geq \frac{\accProb(y)^m}{q^{m - 1}} - \accProb(y) \cdot \left(1 - \frac{h!}{(h - m)! \cdot h^m}\right)\,.
\]
\qed
\end{proof}

\begin{lemma}\label{lem:jensen}
Let $\RND{\zdv}$ denote the set from which $\zdv$ picks its coins at random. For each $\iota \in \range{1}{q}$ let $X_\iota \colon \RND{\zdv} \times H^{\iota - 1} \to [0, 1]$ be defined by setting $X_\iota(\rho, h_1, \ldots, h_{\iota - 1})$ to
\[
\condprob{i = \iota}{h_\iota, \ldots, h_q \sample H; (i, s) \gets \zdv(y, h_1, \ldots, h_q; \rho)}
\]
for all $\rho \in \RND{\zdv}$ and $h_1, \ldots, h_{\iota - 1} \in H$. Consider $X_\iota$ as a random variable over the uniform distribution on its domain. Then $\expected{X_\iota^m} \geq \expected{X_\iota}^m$.
\end{lemma}
\begin{proof}
First we recall the Jensen inequality \cite{W:Weissten20}: if a random variable $X$ satisfies $\abs{\expected{X}} < \infty$ and $f$ is a Borel-measurable convex function, then
\[
f(\expected{X}) \leq \expected{f(X)}\,.
\]
Finally, we note that $\abs{\expected{X_\iota}} < \infty$ and that raising to the $m$-th power is a (Borel-measurable) convex function on the interval $[0, 1]$.
\qed
\end{proof}

\begin{lemma}[H\"older's inequality, simplified]\label{lem:holder}
Let $x_i, y_i \geq 0$, for $i \in \range{1}{q}$, and let $p, q > 1$ be real numbers such that $1/p + 1/q = 1$. Then
\begin{equation}
\label{eq:tightness}
\sum_{i = 1}^{q} x_i y_i \leq \left(\sum_{i = 1}^{q} x_i^p\right)^{\frac{1}{p}} \cdot \left(\sum_{i = 1}^{q} y_i^q\right)^{\frac{1}{q}}\,.
\end{equation}
\end{lemma}
\begin{remark}[Tightness of the H\"older inequality]
It is important to note that Inequality~(\ref{eq:tightness}) is tight.
More precisely, for $\expected{X_i} = x$, $i \in \range{1}{q}$, we have
\begin{gather*}
\sum_{i = 1}^q x = \left(\sum_{i = 1}^{q} x^m\right)^\frac{1}{m} \cdot \left(\sum_{i = 1}^{q} 1^{\frac{m}{m - 1}}\right)^{\frac{m - 1}{m}} \\
qx = \left(qx^m\right)^\frac{1}{m} \cdot q^{\frac{m - 1}{m}} \\
(qx)^m = qx^m \cdot q^{m - 1} \\
(qx)^m = (qx)^m\,.
\end{gather*}
\end{remark}

\begin{lemma}
\label{lem:root_prob}
Let $\p{f}(X)$ be a random degree-$d$ polynomial over $\FF_p$. Then the probability that $\p{f}(X)$ has roots in $\FF_p$ is at least $\infrac{1}{d!}$.
\end{lemma}
\begin{proof}
First observe that there are $p^{d}$ canonical (i.e.,~monic) degree-$d$ polynomials in $\FF_p[X]$; since scaling by the leading coefficient does not change the roots, it suffices to consider these. Each such polynomial has up to $d$ roots. Consider polynomials which split into factors of degree $1$, i.e.,~polynomials that have all $d$ roots in $\FF_p$. The roots can be picked in $\bar{C}^{p}_{d}$ ways, where $\bar{C}^{n}_{k}$ is the number of $k$-element combinations with repetition from an $n$-element set. That is,
\[
\bar{C}^n_k = \binom{n + k - 1}{k}\,.
\]
Thus, the probability that a randomly picked polynomial has all $d$ roots is
\begin{multline*}
p^{-d} \cdot \bar{C}^p_d = p^{-d} \cdot \binom{p + d - 1}{d} = p^{-d} \cdot \frac{(p + d - 1)!}{(p + d - 1 - d)! \cdot d!} = \\
p^{-d} \cdot \frac{(p + d - 1) \cdot \ldots \cdot p \cdot (p - 1)!}{(p - 1)! \cdot d!} = p^{-d} \cdot \frac{(p + d - 1)\cdot \ldots \cdot p}{d!} \\
\geq p^{-d} \cdot {\frac{p^d}{d!}} = \frac{1}{d!}\,.
\end{multline*}
\qed
\end{proof}

\input{updatable}

\section{From a late updatable SE to SE}

\michals{13.09}{Here I want to show an idea of a reduction of a late updatable SE to SE}

Let $\plonk$ be a forking simulation-extractable proof system. Let $\bdv$ be an SE adversary and let $\adv$ be an \emph{algebraic} LUSE adversary. We show that the existence of an extractor $\ext_\bdv$ for $\bdv$ assures the existence of an extractor $\ext_\adv$ for $\adv$.

Here we simplify $\plonk$ a bit and assume that polynomials are written in the standard (monomial) basis instead of the Lagrange basis. That is, for instance, $\p{a} (X) = \sum_{i = 0}^n a_i X^i + \ZERO_\HHH (X) (b_1 + b_2 X)$.

The argument goes as follows. Let $\srs_0 \to \srs_1 \to \ldots \to \srs_n$ be the sequence of SRSs co-produced by $\adv$. Let $\srs_k$ be the last honestly computed SRS. The adversary $\bdv$ proceeds as follows:
\begin{enumerate}
\item Get as input an SRS $\srs$ honestly created using some trapdoor $\chi$.
\item Internally run adversary $\adv$. Process $\adv$'s SRS update queries.
\item Guess the index $k$ of the last honest update and set $\srs_k = \srs$. (From now on we will denote this SRS by $\srs$.)
\item Let $\adv$ update $\srs$ according to its wishes. Let the final SRS be $\srs_n$, computed using some trapdoor $\alpha \chi$ ($\alpha$ picked by $\adv$).
\item Since $\adv$ is algebraic, $\bdv$ learns $\alpha$.
\item $\bdv$ processes $\adv$'s simulation queries (which should be answered with respect to $\srs_n$).
\begin{enumerate}
\item On an instance and witness $\wit'$ (note that in $\plonk$ the instance is a part of the witness) compute $\wit$ such that $\wit_i = \alpha^i \wit'_i$, for $i = 1 .. (n - 1)$; ($\bdv$ gives as instance/witness the coefficients of some polynomial $\p{\tilde{a}'}$ whose commitment is its evaluation at $\alpha \chi$. We need to express it in the basis related to $\chi$, as the obtained polynomial $\p{\tilde{a}}$ will be committed at $\chi$.
We observe $\p{\tilde{a}'} (\alpha \chi) = \p{\tilde{a}} (\chi)$, for some $\p{\tilde{a}}$ we are now computing) $\wit_{i + n} = \alpha^i \wit_{i + n}$, $\wit_{i + 2n} = \alpha^i \wit_{i+ 2n}$ (the last two translations are since we need to compute $\p{\tilde{b}}$ from $\p{\tilde{b}'}$ and $\p{\tilde{c}}$ from $\p{\tilde{c}'}$) \item Provide $\wit$ to the simulator. \item Get simulated proof $\gone{\p{a} (\chi), \p{b} (\chi), \p{c} (\chi)}$ , $\beta, \gamma$, $\p{z} (\chi)$, $\alpha$, $\p{t} (\chi)$, $\chz$, $\p{a} (\chz), \ldots, \p{t} (\chz), \p{z} (\chz \omega)$, $\delta$. The simulator (which uses trapdoor, that's fine here and simplifies things) simply takes random polynomials $\p{a}, \p{b}, \p{c}$, random challenges, compute $\p{z}, \p{t}$ in regard with the picked polynomials and challenges. Commits to polynomials. $\simulator$ uses trapdoor to commit to $\p{t}$ (it is infeasible for an adversary which doesn't know the instance's witness.) \item Translate the proof. \begin{enumerate} \item Since simulator for $\adv$ would pick random polynomials $\p{a'}, \p{b'}, \p{c'}$ and send $\gone{\p{a'} (\alpha \chi), \p{b'} (\alpha \chi), \p{c'} (\alpha \chi)}$. Just send $\p{a} (\chi), \p{b} (\chi), \p{c} (\chi)$. That is $\p{a'} (\alpha \chi) = \p{a} (\chi)$ and $\p{a'} (\alpha X) = a'_0 + a'_1( \alpha X) + \ldots + a'_n (\alpha^n X^n)= a_0 + a_1 \alpha X + \ldots + (a_n \alpha^n) X^n$. Output commitments to primed polynomials. \item The polynomials are the same, hence RO answers $\beta, \gamma$ are the same as well. \item Polynomial $\p{z}$ can also be picked by the simulator at random, hence $\p{z'} (\alpha \chi) := \p{z} (\chi)$. Output $\gone{\p{z'} (\alpha \chi)}$ \item Again, the random oracle response is the same: $\alpha$. \item Since $\p{t'} (X)$ is determined by $\p{a'} (X), \p{b'} (X), \p{c'} (X), \p{z'} (X)$ and some publicly known polynomials, and $\gone{\p{a'} (\alpha \chi)} = \gone{\p{a} (\chi)}, \ldots$ we set $\p{t'} (\alpha \chi) = \p{t} (\chi)$ and output $\gone{\p{t'} (\alpha \chi)}$. \item Get evaluation challenge $\chz$ and compute $\chz' = \chz \alpha$ (need to adjust evaluation point to have the same evaluation values for primed and non-primed polynomials). \item Get evaluations $\p{a} (\chz), \ldots$; output them as evaluations $\p{a'} (\chz'), \ldots$. \item Get the opening challenge $\nu$ \emph{program $\adv$'s oracle to output $\nu$} \michals{13.09}{we changed the partial transcript hence we need to program the oracle. Is that a problem?} \item Compute evaluations' openings. Observe that (here simplification -- we show correctness of evaluation to $\p{a}$ but batched version should work similarly) \end{enumerate} \begin{align} W_{\chz} (\chi) & = \frac{\p{a} (\chi) - \p{a} (\chz)}{\chi - \chz} = \\ & \frac{\p{a'} (\alpha \chi) - \p{a'} (\alpha \chz)}{\chi - \chz} = \\ & \alpha \frac{\p{a'} (\alpha \chi) - \p{a'} (\alpha \chz)}{\alpha \chi - \alpha \chz} \\ & = \alpha W_{\chz \alpha} (\alpha \chi). \end{align} Hence $\bdv$ sends to $\adv$ opening $W_{\chz \alpha} (\alpha \chi) = \frac{1}{\alpha} W_{\chz} (\chi)$. \item Also, the verification of the correctness of the opening holds. \michals{13.09}{Note, for the batched version we need to query random oracle to get batching coefficients. Here we need to program RO again, to have the same coeffs.} \end{enumerate} \item Now we need to show that proof output by $\adv$ for $\srs_n$ can be translated to a proof in $\srs$. This is done similarly to the above. 
All RO queries $\adv$ makes to create the proof are answered honestly by $\bdv$, except the query that gives the challenge $\chz'$. More precisely, when $\adv$ passes a partial transcript to obtain $\chz'$, $\bdv$ gets the random oracle's answer $\chz$ and sets $\chz' = \alpha \chz$.

The unique response property assures that the output proof will not share the first 3 rounds with any simulated proof. Hence we do not need to worry about a mismatch between the programmed random oracle and the real random oracle. (That is, the fact that $\bdv$ programmed the RO to output the challenge $\chz'$ instead of $\chz$ will not be noticed.)

Given a proof $\zkproof' = \gone{\p{a'} (\alpha \chi), \p{b'} (\alpha \chi), \p{c'} (\alpha \chi)} \ldots$ for an instance $\inp'$ output by $\adv$, adversary $\bdv$ proceeds as follows:
\begin{enumerate}
\item Translate $\inp'$ into the corresponding instance $\inp$ for $\srs$. More precisely, for the known $\wit'_i$ set $\wit_i = \wit'_i \alpha^i$, $\wit_{i + n} = \wit'_{i + n} \alpha^i$, $\wit_{i + 2n} = \wit'_{i + 2n} \alpha^i$ for $i \in \range{1}{\noofc}$.
\item Get the commitments $\gone{\p{a'} (\alpha \chi), \p{b'} (\alpha \chi), \p{c'} (\alpha \chi)}$ and pass them to the random oracle as $\gone{\p{a} (\chi), \p{b} (\chi), \p{c} (\chi)}$; get challenges $\beta, \gamma$.
\item Add the commitments and challenges to the proof $\zkproof$.
\item Set $\beta' = \beta$ and $\gamma' = \gamma$. Pass the challenges to $\adv$.
\item Get the commitment $\gone{\p{z'} (\alpha \chi)}$ and pass it as $\gone{\p{z} (\chi)}$; get challenge $\alpha$.
\item Add the commitments and challenges to the proof $\zkproof$.
\item Set $\alpha' = \alpha$ and pass it to $\adv$.
\item Get the commitments $\gone{\p{t'_{lo}} (\alpha \chi), \p{t'_{mid}} (\alpha \chi), \p{t'_{hi}} (\alpha \chi)}$ and pass them to the random oracle as $\gone{\p{t_{lo}} (\chi), \p{t_{mid}} (\chi), \p{t_{hi}} (\chi)}$. Get the challenge $\chz$.
\item Add the commitments and challenges to the proof $\zkproof$.
\item Set $\chz' = \alpha \chz$ and give $\chz'$ to $\adv$.
\item Get the evaluations $\p{a'} (\chz'), \ldots$.
\item Add the evaluations to the proof $\zkproof$.
\item Pass the partial transcript to the random oracle and get the challenge $\nu$.
\item Set $\nu' = \nu$ and pass it to $\adv$.
\item Get the polynomial openings $\gone{W_{\chz'} (\alpha \chi)}$ and $\gone{W_{{\chz'} \omega} (\alpha \chi)}$.
\item Set $\gone{W_\chz (\chi)} = \gone{\alpha W_{\chz'} (\alpha \chi)}$ and $\gone{W_{\chz \omega} (\chi)} = \gone{\alpha W_{{\chz'} \omega} (\alpha \chi)}$.
\end{enumerate}
\end{enumerate}
Since $\adv$'s proof is acceptable, $\bdv$'s proof is acceptable as well. Hence there is an extractor $\ext_\bdv$ that outputs a witness $\wit$ given: $\bdv$, its randomness $r_\bdv$, $Q$ -- the list of simulated proofs, and $Q_\ro$ -- the list of random oracle responses.
$\ext_\adv$ is constructed as follows: $r_\adv = r_\bdv$; $Q'$ -- the list of simulated proofs, but w.r.t.~the translations $\bdv$ made; $Q'_\ro$ -- the list of random oracle responses, again, as in the case of $Q'$, with the changes introduced by $\bdv$.
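To illustrate the basis translation used above, consider a toy degree-$1$ case; this is only an illustrative calculation with hypothetical coefficients $a'_0, a'_1$, following the rescaling $\wit_i = \wit'_i \alpha^i$. If $\p{a'}(X) = a'_0 + a'_1 X$, then the translated polynomial is $\p{a}(X) = a'_0 + (\alpha a'_1) X$, so that
\[
\p{a}(\chi) = a'_0 + a'_1 \alpha \chi = \p{a'}(\alpha \chi)\,;
\]
that is, the commitment computed under $\srs$ (trapdoor $\chi$) coincides with the commitment expected under the updated SRS (trapdoor $\alpha \chi$). Likewise, $\p{a}(\chz) = \p{a'}(\alpha \chz) = \p{a'}(\chz')$, which matches the translation $\chz' = \alpha \chz$ of the evaluation challenge and explains why the openings are rescaled by $\alpha$ when moving between the two SRSs.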
\michals{20.09}{The following is for the setting when there is an honest SRS at the beginning and then the adversary updates it once.}

\newcommand{\inpa}{\inp_\adv}
\newcommand{\inpb}{\inp_\bdv}
\newcommand{\inps}{\inp_\simulator}
\newcommand{\zkproofa}{\zkproof_\adv}
\newcommand{\zkproofb}{\zkproof_\bdv}
\newcommand{\zkproofs}{\zkproof_\simulator}
\newcommand{\tra}{\trans_\adv}
\newcommand{\trb}{\trans_\bdv}

\begin{theorem}
Let $\proofsystem = (\kgen, \prover, \verifier, \simulator)$ be a $(2\mu + 1)$-message $\ur{k}$ FSE zkSNARK compiled from an AHP proof system with KZG as the commitment scheme. Let $i_\chz$ be the index of the round in which $\prover$ gets its evaluation challenge $\chz$, and assume $i_\chz > k$. Then $\proofsystem$ is also USE.
\end{theorem}
\begin{proof}
Let $\adv$ be an SE adversary. Since the SNARK is SE, for $\adv$ there exists an extractor $\ext_\adv$ which takes as input: $r_\adv$ -- $\adv$'s randomness, $Q$ -- the list of $\adv$'s queries to the simulator oracle $\simO$ and their responses, and $Q_\ro$ -- the list of $\adv$'s queries to $\ro$ and its responses.

Let $\bdv$ be a USE adversary. We show how to build an extractor $\ext_\bdv$ using $\ext_\adv$. Extractor $\ext_\bdv$ takes the following input: $r_\bdv$ -- $\bdv$'s randomness, $Q$ -- the list of $\bdv$'s simulator oracle queries and its responses, $Q_\ro$ -- the list of $\bdv$'s random oracle queries and its responses, and $Q_\srs$ -- the list of $\bdv$'s update oracle queries and its responses.
Denote by $\srs$ the SRS for $\adv$ and by $\srs'$ the updated SRS. Let $\chi$ be the trapdoor for $\srs$ and $\alpha \chi$ the trapdoor for $\srs'$. As previously, we denote by $q$ the upper bound on the number of random oracle queries $\bdv$ can make.

Note that a zkSNARK compiled from an AHP consists of the following:
\begin{itemize}
\item Commitments to polynomials $P$ sent by the prover; we denote the list of these by $C$.
\item Challenges $\beta_i$ sent by the verifier; the corresponding list is denoted by $B$.
\item The evaluation challenge $\chz$ sent by the verifier.
\item Polynomial evaluations at $\chz$ and proofs of correct evaluation sent by the prover. The list of the former is denoted by $E$ and of the latter by $W$.
\end{itemize}
We denote by an index $i$ the round in which the particular proof element is sent. For example, for commitments sent in the second round we write $C_2$.
Here we additionally require that the set of polynomials sent by the prover in the first round encodes the statement's witness and is masked using the vanishing polynomial. \michals{23.09}{Formalize it}

In the following we denote elements sent w.r.t.~SRS $\srs$ \emph{without} a prime and elements sent w.r.t.~$\srs'$ \emph{with} a prime.
% To distinguish instances and proofs used by $\bdv$ and $\adv$ from the final instance and proof output by $\bdv$ ($\adv$) we denote the latter by $(\inpb, \zkproofb)$ (resp.~($\inpa, \zkproofa$)).
Eventually, $\adv$ and $\bdv$ output proofs; we denote by $\tra$ and $\trb$ the partial transcripts of those.

The idea for the proof goes as follows. We define an adversary $\adv$ that internally runs adversary $\bdv$. Then we show the existence of an extractor $\ext_\bdv$ using the existence of $\ext_\adv$.

$\adv$ answers $\bdv$'s queries as follows:
\begin{enumerate}
\item Set $Q_m = \emptyset$, $\tra = \emptyset$.
\item Guess indices $I = \smallset{i_1, \ldots, i_k}$ of the random oracle queries used to compute the final proof $\zkproofb$. (We allow $\adv$ to guess this list adaptively.)
\item \label{it:used-transcript} If query $x$ has its index in $I$ then:
\begin{enumerate}
% \item Parse instance $\inpb$ from $x$;
\item Parse the last message $m$ in $x$.
\begin{itemize}
\item If $\trb$ is a partial transcript for the SRS $\srs$, then add $m$ to $\tra$.
\item If $\trb$ is a partial transcript for the SRS $\srs'$:
\begin{inparaenum}[(a)]
\item if $m$ is the proven instance, then add $m / \alpha$ to $\tra$;
\item if $m \in C$, then add $m$ to $\tra$;
\item if $m \in E$, then add $m \alpha$ to $\tra$;
\item if $m \in W$, then add $m / \alpha$ to $\tra$.
\end{inparaenum}
\end{itemize}
\item Compute $y \gets \ro(\tra)$;
\item Append $y$ to $\tra$.
\item Return $y$.
\end{enumerate}
%
\item \label{it:evaluation-challenge} On a random oracle query $x$ on a partial transcript made to get the evaluation challenge $\chz$, pass the query on, get the answer $\chz$, and return to $\bdv$ the challenge $\chz' \gets \alpha \chz$.
\item \label{it:partial-transcript} If there is a partial transcript $t$ of a proof in $Q_m$ such that $t = x$, pick a random element $y$ and set $\ro(x) = y$.
\item On other random oracle queries, pass the queries to $\ro$ and return its answer.
\item On $\bdv$'s request to see a simulated proof for $(\srs, \inp, \wit)$, $\adv$ passes the query to its $\simO$ and returns its answer $\zkproof$.
\item On $\bdv$'s request to see a simulated proof for $(\srs', \inp', \wit')$, $\adv$ does the following:
\begin{enumerate}
\item $(\inp, \wit) \gets \MoveInstanceBackward(\srs', \inp', \wit')$
\item Ask $\simO$ for a simulated proof $\zkproof$ for $(\inp, \wit)$
\item $\zkproof' \gets \MoveProofForward(\srs, \inp, \zkproof)$
\item Return $\zkproof'$
\item Add all partial transcripts of $(\inp, \zkproof)$, $(\inp', \zkproof')$ to $Q_m$
\end{enumerate}
\item On $\bdv$'s final proof $\zkproofb$, $\adv$ outputs $\zkproofa \gets \tra$.
%$(\srs', \inp', \zkproof')$, $\adv$
% proceeds as follows:
% \begin{enumerate}
% \item $(\inp, \wit) \gets \MoveInstanceBackward(\srs', \inp', \wit')$
% \item $\zkproof \gets \MoveProofBackward (\srs', \inp', \zkproof')$
% \item Return $(\srs, \inp, \zkproof)$
% \end{enumerate}
% \item On $\bdv$'s proof $(\srs, \inp, \zkproof)$, $\adv$ returns $(\srs, \inp, \zkproof)$.
\end{enumerate}
We show, by a series of games, that the probability that $\adv$ fails to output an acceptable proof is negligible.

\ncase{Game 0} In this game the adversary $\bdv$ wins if it outputs an acceptable instance and proof $\inpb, \zkproofb$ such that $\ext_\bdv$ fails to extract the corresponding witness with non-negligible probability. This is the standard USE winning condition for $\bdv$.

\ncase{Game 1} In this game the environment aborts if the proof $\zkproofb$ the adversary outputs utilizes a programmed random oracle response.
%has the same first message as a first message of some simulated proof $\zkproofs$.

\ncase{Game 0 to Game 1} Note that the adversary $\bdv$ could output a final proof $\zkproofb$ that utilizes a query programmed by $\adv$, cf.~\cref{it:partial-transcript}. In that case, $\adv$ could not return such a proof, as it is not valid because the challenges are computed incorrectly. We argue that the probability that $\bdv$ outputs a valid proof with a programmed RO output is negligible.

Note that the only possibility for $\bdv$ to utilize a programmed random oracle response while producing an acceptable proof is to have a partial proof matching a partial simulated proof.
More precisely, if the programmed challenge $\beta$ that is used comes after the $m$-th message, then $\zkproofb[0..m] = \zkproofs[0..m]$, for some simulated proof $\zkproofs$. Importantly, $m < k$, as otherwise the adversary breaks the $\ur{k}$ property.
Obviously, if $\zkproofb[0..m] = \zkproofs[0..m]$ then $\zkproofb[0..m'] = \zkproofs[0..m']$ for $m' < m$. We show that it is infeasible for $\bdv$ to output $\zkproofb$ such that $\zkproofb[0..1] = \zkproofs[0..1]$. This is done by a reduction to the hiding property of the polynomial commitment scheme. More precisely, let $\rdv$ be a reduction that utilizes $\bdv$ to break the hiding property; it proceeds as follows:
\begin{compactenum}
\item Get $\srs_{\PCOM}$, compute the proof system's SRS $\srs$, and present the SRS to $\bdv$.
\item Get the instance $c$ -- a commitment to an unknown polynomial.
\item Set up the adversary $\adv$, proceeding as above, and the oracles: the random oracle and the simulator oracle for $\adv$.
\item Answer random oracle queries honestly, i.e.~by picking random elements; answer simulator oracle queries with simulated proofs computed using the trapdoor-less simulator.
\item Guess which simulated proof $\zkproofs$ will be utilized in the final proof $\zkproofb$ that $\bdv$ eventually outputs. Denote $\zkproofs$'s instance by $\inps$.
\item When $\adv$ asks $\simO$ for a simulated proof for $\inps$ (i.e.~$\bdv$ asks for $\inps$ w.r.t.~SRS $\srs$) or for $\inps / \alpha$ (i.e.~$\bdv$ asks for $\inps$ w.r.t.~$\srs'$ and $\adv$ translates it into $\inps / \alpha$ to make it work with $\srs$), include $c$ in the first message of the simulated proof.
\item Eventually, $\bdv$ asks for the evaluation challenge $\chz$ and outputs a proof $\zkproofb$.
\item Since the proof $\bdv$ output was acceptable, $\bdv$ correctly evaluated the polynomials sent in the first round. In particular, it evaluated the polynomial committed to in $c$.
\item \label{it:rewind} Rewind $\bdv$ up to the point where the challenge $\chz$ is presented and pick another challenge $\chz'$. If, for $\chz'$, adversary $\bdv$ does not output a proof for $\inpb$, rewind the adversary and pick another evaluation challenge.
\item Eventually, get as many evaluations as necessary to interpolate the polynomial committed to in $c$. Reveal and return the polynomial.
\end{compactenum}
We note that since the probability that $\bdv$ outputs a valid proof is non-negligible, we can use the forking lemma and conclude that, with overwhelming probability, $\rdv$ gets the necessary number of transcripts in polynomial time, cf.~\cref{it:rewind}. Hence the probability that the adversary wins in Game 0 but not in Game 1 is negligible.

We now show that $\bdv$ wins Game 1 with negligible probability. More precisely, we show how $\ext_\bdv$ is constructed. Since $\proofsystem$ is FSE, there exists an extractor $\ext_\adv(\srs, r_\adv, Q_\adv, Q_\adv^\ro)$ for $\adv$ that returns a witness $\wit$ such that $\REL(\srs, \inp, \wit) = 1$.
Extractor $\ext_\bdv ((\srs, \srs'), r_\bdv, Q_\bdv, Q^\ro_\bdv, Q^\srs_\bdv)$ proceeds as follows:
\begin{itemize}
\item Set $r_\adv = r_\bdv$,
\item Set $Q_\adv = $
\item Set $Q_\adv^\ro = $
\end{itemize}

\ncase{Probability of guessing $I$ correctly}
A rough lower bound for the probability that $\adv$ correctly guesses all indices in $I$, which is necessary for $\adv$ to succeed, is $q^{-\mu}$. Fortunately, this bound can be made much tighter.
Observe, that if $\proofsystem$ is $\ur{k}$, then $\adv$ can tell, after $k$-th challenge, whether the random oracle query given by $\bdv$ is for the final proof or not -- after $k$-th challenge $\bdv$'s messages are already determined by the previous messages and challenges. Hence, probability that $\adv$ guesses $I$ correctly is at least $q^{-k}$. % \ncase{Extractor $\ext_\bdv$} Since $\proofsystem$ is FSE there exists an extractor $\ext_\adv(\srs, r_\adv, Q_\adv, Q_\adv^\ro)$ for $\adv$ that returns witness $\wit$ such that $\REL(\srs, \inp, \wit) = 1$. We show existance of extractor $\ext_\bdv$. More precisely $\ext_\bdv ((\srs, \srs'), r_\bdv, Q_\bdv, Q^\ro_\bdv, Q^\srs_\bdv)$ proceeds as follows: % \begin{itemize} % \item Set $r_\adv = r_\bdv$, % \end{itemize} \begin{figure} \centering \begin{pcvstack}[center,boxed] \begin{pchstack} \procedure{$\MoveInstanceBackward (\srs', \inp', \wit', \alpha)$} { \pcreturn (\inp, \wit) \gets (\inp', \wit') \cdot \alpha. } \end{pchstack} % \pcvspace \begin{pchstack} \procedure{$\MoveProofForward (\srs, \inp, \zkproof, \alpha)$} { \inp' \gets \inp / \alpha \\ \zkproof'.C \gets \zkproof.C\\ \zkproof'.B \gets \zkproof.B\\ \zkproof'.\chz \gets \zkproof.\chz / \alpha\\ \zkproof'.E \gets \zkproof.E\\ \zkproof'.W \gets \zkproof.W / \alpha \\ \pcreturn \zkproof'} \pchspace \procedure{$\MoveProofBackward (\srs', \inp', \zkproof', \alpha)$} { \inp \gets \inp' \cdot \alpha\\ \zkproof.C \gets \zkproof'.C\\ \zkproof.B \gets \zkproof'.B\\ \zkproof.\chz \gets \zkproof'.\chz \cdot \alpha\\ \zkproof.E \gets \zkproof'.E\\ \zkproof.W \gets \zkproof'.W \cdot \alpha \\ \pcreturn \zkproof} \end{pchstack} \end{pcvstack} \caption{Moving elements between SRS-s. \michals{23.09}{I think that figure may not be needed now.}} \label{fig:operations} \end{figure} \end{proof} \begin{definition}[Witness-first proof system] \newcommand{\encode}{\pcalgostyle{Encode}} \newcommand{\decode}{\pcalgostyle{Decode}} We say that an AHP NIZK proof system $\proofsystem$ is \emph{witness-first} if \begin{compactenum} \item there are efficient algorithms $\encode, \decode$ such that for instance $\inp$, witness $\wit$, polynomials $P = \{\p{p_1}, \ldots, \p{p_k}\}$ sent by the prover in the first round: \begin{compactitem} \item $\encode(\inp, \wit) = P$, \item $\decode(P) = (\inp, \wit)$. \end{compactitem} \item The proof contains evaluations of each of polynomials in $P$ at evaluation challenge $\chz$. \end{compactenum} \end{definition} That is, given polynomials sent by the prover in the first round one can efficiently compute instance's witness. \begin{lemma}[$k$-unique response property to $1$-unique response property] Let $\proofsystem$ be witness-first proof system compiled from AHP $\PCOM$ polynomial commitment scheme. Let $P$ be a set of polynomials that are committed to by a prover in the first round. Let $\chz$ be an evaluation challenge that is given to the prover in Round $k'$, for some $k' > k$. Then ... \end{lemma} \begin{proof} \end{proof} \section{Alternative proof of Thm.~2} \begin{definition}[$(\eps(\secpar), k,n)$-forking soundness] \changedm{Let $\proofsystem = (\kgen, \prover, \verifier, \simulator)$ be an $(2 \mu + 1)$-message proof system for a relation $\REL$. For any $\ppt$ adversary $\advse^{\ro} (\srs; r)$ we consider the procedure $\zdv$ that provided the transcript $(\srs, \adv, r, Q_{H})$ and $h_1, \ldots, h_q$ runs $\adv$ by providing it with random oracle queries and simulated proofs. 
While $Q_{H}$ is consistent with $h_1, \ldots, h_q$, it replays the proofs of $Q$. % $\zdv$ returns the index $i$ of the random oracle query made for challenge $k$ and the proof $\adv$ returns Consider the algorithm $\genforking_{\zdv}^{n}$ that rewinds $\zdv$ to produce a $(1,\dots, n,\dots, 1)$-tree of transcripts such that none of the $n$ challenges in round $k$ were used in simulated proofs. We say that $\ps$ is $(\eps(\secpar), k,n)$-forking sound if for any PPT adversary the probability that \begin{align*} \Pr\left[ \REL(\inp, \wit) = 0 \,\Biggl|\, \begin{aligned} & \srs \sample \kgen(\REL), r \sample \RND{\advse}, (\inp_{\advse}, \zkproof_{\advse}) \gets \advse^{\ro} (\srs; r), \\ & (1, \tree) \gets \genforking_{\zdv}^{m}((\srs,\adv,r, Q_{H}),Q_{H}), \wit \gets \extt(\tree) \end{aligned} \right] \leq \eps(\secpar). \end{align*} List $Q_\ro$ contains all $\advse$'s queries to $\ro$ and $\ro$'s answers.} \end{definition} \begin{theorem}[Forking simulation-extractable multi-message protocols] \label{thm:se} Let $\ps = (\kgen, \prover, \verifier, \simulator)$ be an interactive $(2 \mu + 1)$-message proof system for $\RELGEN(\secparam)$ that is honest verifier zero-knowledge in the standard model\footnote{Crucially, we require that one can provide an indistinguishable simulated proof without any additional knowledge, as e.g~knowledge of a SRS trapdoor.}, has $\ur{k}$ property with security $\epsur(\secpar)$, and is $(\epss(\secpar), k, n)$-forking sound. % for % $n_i = 1, i \in \range{1}{\mu} \setminus \smallset{k}$ and $n_k = n$. % Let $\ro\colon \bin^{*} \to \bin^{\secpar}$ be a random oracle. Then $\psfs$ is forking simulation-extractable with extraction error $\epsur(\secpar)$ against $\ppt$ algebraic adversaries that makes up to $q$ random oracle queries and returns an acceptable proof with probability at least $\accProb$. The extraction probability $\extProb$ is at least \( \extProb \geq \frac{1}{q^{n - 1}} (\accProb - \epsur(\secpar))^{n} -\eps(\secpar)\,, \) for some negligible $\eps(\secpar)$. \end{theorem} \begin{proof} \ngame{0} This is a simulation extraction game played between an adversary $\advse$ who has given access to a random oracle $\ro$ and simulator $\psfs.\simulator$. There is also an extractor $\ext$ that, from a proof $\zkproof_{\advse}$ for instance $\inp_{\advse}$ output by the adversary and from transcripts of $\advse$'s operations is tasked to extract a witness $\wit_{\advse}$ such that $\REL(\inp_{\advse}, \wit_{\advse})$ holds. $\advse$ wins if it manages to produce an acceptable proof and the extractor fails to reveal the corresponding witness. In the following game hops we upper-bound the probability that this happens. \ngame{1} This is identical to $\game{0}$ except that now the game is aborted if there is a simulated proof $\zkproof_\simulator$ for $\inp_{\advse}$ such that $(\inp_{\advse}, \zkproof_\simulator[1..k]) = (\inp_{\advse}, \zkproof_{\advse}[1..k])$. That is, the adversary in its final proof reuses at least $k$ messages from a simulated proof it saw before and the proof is acceptable. Denote that event by $\event{\errur}$. \ncase{Game 0 to Game 1} $\prob{\event{\errur}} \leq \epsur(\secpar)$. The proof goes exactly as in \cref{thm:simsnd}. \COMMENT{We have, \( \prob{\game{0} \land \nevent{\errur}} = \prob{\game{1} \land \nevent{\errur}} \) and, from the difference lemma, cf.~\cref{lem:difference_lemma}, \[ \abs{\prob{\game{0}} - \prob{\game{1}}} \leq \prob{\event{\errur}}\,. 
\] Thus, to show that the transition from one game to another introduces only minor change in probability of $\advse$ winning it should be shown that $\prob{\event{\errur}}$ is small. We can assume that $\advse$ queried the simulator on the instance it wishes to output---$\inp_{\advse}$. We show a reduction $\rdvur$ that utilises $\advse$, who outputs a valid proof for $\inp_{\advse}$, to break the $\ur{k}$ property of $\ps$. Let $\rdvur$ run $\advse$ internally as a black-box: \begin{itemize} \item The reduction answers both queries to the simulator $\psfs.\simulator$ and to the random oracle. It also keeps lists $Q$, for the simulated proofs, and $Q_\ro$ for the random oracle queries. \item When $\advse$ makes a fake proof $\zkproof_{\advse}$ for $\inp_{\advse}$, $\rdvur$ looks through lists $Q$ and $Q_\ro$ until it finds $\zkproof_{\simulator}[0..k]$ such that $\zkproof_{\advse}[0..k] = \zkproof_{\simulator}[0..k]$ and a random oracle query $\zkproof_{\simulator}[k].\ch$ on $\zkproof_{\simulator}[0..k]$. \item $\rdvur$ returns two proofs for $\inp_{\advse}$: \begin{align*} \zkproof_1 = (\zkproof_{\simulator}[1..k], \zkproof_{\simulator}[k].\ch, \zkproof_{\simulator}[k + 1..\mu + 1])\\ \zkproof_2 = (\zkproof_{\simulator}[1..k], \zkproof_{\simulator}[k].\ch, \zkproof_{\advse}[k + 1..\mu + 1]) \end{align*} \end{itemize} If $\zkproof_1 = \zkproof_2$, then $\advse$ fails to break simulation extractability, as $\zkproof_2 \in Q$. On the other hand, if the proofs are not equal, then $\rdvur$ breaks $\ur{k}$-ness of $\ps$. This happens only with negligible probability $\epsur(\secpar)$, hence \( \prob{\event{\errur}} \leq \epsur(\secpar)\,. \) } \ngame{2} \changedm{Define an algorithm $\bdv$ such that given SRS $\srs$ and access to $\ro$, randomness $r_\bdv$ internally runs $\advse^{\simO, \ro} (\srs; r_\adv)$, where \begin{compactenum} \item $r_\bdv$ is split into two substrings $r_\adv$ and $r_\simulator$; \item $\bdv$ answers $\adv$ simulator queries itself by programming the random oracle locally. Coins from $r_\simulator$ are use to that. \item Eventually $\adv$ outputs instance $\inp_\advse$ and proof $\zkproof_\advse$. The same instance and proof are output by $\bdv$. \item $\bdv$ sets up (initially empty) list $Q$ where all simulated proofs that $\adv$ asked for are kept. \end{compactenum} In this game, the environment aborts also when it fails to build a $(1, \ldots, 1, n, 1, \ldots, 1)$-tree of accepting transcripts $\tree$ by rewinding $\bdv$. Denote that event by $\event{\errfrk}$. } \ncase{Game 1 to Game 2} \changedm{ Note that for every acceptable proof $\zkproof_{\advse}$, we may assume that whenever $\advse$ outputs in Round $k$ message $\zkproof_{\advse}[k]$, then the $(\inp_{\advse}, \zkproof_{\advse}[1..k])$ random oracle query was made by the adversary, not the simulator\footnote{\cite{INDOCRYPT:FKMV12} calls these queries \emph{fresh}.}, i.e.~there is no simulated proof $\zkproof_\simulator$ on $\inp_\simulator$ such that $(\inp_{\advse}, \zkproof_{\advse} [1..k]) = (\inp_\simulator, \zkproof_\simulator[1..k])$. Otherwise, the game would be already interrupted by the error event in Game $\game{1}$. As previously, \( \abs{\prob{\game{1}} - \prob{\game{2}}} \leq \prob{\event{\errfrk}}\,. \) We describe our extractor $\ext$ here. The extractor takes as input relation $\REL$, SRS $\srs$, $\bdv$'s code, its randomness $r_\bdv$, the output instance $\inp_{\advse}$ and proof $\zkproof_{\advse}$, and the list of random oracle queries and responses $Q_\ro$. 
Then, $\ext$ starts a forking algorithm $\genforking^{n}_\zdv(y,h_1, \ldots, h_q)$ for $y = (\srs, \bdv, r_\bdv, \inp_{\advse}, \zkproof_{\advse})$ where we set $h_1, \ldots, h_q$ to be the consecutive queries from list $Q_\ro$. We run $\bdv$ internally in $\zdv$. To assure that in the first execution of $\zdv$ the adversary $\bdv$ produces the same $(\inp_{\advse}, \zkproof_{\advse})$ as in the extraction game, $\zdv$ provides $\bdv$ with the same randomness $r_\bdv$ and answers queries to the random oracle with pre-recorded responses in $Q_\ro$. % Note, that since the view of the adversary when run inside $\zdv$ is the same as its view with access to the real random oracle, it produces exactly the same output. After the first run, $\zdv$ outputs the index $i$ of a random oracle query that was used by $\bdv$ to compute the challenge $\zkproof[k].\ch = \ro(\zkproof_{\advse}[0..k])$ it had to answer in the $(k + 1)$-th round and adversary's transcript, denoted by $s_1$ in $\genforking$'s description. If no such query took place $\zdv$ outputs $i = 0$. Then new random oracle responses are picked for queries indexed by $i, \ldots, q$ and the adversary is rewound to the point just prior to when it gets the response to RO query $\zkproof_{\advse}[0..k]$. The adversary gets a random oracle response from a new set of responses $h^2_i, \ldots, h^2_q$. If the adversary requests a simulated proof after seeing $h^2_i$ then $\zdv$ computes the simulated proof on its own. Eventually, $\zdv$ outputs index $i'$ of a query that was used by the adversary to compute $\ro(\zkproof_{\advse}[0..k])$, and a new transcript $s_2$. $\zdv$ is run $n$ times with different random oracle responses. If a tree $\tree$ of $n$ transcripts is built then $\ext$ runs internally the tree extractor $\extt(\tree)$ and outputs what it returns. We emphasize here the importance of the unique response property. If it does not hold then in some $j$-th execution of $\zdv$ the adversary $\adv$ (run internally in $\bdv$) could reuse a challenge that it learned from observing proofs in $Q$. In that case, $\bdv$ would output a proof that would make $\zdv$ output $i = 0$, making the extractor fail. Fortunately, the case that the adversary breaks the unique response property has already been covered by the abort condition in $\game{1}$. Denote by $\waccProb$ the probability that $\advse$ outputs a proof that is accepted and does not break $\ur{k}$-ness of $\ps$. With the same probability an acceptable proof is returned by $\bdv$. Denote by $\waccProb'$ the probability that algorithm $\zdv$, defined in the lemma, produces an accepting proof with a fresh challenge after Round $k$. Given the discussion above, we can state that $\waccProb = \waccProb'$. Next, from the generalised forking lemma, cf.~\cref{lem:generalised_forking_lemma}, we get that \begin{equation} \begin{split} \prob{\event{\errfrk}} \leq 1 - \waccProb \cdot \left(\infrac{\waccProb^{n - 1}}{q^{n - 1}} + \infrac{(2^\secpar) !}{((2^\secpar - n)! \cdot (2^\secpar)^{n})} - 1\right). \end{split} \end{equation} } \ngame{3} \changedm{ This game is identical to $\game{2}$ except that it aborts if $\extt(\tree)$ run by $\ext$ fails to extract the witness. } \ncase{Game 2 to Game 3} \changedm{ Since $\ps$ is forking-sound the probability that $\extt(\tree)$ fails is upper-bounded by $\epsss(\secpar)$. Since Game $\game{3}$ is aborted when it is impossible to extract the correct witness from $\tree$ and $\bdv$ only passes proofs produced by $\adv$, the adversary $\advse$ cannot win. 
Thus, by the game-hopping argument, \[ \abs{\prob{\game{0}} - \prob{\game{4}}} \leq 1 - \left(\frac{\waccProb^{n}}{q^{n - 1}} + \waccProb \cdot \frac{(2^\secpar) !}{(2^\secpar - n)! \cdot (2^\secpar)^{n}} - \waccProb\right) + \epsur(\secpar) + %q_{\ro}^{\mu} \epss + \epsss(\secpar)\,. \] Thus the probability that extractor $\extss$ succeeds is at least \[ \frac{\waccProb^{n}}{q^{n - 1}} + \waccProb \cdot \frac{(2^\secpar) !}{(2^\secpar - n)! \cdot (2^\secpar)^{n}} - \waccProb - \epsur(\secpar) %- q_{\ro}^{\mu} \epss - \epsss(\secpar)\,. \] Since $\waccProb$ is probability of $\advse$ outputting acceptable transcript that does not break $\ur{k}$-ness of $\ps$, then $\waccProb \geq \accProb - \epsur(\secpar)$, where $\accProb$ is the probability of $\advse$ outputing an acceptable proof as defined in \cref{def:simext}. It thus holds \begin{equation} \label{eq:frk} \extProb \geq \frac{(\accProb - \epsur(\secpar))^{n}}{q^{n - 1}} - \underbrace{(\accProb - \epsur(\secpar)) \cdot \left( 1 - \frac{(2^\secpar) !}{(2^\secpar - n)! \cdot (2^\secpar)^{n}}\right) - \epsur(\secpar) - % q_{\ro}^{\mu} \epss - \epsss(\secpar)}_{\eps(\secpar)}\,. \end{equation} Note that the part of \cref{eq:frk} denoted by $\eps(\secpar)$ is negligible as $\epsur(\secpar), \epsss(\secpar)$ are negligible, and $\frac{(2^\secpar) !}{(2^\secpar - n)! \cdot (2^\secpar)^{n}} \geq \left(\infrac{(2^\secpar - n)}{2^\secpar}\right)^{n}$ is overwhelming. Thus, \[ \extProb \geq q^{-(n - 1)} (\accProb - \epsur(\secpar))^{n} -\eps(\secpar)\,. \] and $\psfs$ is forking simulation extractable with extraction error $\epsur(\secpar)$. } \qed \end{proof} \end{document} %%% Local Variables: %%% mode: latex %%% TeX-master: t %%% End:
{ "alphanum_fraction": 0.6609757598, "avg_line_length": 50.9986553115, "ext": "tex", "hexsha": "fc0c83b1029b66d1e722f1247e7e9e47d424dafd", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "7da7fa2b6aa17142ef8393ace6aa532f3cfd12b4", "max_forks_repo_licenses": [ "CC-BY-4.0" ], "max_forks_repo_name": "clearmatics/research-plonkext", "max_forks_repo_path": "old_plonkext/plonkext.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "7da7fa2b6aa17142ef8393ace6aa532f3cfd12b4", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "CC-BY-4.0" ], "max_issues_repo_name": "clearmatics/research-plonkext", "max_issues_repo_path": "old_plonkext/plonkext.tex", "max_line_length": 594, "max_stars_count": null, "max_stars_repo_head_hexsha": "7da7fa2b6aa17142ef8393ace6aa532f3cfd12b4", "max_stars_repo_licenses": [ "CC-BY-4.0" ], "max_stars_repo_name": "clearmatics/research-plonkext", "max_stars_repo_path": "old_plonkext/plonkext.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 78108, "size": 227556 }
\chapter{Statistics Core Functions (StatCore)}
{ "alphanum_fraction": 0.8085106383, "avg_line_length": 15.6666666667, "ext": "tex", "hexsha": "42617e31ac466de8cb14de99b1460e1352d284bc", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "103a75e6a433c2b873abb7ecd4da675028b782db", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "kilasuelika/SciStaLib", "max_forks_repo_path": "Documentation/StatCore.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "103a75e6a433c2b873abb7ecd4da675028b782db", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "kilasuelika/SciStaLib", "max_issues_repo_path": "Documentation/StatCore.tex", "max_line_length": 45, "max_stars_count": null, "max_stars_repo_head_hexsha": "103a75e6a433c2b873abb7ecd4da675028b782db", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "kilasuelika/SciStaLib", "max_stars_repo_path": "Documentation/StatCore.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 10, "size": 47 }
\subsection*{Computer Vision System}
\textbf{ID:} 01 \\
\textbf{Title:} \emph{Line Detection}
\begin{itemize}
    \item Given a distinctly colored line along the floor, the computer vision system should be able to track it and feed back an accurate measurement of how much the robot deviates from that line (a rough illustration of such a measurement is sketched after these requirements).
\end{itemize}
\textbf{ID:} 02 \\
\textbf{Title:} \emph{Color Detection}
\begin{itemize}
    \item Camera input should be filterable by RGB pixel values. Objects within certain color ranges should be detectable.
\end{itemize}
\textbf{ID:} 03 \\
\textbf{Title:} \emph{QR Code Identification}
\begin{itemize}
    \item The system should be able to recognize and read the contents of QR codes.
\end{itemize}
\textbf{ID:} 04 \\
\textbf{Title:} \emph{Real-Time Performance}
\begin{itemize}
    \item It should be possible for the system to keep up with and process a continuous video stream in real time.
\end{itemize}
\textbf{ID:} 05 \\
\textbf{Title:} \emph{Visualization}
\begin{itemize}
    \item An accompanying GUI should exist where the raw video stream can be seen adjacent to a video stream where color and line detection are active. Detected objects should have a border, and an object coordinate should be seen on screen. When the system is following a line, it should be possible to see how the robot is positioned relative to the line. A measurement of the current deviation should be shown on the screen.
\end{itemize}
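As a rough, non-normative illustration of requirements 01 and 02, the Python/OpenCV fragment below filters a frame by a color range and converts the result into a deviation measurement. The color bounds, function name and sign convention are placeholders chosen for this sketch and are not taken from the project.

\begin{verbatim}
# Sketch only: estimate how far a distinctly colored floor line deviates
# from the image center.  The BGR bounds below are placeholders and would
# have to be tuned to the actual line color and lighting.
import cv2
import numpy as np

def line_deviation(frame_bgr, lower=(0, 0, 150), upper=(80, 80, 255)):
    """Horizontal deviation (in pixels) of the line from the image center,
    or None if no pixels fall inside the color range."""
    mask = cv2.inRange(frame_bgr, np.array(lower), np.array(upper))
    m = cv2.moments(mask, binaryImage=True)
    if m["m00"] == 0:
        return None
    line_x = m["m10"] / m["m00"]            # centroid column of line pixels
    return line_x - frame_bgr.shape[1] / 2  # positive: line right of center
\end{verbatim}

Applied to every frame of the camera stream, the returned deviation is the quantity that requirement 01 asks to feed back, and the same color mask could be used to draw the object borders described in requirement 05.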
{ "alphanum_fraction": 0.7543252595, "avg_line_length": 48.1666666667, "ext": "tex", "hexsha": "c6a02a8f41f9cbe18c8807327fd3f3c9fa804cf0", "lang": "TeX", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2020-11-16T16:06:15.000Z", "max_forks_repo_forks_event_min_datetime": "2020-11-16T16:06:15.000Z", "max_forks_repo_head_hexsha": "d86848a037a07e97122c92e3c80c980c58c41d52", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "kottz/D7039E", "max_forks_repo_path": "report/unsafe/sections/edK_cv_requirements.tex", "max_issues_count": 72, "max_issues_repo_head_hexsha": "d86848a037a07e97122c92e3c80c980c58c41d52", "max_issues_repo_issues_event_max_datetime": "2021-01-01T08:06:16.000Z", "max_issues_repo_issues_event_min_datetime": "2020-09-15T13:32:02.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "kottz/D7039E", "max_issues_repo_path": "report/unsafe/sections/edK_cv_requirements.tex", "max_line_length": 122, "max_stars_count": null, "max_stars_repo_head_hexsha": "d86848a037a07e97122c92e3c80c980c58c41d52", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "kottz/D7039E", "max_stars_repo_path": "report/unsafe/sections/edK_cv_requirements.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 385, "size": 1445 }
\documentclass[]{article} \usepackage{lmodern} \usepackage{amssymb,amsmath} \usepackage{ifxetex,ifluatex} \usepackage{fixltx2e} % provides \textsubscript \ifnum 0\ifxetex 1\fi\ifluatex 1\fi=0 % if pdftex \usepackage[T1]{fontenc} \usepackage[utf8]{inputenc} \else % if luatex or xelatex \ifxetex \usepackage{mathspec} \else \usepackage{fontspec} \fi \defaultfontfeatures{Ligatures=TeX,Scale=MatchLowercase} \fi % use upquote if available, for straight quotes in verbatim environments \IfFileExists{upquote.sty}{\usepackage{upquote}}{} % use microtype if available \IfFileExists{microtype.sty}{% \usepackage{microtype} \UseMicrotypeSet[protrusion]{basicmath} % disable protrusion for tt fonts }{} \usepackage[margin=1in]{geometry} \usepackage{hyperref} \hypersetup{unicode=true, pdftitle={615 Pre\_EDA}, pdfborder={0 0 0}, breaklinks=true} \urlstyle{same} % don't use monospace font for urls \usepackage{color} \usepackage{fancyvrb} \newcommand{\VerbBar}{|} \newcommand{\VERB}{\Verb[commandchars=\\\{\}]} \DefineVerbatimEnvironment{Highlighting}{Verbatim}{commandchars=\\\{\}} % Add ',fontsize=\small' for more characters per line \usepackage{framed} \definecolor{shadecolor}{RGB}{248,248,248} \newenvironment{Shaded}{\begin{snugshade}}{\end{snugshade}} \newcommand{\KeywordTok}[1]{\textcolor[rgb]{0.13,0.29,0.53}{\textbf{#1}}} \newcommand{\DataTypeTok}[1]{\textcolor[rgb]{0.13,0.29,0.53}{#1}} \newcommand{\DecValTok}[1]{\textcolor[rgb]{0.00,0.00,0.81}{#1}} \newcommand{\BaseNTok}[1]{\textcolor[rgb]{0.00,0.00,0.81}{#1}} \newcommand{\FloatTok}[1]{\textcolor[rgb]{0.00,0.00,0.81}{#1}} \newcommand{\ConstantTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}} \newcommand{\CharTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}} \newcommand{\SpecialCharTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}} \newcommand{\StringTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}} \newcommand{\VerbatimStringTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}} \newcommand{\SpecialStringTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}} \newcommand{\ImportTok}[1]{#1} \newcommand{\CommentTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textit{#1}}} \newcommand{\DocumentationTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}} \newcommand{\AnnotationTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}} \newcommand{\CommentVarTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}} \newcommand{\OtherTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{#1}} \newcommand{\FunctionTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}} \newcommand{\VariableTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}} \newcommand{\ControlFlowTok}[1]{\textcolor[rgb]{0.13,0.29,0.53}{\textbf{#1}}} \newcommand{\OperatorTok}[1]{\textcolor[rgb]{0.81,0.36,0.00}{\textbf{#1}}} \newcommand{\BuiltInTok}[1]{#1} \newcommand{\ExtensionTok}[1]{#1} \newcommand{\PreprocessorTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textit{#1}}} \newcommand{\AttributeTok}[1]{\textcolor[rgb]{0.77,0.63,0.00}{#1}} \newcommand{\RegionMarkerTok}[1]{#1} \newcommand{\InformationTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}} \newcommand{\WarningTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}} \newcommand{\AlertTok}[1]{\textcolor[rgb]{0.94,0.16,0.16}{#1}} \newcommand{\ErrorTok}[1]{\textcolor[rgb]{0.64,0.00,0.00}{\textbf{#1}}} \newcommand{\NormalTok}[1]{#1} \usepackage{graphicx,grffile} \makeatletter \def\maxwidth{\ifdim\Gin@nat@width>\linewidth\linewidth\else\Gin@nat@width\fi} \def\maxheight{\ifdim\Gin@nat@height>\textheight\textheight\else\Gin@nat@height\fi} \makeatother % Scale images if necessary, so that they will 
not overflow the page % margins by default, and it is still possible to overwrite the defaults % using explicit options in \includegraphics[width, height, ...]{} \setkeys{Gin}{width=\maxwidth,height=\maxheight,keepaspectratio} \IfFileExists{parskip.sty}{% \usepackage{parskip} }{% else \setlength{\parindent}{0pt} \setlength{\parskip}{6pt plus 2pt minus 1pt} } \setlength{\emergencystretch}{3em} % prevent overfull lines \providecommand{\tightlist}{% \setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}} \setcounter{secnumdepth}{0} % Redefines (sub)paragraphs to behave more like sections \ifx\paragraph\undefined\else \let\oldparagraph\paragraph \renewcommand{\paragraph}[1]{\oldparagraph{#1}\mbox{}} \fi \ifx\subparagraph\undefined\else \let\oldsubparagraph\subparagraph \renewcommand{\subparagraph}[1]{\oldsubparagraph{#1}\mbox{}} \fi %%% Use protect on footnotes to avoid problems with footnotes in titles \let\rmarkdownfootnote\footnote% \def\footnote{\protect\rmarkdownfootnote} %%% Change title format to be more compact \usepackage{titling} % Create subtitle command for use in maketitle \newcommand{\subtitle}[1]{ \posttitle{ \begin{center}\large#1\end{center} } } \setlength{\droptitle}{-2em} \title{615 Pre\_EDA} \pretitle{\vspace{\droptitle}\centering\huge} \posttitle{\par} \author{} \preauthor{}\postauthor{} \date{} \predate{}\postdate{} \begin{document} \maketitle \subsection{Load Package \& Read the Data}\label{load-package-read-the-data} \begin{Shaded} \begin{Highlighting}[] \KeywordTok{library}\NormalTok{(tidyverse)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## -- Attaching packages --------------------------------------- tidyverse 1.2.1 -- \end{verbatim} \begin{verbatim} ## √ ggplot2 3.0.0 √ purrr 0.2.5 ## √ tibble 1.4.2 √ dplyr 0.7.6 ## √ tidyr 0.8.1 √ stringr 1.3.1 ## √ readr 1.1.1 √ forcats 0.3.0 \end{verbatim} \begin{verbatim} ## -- Conflicts ------------------------------------------ tidyverse_conflicts() -- ## x dplyr::filter() masks stats::filter() ## x dplyr::lag() masks stats::lag() \end{verbatim} \begin{Shaded} \begin{Highlighting}[] \KeywordTok{library}\NormalTok{(scales) } \end{Highlighting} \end{Shaded} \begin{verbatim} ## ## Attaching package: 'scales' \end{verbatim} \begin{verbatim} ## The following object is masked from 'package:purrr': ## ## discard \end{verbatim} \begin{verbatim} ## The following object is masked from 'package:readr': ## ## col_factor \end{verbatim} \begin{Shaded} \begin{Highlighting}[] \KeywordTok{library}\NormalTok{(readr)} \KeywordTok{library}\NormalTok{(dplyr)} \KeywordTok{library}\NormalTok{(stats)} \KeywordTok{library}\NormalTok{(factoextra) }\CommentTok{# clustering algorithms & visualization} \end{Highlighting} \end{Shaded} \begin{verbatim} ## Welcome! 
Related Books: `Practical Guide To Cluster Analysis in R` at https://goo.gl/13EFCZ \end{verbatim} \begin{Shaded} \begin{Highlighting}[] \KeywordTok{library}\NormalTok{(corrplot)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## corrplot 0.84 loaded \end{verbatim} \begin{Shaded} \begin{Highlighting}[] \KeywordTok{library}\NormalTok{(graphics)} \KeywordTok{library}\NormalTok{(cluster) }\CommentTok{# clustering algorithms} \NormalTok{frm_NCBI <-}\StringTok{ }\KeywordTok{read.csv}\NormalTok{(}\StringTok{'framingham.csv'}\NormalTok{) }\CommentTok{#kaggles} \NormalTok{frm_ktrain <-}\StringTok{ }\KeywordTok{read_csv}\NormalTok{(}\StringTok{'frmgham2.csv'}\NormalTok{) }\CommentTok{#NIH} \end{Highlighting} \end{Shaded} \begin{verbatim} ## Parsed with column specification: ## cols( ## .default = col_integer(), ## SYSBP = col_double(), ## DIABP = col_double(), ## BMI = col_double() ## ) \end{verbatim} \begin{verbatim} ## See spec(...) for full column specifications. \end{verbatim} \begin{Shaded} \begin{Highlighting}[] \KeywordTok{problems}\NormalTok{(frm_NCBI)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## # tibble [0 x 4] ## # ... with 4 variables: row <int>, col <int>, expected <chr>, actual <chr> \end{verbatim} \begin{Shaded} \begin{Highlighting}[] \KeywordTok{problems}\NormalTok{(frm_ktrain)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## # tibble [0 x 4] ## # ... with 4 variables: row <int>, col <int>, expected <chr>, actual <chr> \end{verbatim} \subsection{Description of the Datasets}\label{description-of-the-datasets} The Framingham Heart Dataset The Framingham Heart Study is one of the longest ongoing cardiovascular cohort study in the world. It started in 1948 and the initial number of adult subjects was 5,209. The study is now its third generation and was conducted on the inhabitants of the city of Framingham Massachusetts. The results from the study have been used to better understand the epidemiology hypertensive or arteriosclerotic cardiovascular disease. Prevailing knowledge/lifestyle factors pertaining to heart disease such as diet, exercise and common medications such as aspirin. \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \tightlist \item The following demographic risk factors were included in the Framingham Heart Dataset:- \item Sex: male or female: \item Age: age of the patient: \item Education: Different education Levels were coded 1 for some high school, 2 for a high school diploma or GED, 3 for some College or vocational school, and 4 for a college degree. \item The data set also includes behavioral risk factors associated with smoking CurrentSmoker: If the patient is a current smoker or not CigsPerDay: The number of cigarettes that the person smoked on average in one day. 
\item Medical history risk factors: BPMeds: Whether or not the patient was on blood pressure medication PrevalentStroke: Whether or not the patient had previously had a stroke PrevalentHyp: Whether or not the patient was hypertensive Diabetes: Whether or not the patient had diabetes \item Risk factors from the first physical examination of the patient: TotChol: Total cholesterol levels SysBP: Systolic blood pressure DiaBP: Diastolic blood pressure BMI: Body Mass Index HeartRate: Heart rate Glucose: Glucose level TenYearCHD: 10 year risk of coronary heart disease CHD(This extra feature or Variable was only included in the Kaggle Dataset and not the NIH Dataset) \end{enumerate} \subsection{Principal Component Analysis (PCA)}\label{principal-component-analysis-pca} Description of the data exploration/analysis Principal Component Analysis (PCA) A multivariate Analysis begins with having several substantial correlated variables. In such an event, one can use a statistical analysis called Principal Component Analysis (PCA). PCA functions as a dimension-reduction tool that is often used to reduce a large set of variables to a small set that still contains most of the information in the large set. The smaller number of variables in the small set is called principal components. The PCAs are numbered 1,2,3,4\ldots{}. The first PCA value generally accounts for as much of the variability in the data as possible, and each succeeding component accounts for as much of the remaining variability as possible. We did a PCA on The Framingham Heart Datasets that were both obtained from the NIH and Kaggles. Our reason for doing PCA was to find which variables were the most important in the entire dataset: Which variable contributed the most to disease risk all factors considered? After our PCA analysis, we identified that Total Cholestrol levels were the most important variable for this group of people. This can be explained by the diet and lifestyle choices. Foods high in bad cholestrol overall put you one at very high risk for cardiovascular diseases and heart health. \begin{Shaded} \begin{Highlighting}[] \CommentTok{# Principal component analysis(PCA) #NIH} \CommentTok{#frm_ktrain <- read.csv('framingham.csv')} \NormalTok{frm_ktrain <-}\StringTok{ }\KeywordTok{read_csv}\NormalTok{(}\StringTok{'frmgham2.csv'}\NormalTok{)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## Parsed with column specification: ## cols( ## .default = col_integer(), ## SYSBP = col_double(), ## DIABP = col_double(), ## BMI = col_double() ## ) \end{verbatim} \begin{verbatim} ## See spec(...) for full column specifications. \end{verbatim} \begin{Shaded} \begin{Highlighting}[] \NormalTok{res.pca <-}\StringTok{ }\KeywordTok{prcomp}\NormalTok{(}\KeywordTok{na.omit}\NormalTok{(frm_ktrain[,}\KeywordTok{c}\NormalTok{(}\DecValTok{2}\OperatorTok{:}\DecValTok{6}\NormalTok{, }\DecValTok{8}\OperatorTok{:}\DecValTok{14}\NormalTok{)], }\DataTypeTok{center =}\NormalTok{ TRU, }\DataTypeTok{scale =} \OtherTok{TRUE}\NormalTok{)) }\CommentTok{#plot1} \KeywordTok{fviz_eig}\NormalTok{(res.pca) }\CommentTok{#plot one} \end{Highlighting} \end{Shaded} \includegraphics{PCAforNIH_files/figure-latex/unnamed-chunk-2-1.pdf} \begin{Shaded} \begin{Highlighting}[] \KeywordTok{fviz_pca_biplot}\NormalTok{(res.pca) }\CommentTok{#plot two} \end{Highlighting} \end{Shaded} \includegraphics{PCAforNIH_files/figure-latex/unnamed-chunk-2-2.pdf} \begin{Shaded} \begin{Highlighting}[] \KeywordTok{fviz_pca_ind}\NormalTok{(res.pca) }\CommentTok{#gives a scatter plot. 
very messy} \end{Highlighting} \end{Shaded} \includegraphics{PCAforNIH_files/figure-latex/unnamed-chunk-2-3.pdf} \begin{Shaded} \begin{Highlighting}[] \KeywordTok{summary}\NormalTok{(res.pca) ##As you can see, principal components 1 and 2 have the highest standard deviation, use them} \end{Highlighting} \end{Shaded} \begin{verbatim} ## Importance of components: ## PC1 PC2 PC3 PC4 PC5 PC6 ## Standard deviation 45.4627 26.5103 22.7142 12.94025 11.4519 9.23998 ## Proportion of Variance 0.5548 0.1887 0.1385 0.04495 0.0352 0.02292 ## Cumulative Proportion 0.5548 0.7435 0.8820 0.92692 0.9621 0.98505 ## PC7 PC8 PC9 PC10 PC11 PC12 ## Standard deviation 6.35481 3.73987 1.01090 0.46518 0.25779 0.17051 ## Proportion of Variance 0.01084 0.00375 0.00027 0.00006 0.00002 0.00001 ## Cumulative Proportion 0.99589 0.99964 0.99992 0.99997 0.99999 1.00000 \end{verbatim} \subsection{PCA Discussion}\label{pca-discussion} From our analysis of The Framingham Heart Dataset(NIH): The Histogram plot obtained from {[}fviz\_eig(res.pca){]} which plots the highest eigenvectors which are the highest PCA shows that PCA1 explains over 60\% of the variance in the dataset and subsequently PCA2 explains over \%19 and PCA3 explaining the remaining PCAs explaining the remaining variance. Summary Importance of PCAs:The summary shows what we expected from the PCA plot1. PCA which is what we picked to explain the most important variables in the Dataset had the highest standard deviation, proportion of variance and cumulative proportion followed PCA2 which had half of these values. mportance of components: PC1 PC2 PC3 PC4 PC5 PC6 PC7 Standard deviation 45.4627 26.5103 22.7142 12.94025 11.4519 9.23998 6.35481 Proportion of Variance 0.5548 0.1887 0.1385 0.04495 0.0352 0.02292 0.01084 Cumulative Proportion 0.5548 0.7435 0.8820 0.92692 0.9621 0.98505 0.99589 PC8 PC9 PC10 PC11 PC12 Standard deviation 3.73987 1.01090 0.46518 0.25779 0.17051 Proportion of Variance 0.00375 0.00027 0.00006 0.00002 0.00001 Cumulative Proportion 0.99964 0.99992 0.99997 0.99999 1.00000 Plot2: This plot shows: For PCA1: TotalChol is the most important variable to PCA1(Dim2). For PCA2:Glucose, SysBp, DiasBp, HeartRate, BMI and CIGPDAY were the most important variables in PCA2(Dim2) Our reason for doing PCA was to find which variables were the most important in the entire dataset: Which variable contributed the most to disease risk all factors considered? After our PCA analysis, we identified that Total Cholestrol levels were the most important variable for this group of people. This can be explained by the diet and lifestyle choices. Foods high in bad cholestrol overall put you one at very high risk for cardiovascular diseases and heart health. 
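For reference, the ``Proportion of Variance'' and ``Cumulative Proportion'' rows quoted above follow the standard PCA definitions; the formula below is a generic reminder, not additional output from this analysis. If $\lambda_1 \geq \lambda_2 \geq \dots \geq \lambda_p$ denote the variances of the principal components (the squares of the standard deviations reported by \texttt{prcomp}), then the proportion of variance explained by component $i$ and the cumulative proportion up to component $k$ are
\begin{equation*}
\mathrm{PVE}_i = \frac{\lambda_i}{\sum_{j=1}^{p} \lambda_j},
\qquad
\mathrm{CPVE}_k = \sum_{i=1}^{k} \mathrm{PVE}_i .
\end{equation*}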
\begin{Shaded} \begin{Highlighting}[] \CommentTok{#VARIATION PLOT for VARIANCE #NIH DATA} \CommentTok{#frm_ktrain <- read_csv('frmgham2.csv') #used this} \CommentTok{#frm_NCBI <- read.csv('framingham.csv') } \NormalTok{var <-}\StringTok{ }\KeywordTok{get_pca_var}\NormalTok{(res.pca)} \KeywordTok{corrplot}\NormalTok{(var}\OperatorTok{$}\NormalTok{cos2, }\DataTypeTok{is.corr=}\OtherTok{FALSE}\NormalTok{)} \end{Highlighting} \end{Shaded} \includegraphics{PCAforNIH_files/figure-latex/unnamed-chunk-3-1.pdf} \begin{Shaded} \begin{Highlighting}[] \NormalTok{frm_ktrain.clean <-frm_ktrain }\OperatorTok{%>%}\StringTok{ }\KeywordTok{mutate_at}\NormalTok{(}\KeywordTok{vars}\NormalTok{(SEX,CURSMOKE, DIABETES, BPMEDS, educ, PREVCHD, PREVAP, PREVMI, PREVSTRK, PREVHYP, PERIOD, DEATH, } \NormalTok{ ANGINA, HOSPMI, MI_FCHD, ANYCHD, STROKE, CVD, HYPERTEN) , }\KeywordTok{funs}\NormalTok{(}\KeywordTok{factor}\NormalTok{(.)))} \KeywordTok{summary}\NormalTok{(frm_ktrain.clean)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## RANDID SEX TOTCHOL AGE ## Min. : 2448 1:5022 Min. :107.0 Min. :32.00 ## 1st Qu.:2474378 2:6605 1st Qu.:210.0 1st Qu.:48.00 ## Median :5006008 Median :238.0 Median :54.00 ## Mean :5004741 Mean :241.2 Mean :54.79 ## 3rd Qu.:7472730 3rd Qu.:268.0 3rd Qu.:62.00 ## Max. :9999312 Max. :696.0 Max. :81.00 ## NA's :409 ## SYSBP DIABP CURSMOKE CIGPDAY BMI ## Min. : 83.5 Min. : 30.00 0:6598 Min. : 0.00 Min. :14.43 ## 1st Qu.:120.0 1st Qu.: 75.00 1:5029 1st Qu.: 0.00 1st Qu.:23.09 ## Median :132.0 Median : 82.00 Median : 0.00 Median :25.48 ## Mean :136.3 Mean : 83.04 Mean : 8.25 Mean :25.88 ## 3rd Qu.:149.0 3rd Qu.: 90.00 3rd Qu.:20.00 3rd Qu.:28.07 ## Max. :295.0 Max. :150.00 Max. :90.00 Max. :56.80 ## NA's :79 NA's :52 ## DIABETES BPMEDS HEARTRTE GLUCOSE educ ## 0:11097 0 :10090 Min. : 37.00 Min. : 39.00 1 :4690 ## 1: 530 1 : 944 1st Qu.: 69.00 1st Qu.: 72.00 2 :3410 ## NA's: 593 Median : 75.00 Median : 80.00 3 :1885 ## Mean : 76.78 Mean : 84.12 4 :1347 ## 3rd Qu.: 85.00 3rd Qu.: 89.00 NA's: 295 ## Max. :220.00 Max. :478.00 ## NA's :6 NA's :1440 ## PREVCHD PREVAP PREVMI PREVSTRK PREVHYP TIME PERIOD ## 0:10785 0:11000 0:11253 0:11475 0:6283 Min. : 0 1:4434 ## 1: 842 1: 627 1: 374 1: 152 1:5344 1st Qu.: 0 2:3930 ## Median :2156 3:3263 ## Mean :1957 ## 3rd Qu.:4252 ## Max. :4854 ## ## HDLC LDLC DEATH ANGINA HOSPMI MI_FCHD ## Min. : 10.00 Min. : 20.0 0:8100 0:9725 0:10473 0:9839 ## 1st Qu.: 39.00 1st Qu.:145.0 1:3527 1:1902 1: 1154 1:1788 ## Median : 48.00 Median :173.0 ## Mean : 49.37 Mean :176.5 ## 3rd Qu.: 58.00 3rd Qu.:205.0 ## Max. :189.00 Max. :565.0 ## NA's :8600 NA's :8601 ## ANYCHD STROKE CVD HYPERTEN TIMEAP TIMEMI ## 0:8469 0:10566 0:8728 0:2985 Min. : 0 Min. : 0 ## 1:3158 1: 1061 1:2899 1:8642 1st Qu.:6224 1st Qu.:7212 ## Median :8766 Median :8766 ## Mean :7242 Mean :7594 ## 3rd Qu.:8766 3rd Qu.:8766 ## Max. :8766 Max. :8766 ## ## TIMEMIFC TIMECHD TIMESTRK TIMECVD ## Min. : 0 Min. : 0 Min. : 0 Min. : 0 ## 1st Qu.:7050 1st Qu.:5598 1st Qu.:7295 1st Qu.:6004 ## Median :8766 Median :8766 Median :8766 Median :8766 ## Mean :7543 Mean :7008 Mean :7661 Mean :7166 ## 3rd Qu.:8766 3rd Qu.:8766 3rd Qu.:8766 3rd Qu.:8766 ## Max. :8766 Max. :8766 Max. :8766 Max. :8766 ## ## TIMEDTH TIMEHYP ## Min. : 26 Min. : 0 ## 1st Qu.:7798 1st Qu.: 0 ## Median :8766 Median :2429 ## Mean :7854 Mean :3599 ## 3rd Qu.:8766 3rd Qu.:7329 ## Max. :8766 Max. 
:8766 ## \end{verbatim} \subsection{Discussion}\label{discussion} We plotted a different plot so as to better visualize the significance of these variables after a PCA analysis. The plot confirms our previous analysis and results where Total Cholestrol levels were identified to be the most important variable for this group of people in regards to PCA1. SysBP and Glucose captured the remaining variance in PCA2 while CIGPERDAY was significant in PCA3 and last of all HeartRate was significant in PCA4. As a rule of thumb, we go with the top two or three variables which capture most of the variance in the dataset. The reason for Total Cholestrol levels being high could be explained by dietary factors for this particular group: Foods high in bad cholestrol overall put you one at very high risk for cardiovascular diseases and heart health. It could be that the consume a diet high in cholestrol and do little to lower it. \begin{Shaded} \begin{Highlighting}[] \CommentTok{#my favorite one} \CommentTok{#Panel correlation for NIH Dataset(frm_ktrain)} \CommentTok{#gives correlation between variables} \NormalTok{panel.cor <-}\StringTok{ }\ControlFlowTok{function}\NormalTok{(x, y, }\DataTypeTok{digits =} \DecValTok{2}\NormalTok{, }\DataTypeTok{prefix =} \StringTok{""}\NormalTok{, cex.cor, ...)} \NormalTok{\{} \NormalTok{ usr <-}\StringTok{ }\KeywordTok{par}\NormalTok{(}\StringTok{"usr"}\NormalTok{); }\KeywordTok{on.exit}\NormalTok{(}\KeywordTok{par}\NormalTok{(usr))} \KeywordTok{par}\NormalTok{(}\DataTypeTok{usr =} \KeywordTok{c}\NormalTok{(}\DecValTok{0}\NormalTok{, }\DecValTok{1}\NormalTok{, }\DecValTok{0}\NormalTok{, }\DecValTok{1}\NormalTok{))} \NormalTok{ r <-}\StringTok{ }\KeywordTok{abs}\NormalTok{(}\KeywordTok{cor}\NormalTok{(x, y))} \NormalTok{ txt <-}\StringTok{ }\KeywordTok{format}\NormalTok{(}\KeywordTok{c}\NormalTok{(r, }\FloatTok{0.123456789}\NormalTok{), }\DataTypeTok{digits =}\NormalTok{ digits)[}\DecValTok{1}\NormalTok{]} \NormalTok{ txt <-}\StringTok{ }\KeywordTok{paste0}\NormalTok{(prefix, txt)} \ControlFlowTok{if}\NormalTok{(}\KeywordTok{missing}\NormalTok{(cex.cor)) cex.cor <-}\StringTok{ }\FloatTok{0.8}\OperatorTok{/}\KeywordTok{strwidth}\NormalTok{(txt)} \KeywordTok{text}\NormalTok{(}\FloatTok{0.5}\NormalTok{, }\FloatTok{0.5}\NormalTok{, txt, }\DataTypeTok{cex =}\NormalTok{ cex.cor }\OperatorTok{*}\StringTok{ }\NormalTok{r)} \NormalTok{\}} \CommentTok{#frm_ktrain <- read_csv('frmgham2.csv')} \KeywordTok{pairs}\NormalTok{( }\OperatorTok{~}\NormalTok{TOTCHOL}\OperatorTok{+}\NormalTok{AGE}\OperatorTok{+}\NormalTok{SYSBP}\OperatorTok{+}\NormalTok{DIABP}\OperatorTok{+}\NormalTok{BMI}\OperatorTok{+}\NormalTok{HEARTRTE }\OperatorTok{+}\NormalTok{GLUCOSE}\OperatorTok{+}\NormalTok{CIGPDAY, }\DataTypeTok{data=}\NormalTok{frm_ktrain, }\DataTypeTok{na.action=}\NormalTok{na.omit,} \DataTypeTok{lower.panel =}\NormalTok{ panel.smooth,} \DataTypeTok{upper.panel =}\NormalTok{ panel.cor)} \end{Highlighting} \end{Shaded} \includegraphics{PCAforNIH_files/figure-latex/unnamed-chunk-4-1.pdf} \#\# Correlation Plot Reason for Analysis: We generated a correlation plot. The purpose of the correlation plot was to show how much one variable is affected by another. The relationship between two variables is called their correlation. Results: From the results of our analysis, it was clear that SYSBP and DIASBP are the most highly positively correlated variables in the entire dataset.That means if one increased then the other would increase and vice-versa. The correlation value was 0.72. 
However, SYSBP is a better variable for predicting cardiovacular health than DIABP. The next slightly correlated variables were Age and SysBP which had a low correlation value of 0.38. The DIABP and BMI had an almost similar value of 0.34. This confirms the assumption that as you grow older, you run a slightly higher risk of increasing your SYSBP. Discussion: Bloop Pressure: A patients Blood Pressure reading is taken in two readings. A brief description from the CDC webiste states that the first number, called systolic blood pressure, measures the pressure in your blood vessels when your heart beats. The second number, called diastolic blood pressure, measures the pressure in your blood vessels when your heart rests between beats. The general assumption is that as one increases the other would consequentially increase. The correlation plots confirmed this assumption. The next slightly correlated variables were Age and SysBP which had a low correlation value of 0.38. The DIABP and BMI had an almost similar value of 0.34. This confirms the assumption that as you grow older, you run a slightly higher risk of increasing your SYSBP. It makes sense because older population have higher cases of High Blood pressure as opposed to younger populations. \subsection{PCA FOR KAGGLES DATASET WITH LESS VARIABLES}\label{pca-for-kaggles-dataset-with-less-variables} \begin{Shaded} \begin{Highlighting}[] \CommentTok{# Principal component analysis #Kaggles} \NormalTok{frm_NCBI <-}\StringTok{ }\KeywordTok{read.csv}\NormalTok{(}\StringTok{'framingham.csv'}\NormalTok{) }\CommentTok{#kaggles} \NormalTok{res.pca <-}\StringTok{ }\KeywordTok{prcomp}\NormalTok{(}\KeywordTok{na.omit}\NormalTok{(frm_NCBI[,}\KeywordTok{c}\NormalTok{(}\DecValTok{2}\OperatorTok{:}\DecValTok{6}\NormalTok{, }\DecValTok{8}\OperatorTok{:}\DecValTok{14}\NormalTok{)], }\DataTypeTok{center =}\NormalTok{ TRU, }\DataTypeTok{scale =} \OtherTok{TRUE}\NormalTok{))} \KeywordTok{fviz_eig}\NormalTok{(res.pca) }\CommentTok{#plot one} \end{Highlighting} \end{Shaded} \includegraphics{PCAforNIH_files/figure-latex/unnamed-chunk-5-1.pdf} \begin{Shaded} \begin{Highlighting}[] \KeywordTok{fviz_pca_biplot}\NormalTok{(res.pca) }\CommentTok{#plot two} \end{Highlighting} \end{Shaded} \includegraphics{PCAforNIH_files/figure-latex/unnamed-chunk-5-2.pdf} \begin{Shaded} \begin{Highlighting}[] \KeywordTok{fviz_pca_ind}\NormalTok{(res.pca) }\CommentTok{#gives a scatter plot. 
very messy} \end{Highlighting} \end{Shaded} \includegraphics{PCAforNIH_files/figure-latex/unnamed-chunk-5-3.pdf} \begin{Shaded} \begin{Highlighting}[] \KeywordTok{summary}\NormalTok{(res.pca) ##As you can see, principal components 1 and 2 have the highest standard deviation, use them} \end{Highlighting} \end{Shaded} \begin{verbatim} ## Importance of components: ## PC1 PC2 PC3 PC4 PC5 PC6 ## Standard deviation 44.5331 23.5903 12.51630 11.27559 7.90686 6.21363 ## Proportion of Variance 0.6746 0.1893 0.05329 0.04325 0.02127 0.01313 ## Cumulative Proportion 0.6746 0.8639 0.91723 0.96048 0.98175 0.99488 ## PC7 PC8 PC9 PC10 PC11 PC12 ## Standard deviation 3.71413 0.99459 0.32717 0.31287 0.16153 0.15499 ## Proportion of Variance 0.00469 0.00034 0.00004 0.00003 0.00001 0.00001 ## Cumulative Proportion 0.99958 0.99991 0.99995 0.99998 0.99999 1.00000 \end{verbatim} \section{VARIATION PLOT for VARIANCE \#Kaggles DATA}\label{variation-plot-for-variance-kaggles-data} \section{\texorpdfstring{frm\_NCBI \textless{}- read.csv(`framingham.csv') \#Kaggles change to this}{frm\_NCBI \textless{}- read.csv(framingham.csv) \#Kaggles change to this}}\label{frm_ncbi---read.csvframingham.csv-kaggles-change-to-this} \begin{Shaded} \begin{Highlighting}[] \CommentTok{#VARIATION PLOT for VARIANCE #Kaggles DATA} \CommentTok{#frm_NCBI <- read.csv('framingham.csv') #Kaggles change to this} \NormalTok{var <-}\StringTok{ }\KeywordTok{get_pca_var}\NormalTok{(res.pca)} \KeywordTok{corrplot}\NormalTok{(var}\OperatorTok{$}\NormalTok{cos2, }\DataTypeTok{is.corr=}\OtherTok{FALSE}\NormalTok{)} \end{Highlighting} \end{Shaded} \includegraphics{PCAforNIH_files/figure-latex/unnamed-chunk-6-1.pdf} \begin{Shaded} \begin{Highlighting}[] \NormalTok{frm_NCBI.clean <-frm_NCBI }\OperatorTok{%>%}\StringTok{ }\KeywordTok{mutate_at}\NormalTok{(}\KeywordTok{vars}\NormalTok{(male,age,education,currentSmoker,cigsPerDay,BPMeds,prevalentStroke,prevalentHyp,diabetes,totChol,sysBP,diaBP,BMI,heartRate,glucose,TenYearCHD) , }\KeywordTok{funs}\NormalTok{(}\KeywordTok{factor}\NormalTok{(.)))} \KeywordTok{summary}\NormalTok{(frm_NCBI.clean)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## male age education currentSmoker cigsPerDay ## 0:2420 40 : 192 1 :1720 0:2145 0 :2145 ## 1:1820 46 : 182 2 :1253 1:2095 20 : 734 ## 42 : 180 3 : 689 30 : 218 ## 41 : 174 4 : 473 15 : 210 ## 48 : 173 NA's: 105 10 : 143 ## 39 : 170 (Other): 761 ## (Other):3169 NA's : 29 ## BPMeds prevalentStroke prevalentHyp diabetes totChol ## 0 :4063 0:4215 0:2923 0:4131 240 : 85 ## 1 : 124 1: 25 1:1317 1: 109 220 : 70 ## NA's: 53 260 : 62 ## 210 : 61 ## 232 : 59 ## (Other):3853 ## NA's : 50 ## sysBP diaBP BMI heartRate ## 120 : 107 80 : 262 22.19 : 18 75 : 563 ## 130 : 102 82 : 152 22.54 : 18 80 : 385 ## 110 : 96 85 : 137 22.91 : 18 70 : 305 ## 115 : 89 70 : 135 23.48 : 18 60 : 231 ## 125 : 88 81 : 131 23.09 : 16 85 : 228 ## 124 : 84 84 : 122 (Other):4133 (Other):2527 ## (Other):3674 (Other):3301 NA's : 19 NA's : 1 ## glucose TenYearCHD ## 75 : 193 0:3596 ## 77 : 167 1: 644 ## 73 : 156 ## 80 : 153 ## 70 : 152 ## (Other):3031 ## NA's : 388 \end{verbatim} \subsection{Discussion}\label{discussion-1} It was interesting to note that for the kaggles dataset that had less variables overall and one new variable that predicts the person risks for TenYearCHD.Total Cholestrol overall was the most important variable in PCA1. SYSBP the most important variable in PCA2 that captured the remaining variance that PCA1 did not capture: PCA1 accounts for most of the variance. 
It was also interesting to note that upon adding GLUCOSE as a variable like in the entire dataset with all the variables considered, glucose was an important predictor for cardiovascular diseases. This confrims our assumptions and prevailing knowledge that chronic high glucose levels putting you at a higher risk for DIABETES AND High Blood Pressure diseases. \end{document}
{ "alphanum_fraction": 0.6674597708, "avg_line_length": 44.0579310345, "ext": "tex", "hexsha": "44ea330a33a01b6f8e8d8fc2e30a10fd9174c5ac", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "34c2785df3e9df86a4ca16c0d14a20d308caf55f", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "Carol-P18/Understanding-Epidemiology-of-the-Heart-Disease-using-The-Framingham-Heart-Study-Data-", "max_forks_repo_path": "PCAforNIH.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "34c2785df3e9df86a4ca16c0d14a20d308caf55f", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "Carol-P18/Understanding-Epidemiology-of-the-Heart-Disease-using-The-Framingham-Heart-Study-Data-", "max_issues_repo_path": "PCAforNIH.tex", "max_line_length": 383, "max_stars_count": null, "max_stars_repo_head_hexsha": "34c2785df3e9df86a4ca16c0d14a20d308caf55f", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "Carol-P18/Understanding-Epidemiology-of-the-Heart-Disease-using-The-Framingham-Heart-Study-Data-", "max_stars_repo_path": "PCAforNIH.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 10229, "size": 31942 }
\documentclass{ctexart} \usepackage{xcolor} \usepackage{listings} \usepackage{amsfonts} \usepackage{amssymb} \usepackage{graphicx} \graphicspath{{images/}} \usepackage[backend=biber, style=numeric, sorting=ynt]{biblatex} \addbibresource{refs.bib} \usepackage{hyperref} \usepackage{amsmath} \usepackage{subfiles} \title{CornerNet: Detecting Objects as Paired Keypoints} \author{Hei Law, Jia Deng\\ University of Michigan, Ann Arbor} \date{ECCV 2018} \begin{document} \maketitle \begin{abstract} \subfile{sections/abstract} \end{abstract} \section{简介} \subfile{sections/introduction} \section{CornerNet} % \subfile{sections/CornerNet} \printbibliography \end{document}
{ "alphanum_fraction": 0.7886904762, "avg_line_length": 24, "ext": "tex", "hexsha": "9c040ee8061fb507b164129961dede5546635548", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "0d57783ecb49c0ce2d7621460cff102caacca2da", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "sjzyzz/paper_translation", "max_forks_repo_path": "papers/CornerNet/main.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "0d57783ecb49c0ce2d7621460cff102caacca2da", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "sjzyzz/paper_translation", "max_issues_repo_path": "papers/CornerNet/main.tex", "max_line_length": 64, "max_stars_count": null, "max_stars_repo_head_hexsha": "0d57783ecb49c0ce2d7621460cff102caacca2da", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "sjzyzz/paper_translation", "max_stars_repo_path": "papers/CornerNet/main.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 214, "size": 672 }
\chapter{Methodology} The most important components of this study are the data sets being constructed for each of the bird species. Before talking about the appended climate data and the bird species themselves, it is worthwhile to first go over the given attributes of the Project FeederWatch (PFW) data set. These initial attributes largely determined the format and characteristics of the climate data. \section{Given Attributes of the Project FeederWatch Data} There are a total of 18 different attributes for each tuple in the original FeederWatch data. The complete list of these features is below, and of course not all of them were applicable to this project. These descriptions were obtained through the guidelines provided by PFW. The actual guideline document can be found in Appendix B. \begin{itemize} \item Latitude: The latitude value of the observation location in decimals. No information was given about how this measurement was done or how accurate is it to the actual location. This is relevant to the project as this is used to establish a city location for a given observation. This city location is later used to fetch the climate data. \item Longitude: The longitude value of the observation location in decimals. No information was given about how this measurement was done or how accurate is it to the actual location. Similar to Latitude, this was relevant to the project for establishing a city location. \item ID: This is the identification number of the participant in Project FeederWatch. This is a unique Cornell Lab of Ornithology identification number and it is not available to the public. This value served no purpose in this study. \item StatProv: This is the U.S. state or Canadian province of the observation location. We are only focused on the continental United States and this state information is vital. It is used to establish the city for a given observation. \item Entry Technique: These are the various methods of the identifying the latitude and longitude values for a given observation. This is not relevant to the focus of this study and thus not considered. \item FW Year: This is the FeederWatch season. The seasons run from November to April, thus only covering the winter season. Example, 'PFW--1992' indicates the season running from November 1991 to April 1992. These values were ignored for this project as the \textbf{Year} attribute below provided the year information for each PFW observation. \item Year: This is the year of the first day of the two-day count observation. This was relevant to this project as this was used to query the dated climate data. \item Month: The month of the first day of the two-day count observation, used to fetch the appropriate climate data. See Year. \item Day: The day of the observation. This is for the first day of the two-day count. This value was used to fetch the correct climate data. See Month and Year. \item NHalfDays: This is the number of the half days of the observation during the two-day count period and is used as a measure of observer effort. The half days range from 1 to 4. This quantification was taken into account in the data models used for analysis. See the Data Analysis section. \item Effort Hrs Atleast: Another measure of the observer's effort. This is a measure of how many hours the participant invested making the observations. This ranged from less than 1 hour to greater than 8 hours. This quantification was taken into account in the data models used for analysis. See the Data Analysis section. 
\item BirdSpp: This is the name of the species seen at the feeder for a given observation. This is a crucial feature for this project. One of the main questions of this study deals with different sized bird species. The bird species attribute was used to filter out data only pertaining to the species of focus, as due to time constraints only a portion of the PFW data could be processed. These bird species were decided upon consultation with our field expert Professor Francis. More about this is covered in the Bird Species Selection section. \item NSeen: The number of individuals seen, this is the maximum number of the species in view at a single time over the two-day observation period. Another crucial attribute for the project. This data is relevant because it is a quantification of how many individuals of the bird species are using the bird feeders as a food source. \item Valid: This is a flag used to preserve data quality. In regards to this project, the data has already been filtered to only include valid and reviewed data. These data values are not relevant to this study. \item Reviewed: This is a flag used to preserve data quality. Again within the context of this project, the data is already reviewed. Thus, these values are not used. See Valid above. \item Loc ID: This is the unique identifier for the location of the observation. Note that participants, see ID above, can have multiple count locations or Loc ID's. This data is not relevant. \item Sub ID: This is the submission identifier and it uniquely identifies the entire checklist submitted by a participant from the count period. Note, if the species observations are all on the same checklist, then they have the same Sub ID. This is not relevant for the project. \item Obs ID: This is the observation identifier and it uniquely identifies a single observation. All species reported on a single checklist receive different observation identifiers. This data is not relevant for this study. \end{itemize} To summarize, there are many attributes to the PFW data set, but only a handful are useful for this project. The latitude, longitude and states data are all key to this project as they are used to establish a U.S. city location for which climate data can be collected. Additionally, the year, month and day values are important as they ensure the climate data is correctly dated. As a reminder this a winter season study, and the season ranges from November to April. Nseen is the main quantification of the abundance of a bird species at a given feeder location and is used to quantify how many individuals are using the feeders. Note again, the count period for the number seen is 2 days. Lastly, there is bird species, and this is one of the most important attributes. It is covered in detail in the Bird Species Selection section. The hours of effort, location id and number of half days are all factors taken into account for the data analysis and is covered in the later sections. These attributes are only relevant for results and conclusions portions of the project. The rest of the attributes included with the Project FeederWatch data are simply ignored as they are not relevant to the questions being tackled. \section{Bird Species Selection} When selecting bird species for this study, 3 main factors became the driving force. First, the bird species must be non-migratory during the winter season. 
This filtering criterion ensures that the bird species in this study remain in their respective regions and actually experience the colder winter climate. The only exception to this requirement is the Evening Grosbeak; that species was nevertheless chosen because of the previous work done on its population decline at the feeders. Second, the bird species must be well represented in the PFW data set. Our criterion was that a species must have at least a few hundred observations, which ensures that there is enough data to develop models after the filtering and clean-up steps. The exact details of these data processing steps are covered in the next few sections. The third factor in the bird selection process was the body mass of the species. As a reminder, the focus of the project revolves around how birds of different body mass react to cooler temperatures, with the main comparison being between the smaller species and the mid to large sized species seen at the feeders. Thus, the aim of the final selection was to have about the same number of smaller bird species as mid to larger sized bird species. Our biology expert, Professor Francis, offered valuable consultation in selecting the species and in ensuring the requirements above were met.

The final set of bird species that was selected is presented in Table 3.1, which contains all of the bird species used for this study. The first criterion, that the bird species be well represented in the original PFW dataset, is met, as many of the species have tuples numbering in the thousands, with the only exception being Pine Grosbeak. However, another issue arises with some species having too many tuples, as there are daily limitations on how much climate data can be collected through WU's API. For example, the number of observations for Mourning Dove is 153,417, and the time required for appending climate data to all of those observations would be measured in weeks rather than hours given the limitations. This makes processing all of the observations for the 23 species impractical. For this reason, at most 5,000 observations per bird species were used for the climate data collection, and these observations were chosen at random (a short illustrative sketch of this subsampling step is given below, just before Table 3.1). This allows us to obtain climate data for all 23 bird species while still having enough observations for further data processing and modeling.

The second goal, including roughly equal numbers of small, mid and large sized bird species, is also met, as clearly shown by the average body mass measurements in the table. The mean body mass values, in grams, are presented in the column labeled Body Mass of Table 3.1; these averages were calculated using values from the Cornell Lab of Ornithology's website All About Birds and the bird mass handbook by John B. Dunning Jr.~\cite{AllAboutBirds:online, dunning2007crc}. The number of observations for each species in the original PFW dataset is presented in the column labeled PFW Observations. The number of observations per species that were used in the final data models and analysis is presented in the column Filtered Obs. The filtration and data processing methods used to obtain the final tuples in Filtered Obs. are covered in the later sections.
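The per-species random subsampling described above can be illustrated with a short Python sketch. This is only an illustration of the idea, not the actual script used for the study; the column name \texttt{BirdSpp} follows the PFW attribute list given earlier, the input file name matches the one used in Algorithm 1, the output file name is hypothetical, and the species list is truncated to two entries.

\begin{verbatim}
# Illustrative sketch: cap each selected species at 5,000 randomly chosen
# PFW observations before any climate data is requested for them.
import pandas as pd

SPECIES = ["Mourning Dove", "Dark-eyed Junco"]   # subset of Table 3.1
MAX_OBS = 5000

pfw = pd.read_csv("PFWData.csv")
subset = (pfw[pfw["BirdSpp"].isin(SPECIES)]
          .groupby("BirdSpp", group_keys=False)
          .apply(lambda g: g.sample(n=min(len(g), MAX_OBS), random_state=1)))
subset.to_csv("PFWDataSampled.csv", index=False)
\end{verbatim}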
% Please add the following required packages to your document preamble:
% \usepackage{longtable}
% Note: It may be necessary to compile the document several times to get a multi-page table to line up properly
\begin{longtable}[c]{|l|l|l|l|}
\caption{Bird Species and the number of observations used in this study.}
\label{my-label}\\
\hline
Bird Species & Body Mass (grams) & PFW Observations & Filtered Obs. \\ \hline
\endhead
%
American Goldfinch & 15.5 & 133496 & 4054 \\ \hline
Black-billed Magpie & 177.5 & 3704 & 3287 \\ \hline
Black-capped Chickadee & 11.5 & 102562 & 2648 \\ \hline
Blue Jay & 85 & 106666 & 1264 \\ \hline
Brown Creeper & 7.5 & 7457 & 4770 \\ \hline
Chestnut-backed Chickadee & 9.5 & 11992 & 4643 \\ \hline
Chipping Sparrow & 13.5 & 13004 & 3158 \\ \hline
Common Grackle & 108 & 25920 & 2720 \\ \hline
Common Redpoll & 15.5 & 4545 & 3615 \\ \hline
Dark-eyed Junco & 24 & 159161 & 5000 \\ \hline
Downy Woodpecker & 24.5 & 137064 & 4699 \\ \hline
European Starling & 78 & 57676 & 4408 \\ \hline
Evening Grosbeak & 63.5 & 1595 & 1206 \\ \hline
Hairy Woodpecker & 67.5 & 50665 & 4691 \\ \hline
Mountain Chickadee & 11 & 5140 & 2995 \\ \hline
Mourning Dove & 121 & 153417 & 2876 \\ \hline
Northern Mockingbird & 51.5 & 26507 & 4792 \\ \hline
Northern Cardinal & 45 & 147259 & 2118 \\ \hline
Pine Grosbeak & 56.4 & 250 & 231 \\ \hline
Pine Siskin & 15 & 27250 & 2738 \\ \hline
Red-bellied Woodpecker & 73.5 & 91767 & 4685 \\ \hline
Tufted Titmouse & 22 & 113012 & 4811 \\ \hline
White-throated Sparrow & 27 & 61422 & 4517 \\ \hline
All Species & NA & 1441531 & 79926 \\ \hline
\end{longtable}

\section{Given Attributes of Weather Underground Data}

There are over a hundred climate attributes provided by Weather Underground, ranging from standard average temperature values to very specific measurements such as cooling days since the first day of the year~\cite{WUAPI:online}. There are far too many attributes to list and discuss individually; instead, listed below are the climate measurements that were deemed important and relevant according to our domain expert, Professor Francis. From this initial set of attributes, the final 6 were eventually chosen for the actual study. An important note: for historical climate data, Weather Underground has 2 categories for each of the climate features, observations and daily summaries~\cite{WUAPI:online}. Observations are all of the direct climate measurements for the day, for example the hourly temperature values, for which there will be 24 measurements. The daily summary is, as the name implies, a single summary value for the day, for example the mean temperature or the average wind speed for the day. For the scope of this study, only the daily summary values are useful, as each is a single average measurement for the day which can then be appended to the PFW observation with the same date. The observations are ignored as there may be multiple observation values for one day. The climate features listed below are all from the daily summary category.

\begin{itemize}
\item date: This is the date of the observations. This is very relevant to the study as it ensures that the correctly dated climate values are collected for the PFW data.

\item precipi: Precipitation in inches of rain. This feature is also relevant to this project. An important note: WU provides the precipitation measurements both in inches and in millimeters, but only the inches values were collected.
This is because the data can be later converted if necessary.
\item snowfalli: Snowfall in inches of snow. Though this is an important climatic factor, it is not going to be focused on in this project.
\item humidity: Humidity represented as a percentage. Again, this may be important for other studies, but for this study these values are ignored.
\item maxtempi: Maximum temperature value for the day in Fahrenheit. This is relevant to this project as one of the main questions is to analyze bird species abundance around bird feeders in relation to temperature. Note that Weather Underground does provide the temperature measurements in Celsius, but only the Fahrenheit values were collected; the idea is that the values can be easily converted in the future if required.
\item mintempi: Minimum temperature value for the day in Fahrenheit. This is relevant to this study; see maxtempi.
\item meantempi: Mean temperature value for the day in Fahrenheit. This is relevant to this project; see maxtempi.
\item meanwspdi: Mean wind speed for the day in miles per hour. This is relevant to this study as one of the main questions is to determine whether wind speed has an effect on bird abundance at the bird feeders. Note that Weather Underground does provide the speed in kilometers per hour, but only the miles per hour values were collected. The values can be converted and appended later if required.
\item minwspdi: Minimum wind speed for the day in miles per hour. This is not relevant to this study as the mean wind speed for the day is more useful for determining the wind conditions throughout the day; see meanwspdi.
\item maxwspdi: Maximum wind speed for the day in miles per hour. This is not relevant to this study as the mean wind speed for the day is more useful for determining the wind conditions throughout the day; see meanwspdi.
\end{itemize}
To summarize, of the subset of the climate features presented above, the final group of relevant attributes is as follows: date, precipi, mintempi, maxtempi, meantempi, and meanwspdi. Eventually, only the mean temperature was deemed relevant for the actual study in terms of the temperature measurements; the minimum and maximum temperature values were ignored for the analysis. The details of this will be covered in the Preprocessing for Data Models section. The date is vital to ensure the climate measurements are from the same date as the PFW observation. The precipi values were also deemed relevant for this project. Finally, only the mean wind speed values (meanwspdi) were used in the study, while the maximum and minimum wind speed values were ignored. Further information about the reasoning behind these selections is in the next section.

\section{Script Implementation for City Attribute}
Before Weather Underground's API can be utilized, the PFW data needs additional location details besides just the latitude, longitude and state values of the bird feeders. The majority of the population of the United States lives within major city areas~\cite{sutton1997comparison}; as a result, much of the data from PFW and WU comes from these densely populated urban areas. The challenge now is to filter out all of the bird feeders that are located too far away from a major city. The pseudocode for the following steps is presented in Algorithm 1. The first step is to load the original PFW data into a data frame which can then be logically manipulated. Next, there is a loop that iterates through each row of the data frame.
For each row, or observation, the latitude, longitude and state values become the parameters for the method getNearestCity(). This method is then called and the return value is stored in the variable city. The getNearestCity() method is key to this procedure as it determines whether the bird feeder location is close enough to a city. This function contains the top 1,000 U.S. cities and their latitude, longitude and state values in a key-value pair data structure. With the given bird feeder location parameters, the distances between the feeder and the nearest major cities of the state are calculated. The city with the minimum distance is then returned, as this will provide the nearest location for a Weather Underground weather station. If the bird feeder is more than 40 kilometers away from a city, then that city is not considered and is ignored. If no suitable major city is found, then getNearestCity() will return NULL and that row is deleted from the data frame. Figure 3.1 is a generalized visual representation of the city selection process. The last step simply involves converting the data frame to a CSV file and exporting it with the correct file name. At this stage, all of the PFW observations now have a city attribute, for which climate data can be acquired through Weather Underground. The actual implementation, in the Python programming language, of Algorithm 1 is presented in Appendix C.

\begin{figure}[h]
\centering
\includegraphics[width=0.5\textwidth]{figures/BFandCities.png}
\caption{This figure illustrates how Algorithm 1 selects a city for a given bird feeder of PFW. The black circles labeled BF represent the locations of bird feeders. The colored circles represent a distance radius of 40 kilometers from the city center. For example, the green circle represents distances that are within 40 kilometers from the center of City 3. Given the situation above, BF1 and BF2 would be assigned City 1. Note that even though BF2 is in range of City 2, it is closer to City 1, so City 1 is picked by the algorithm. BF3 would be assigned City 3 and BF4 would be assigned City 2. Lastly, BF5 is not within 40 kilometers of any major city, so all observations from BF5 will be deleted from the final data set, as the city is needed for acquiring the climate data. This will be covered in detail in the next section.}
\end{figure}

\begin{algorithm}[H]
\DontPrintSemicolon
\SetKwInOut{Input}{Input}\SetKwInOut{Output}{Output}
\Input{Original Project FeederWatch Data}
\Output{Project FeederWatch Data with cities appended.}
$dataFrame\leftarrow "PFWData.csv"$ \;
dataFrame.addColumns("city")
\ForEach{Row r in dataFrame}{
\tcc{getNearestCity method returns the city which is at most 40km away from the lat/long coordinates. Returns NULL if no city in the state is found.}
$city\leftarrow getNearestCity(r.latitude, r.longitude, r.state)$ \;
\If{city not NULL}{
r.append("city", city)
}
\Else {
dataFrame.deleteRow(r)
}
}
\Return dataFrame.toCSV("PFWDataCities.csv")
\caption{Steps for appending city data to PFW where possible.}
\end{algorithm}

\section{Script Implementation for Climate Data Collection}
Before discussing the climate data collection script, it is worthwhile to point out that the bird count of PFW is performed over the course of 2 days. However, the original PFW data provides only the date for the first count day. As such, climate data for that day is collected and later used for analysis; the assumption is that the climate data applies to both day 1 and day 2 of the count days.
The climate data collection portion is relatively straightforward now that there is the city feature along with year, month and day. Algorithm 2 illustrates the steps in a more formal pseudocode fashion. The first step, as in Algorithm 1, is to load the CSV file into a data frame. Afterwards, empty columns are added for the climate attributes and they are named appropriately. Once again, there is a loop that iterates through each tuple, or row, of the data frame. For each row, measurements of mean temperature, minimum temperature, maximum temperature, mean wind speed and precipitation level are collected through WU's API link. The API link, once loaded, returns a JSON object that contains the climate attributes described above. In order to construct this link, the current row's city and date are required, as shown with the constructApiLink() function in Algorithm 2. The constructApiLink() method is quite simple as it only takes the values provided in the parameters and constructs a correctly formatted string, which is the API link. This API link, specific to the provided city and date, is then returned. The link is then loaded through the method loadURL() and the returned JSON object is stored in the variable climateJSON. The final steps of the loop are to check whether each desired climate attribute exists in climateJSON; if it does, then that value is appended to the appropriate column of the current row. Lines 8 and 9 of Algorithm 2 illustrate this by first checking whether maxTemp exists in the JSON, and if the condition is true, the maxTemp value is appended. The last step of the algorithm is to convert the data frame to the CSV format and output the file. As mentioned in the Bird Species Selection section, there are limitations to how many WU API calls the script can make in one day. For this reason the script processed data in subsets of 500 observations. The actual implementation for processing a subset of 500, using the Python programming language, is presented in Appendix D.

\begin{algorithm}[H]
\DontPrintSemicolon
\SetKwInOut{Input}{Input}\SetKwInOut{Output}{Output}
\Input{Original Project FeederWatch Data with cities.}
\Output{Project FeederWatch Data with climate data appended.}
$dataFrame\leftarrow "PFWDataCities.csv"$ \;
dataFrame.addColumns("meanTemp", "maxTemp", "minTemp", "windSpeed", "precip")
\ForEach{Row r in dataFrame}{
$apiLink\leftarrow constructApiLink(r.date, r.city)$ \;
$climateJSON\leftarrow loadURL(apiLink)$ \;
\If{meanTemp in climateJSON}{
r.append("meanTemp", meanTemp)
}
\If{maxTemp in climateJSON}{
r.append("maxTemp", maxTemp)
}
\If{minTemp in climateJSON}{
r.append("minTemp", minTemp)
}
\If{windSpeed in climateJSON}{
r.append("windSpeed", windSpeed)
}
\If{precip in climateJSON}{
r.append("precip", precip)
}
}
\Return dataFrame.toCSV("PFWDataClimate.csv")
\caption{Steps for appending Weather Underground climate data.}
\end{algorithm}

\section{Preprocessing for Data Models}
To allow for the best data models and insights possible, additional processing and filtering steps were performed on the dataset. The first step was to filter out bird feeder locations that may not experience low enough winter temperatures to warrant additional feeder visits. To achieve this, all bird feeder locations with latitude values below thirty-eight degrees were removed, as these locations may be too mild in terms of winter climate. Figure 3.2 provides a visual representation of the locations of the bird feeders after the latitude filtration step.
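As a minimal illustration of this filtering step, the cut can be expressed in R as shown below. The data frame name dat and the column name LATITUDE are assumptions for this sketch; the actual script and column naming may differ.
\begin{lstlisting}[language=R]
# Hypothetical sketch: keep only feeders at or above 38 degrees north latitude
before <- nrow(dat)
dat <- dat[dat$LATITUDE >= 38, ]
cat("Removed", before - nrow(dat), "low-latitude observations\n")
\end{lstlisting}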
Next, two additional attributes were added to aid in the next data processing steps. The first added attribute was the average temperature for a given winter season. For example, the winter of 2012 may have had an average season temperature of 35 degrees Fahrenheit; however, during the actual season, daily temperatures may have varied from 30 degrees to 45 degrees. The second attribute appended was the average precipitation level for the winter season of the year. This is very similar to the average season temperature, but this value represents the mean precipitation level for the PFW season. For example, the winter of 2007 may have had an average of 4 inches of rain, while actual daily measurements may have varied greatly. There were no seasonal averages for wind speed at the time of the study, so the actual daily average wind speed values were used for the data models. The wind speed values were scaled from 0 to 1 to allow for proper comparisons in the models. All of the seasonal averages were obtained through the ``WorldClim'' database~\cite{worldclimHome:online}. WorldClim provides climate data for minimum, mean, and maximum temperature and precipitation for the years 1970--2000~\cite{worldclim:online}. The spatial resolution of the climate data ranges from 1 square kilometer to 340 square kilometers~\cite{worldclim:online}. As with the Weather Underground data, this climate data is already validated and ready for use in this project.

Finally, with the two added attributes it was possible to calculate and append the anomalies in the mean daily temperatures and precipitation levels. It is worth pointing out again that only the mean temperature values are used, while the minimum and maximum temperature values for the observation are ignored. This is because we are interested in reactions from species when temperatures were especially low, thus requiring more feeder visits. Deviations of this mean temperature value from the winter season's average temperature give a better indication of how cold it was on the day of the observation. The anomalies for mean temperature were calculated through the following steps. The average season temperature was subtracted from the collected mean temperature for the observation. The resulting value was either negative, positive or zero. A negative anomaly value represents a lower than average winter temperature, a positive value represents a higher than average temperature, and a value of 0 indicates an average winter temperature. These steps were performed for every row and the resulting value was appended as the temperature anomaly for that row. The anomalies for the precipitation values were calculated in the same manner as for the temperature. The anomalies allow for more accurate comparisons between the number of individuals observed at the feeders and the deviations from the average winter temperature and precipitation levels. This is because the members of a bird species that live in colder or rainier regions are more accustomed to those conditions than individuals of the same species in warmer and drier regions. So by comparing the abundance to the deviations from the seasonal averages, we get a better sense of how the bird species are reacting to the changes in the regional climates.

\begin{figure}[h]
\centering
\includegraphics[width=1.0\textwidth]{figures/bf_locations_USmap.png}
\caption{The cyan colored points on the United States map represent all of the actual bird feeders used in this study.
As discussed earlier, all of the feeder locations are above the 38 degree latitude boundary. This avoids all observations from locations that may have mild winters. For example, birds in southern California and Texas are going to experience warmer winters than birds in New York or Michigan simply due to geographic location. Since one of the goals of this project is to explore the reactions of bird species to cold temperatures, it is advantageous to only include geographic areas which experience relatively cold winters.}
\end{figure}

\section{Linear-mixed Effect Models}
Once the data sets were constructed, the next challenge was to pick a valid and useful model. Here Professor Francis was of great help, as he had the most expertise in deciding which data model was applicable. The linear-mixed effect model was chosen as it allows us to observe relationships among multiple variables and grouping factors in the data set; more on this later. The linear-mixed effect model is a few degrees higher in complexity than the simple linear model, so it is worthwhile to cover the basics of these models before continuing any further.

Let's start at the beginning with the basic linear model. A hypothetical relationship of interest can be represented as this formula:
\[ y \sim x \]
This formula reads as ``y as a function of x'' or it can also be referred to as ``y predicted by x''. For this paper, the two terms are considered equivalent and are used interchangeably. The y in this case is considered a dependent variable. The variables to the right of the $\sim$ symbol are referred to as the predictor or independent variables. In the formula above the only predictor variable is the x, since it is a simple linear relationship. The predictor variables can also be referred to as fixed effects, but this is not relevant yet. In real world studies, no relationship is defined by just one effect, as with y being completely determined by x. This is far too deterministic. Other effects at play must be accounted for in the model. There may be many ``random factors'' at play. To account for this, let's add another term to the formula:
\[ y \sim x + \epsilon\]
This new additive factor, $\epsilon$, accounts for all of the random error. This term stands for everything that affects y that is not x. In other words, from our perspective the $\epsilon$ accounts for all the effects that are uncontrollable by us. This formula is the schematic depiction of the model that would be built using a statistical tool. Now that the simple formula is defined, let's take steps towards building the linear-mixed model formula. First, let's add another fixed effect, say ``a'', resulting in this formula:
\[ y \sim x + a + \epsilon\]
It is important to note that ``a'' is added with a plus sign, indicating an additive relationship. An additive effect is such that the effect of a on y is not dependent on the effect of x on y. The variables are independent from each other in this sense. This brings up one of the important assumptions of the linear model: the linear model assumes that all of the predictor variables are independent from each other. Logically, this makes sense, as if there are 2 or more similarly acting predictor variables, then it becomes difficult to identify which predictor is playing the larger role~\cite{RMixedEffect:online}. Now it is time to introduce the random effects, and in order to illustrate this concept, let's make the example more concrete and relevant.
Though the formula below appears similar to the actual study, it is completely hypothetical and only for explanatory purposes. Let's say that we want to explore the relationship between the number of birds seen at the feeders, mean temperature and precipitation. Also assume that there are different bird species in the data set; this is important for the random effects. The formula looks like this now:
\[ Nseen \sim temp + precip + \epsilon\]
Recall that one of the major assumptions for using the linear model is the independence of the predictors, but multiple responses from the same bird species violate this assumption. This is because individuals from the same species are going to behave similarly. The similarity within species is going to be an idiosyncratic factor that affects all responses from the same species. If this is unaccounted for, then the responses will be rendered inter-dependent and not independent. These species-specific differences are accounted for in the linear model with the assumption of random intercepts for each species. Essentially, this means that each bird species is assigned a different intercept value when the groupings are accounted for, but the line estimates for the groups will have the same slope value as the overall line model for the fixed effects~\cite{RandomInt:online}. These intercept estimations are calculated for each species grouping. These intercepts are the points where the line estimates for each of the species cross the y-axis; in other words, where temp and precip have values of 0 in their linear models. To account for the grouping of species, a random effect is added to the formula:
\[ Nseen \sim temp + precip + (1|species) + \epsilon\]
The bird species term appears to be complicated, but that is only due to the syntax. This syntax mirrors the R programming language, which was used to construct the actual models for this study; this is covered in great detail in the later sections. The ``species'' term translates to ``assume a different intercept for each bird species''. The ``1'' stands for the intercept~\cite{RMixedEffect:online}. The formula above represents a linear-mixed effect model. Now perhaps it becomes clearer why this model is named ``mixed''. In the earlier models only the fixed effects, or predictor variables, were taken into account, and then a generic error term was added. Now, essentially, there is more structure to that epsilon error term through the addition of a random effect. However, the $\epsilon$ is still present in the formula above because there are differences present within the species groups. The epsilon errors were calculated for all of our actual models, but the values were not used in the analysis. Instead, only the defined random effects were used in the final analysis.
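To make the random-intercept idea concrete, the sketch below shows how, once such a model has been fitted with the lme4 package (the actual fitting code used in this study is shown in the next section), the per-species intercepts can be inspected. The data frame dat and its columns are hypothetical here.
\begin{lstlisting}[language=R]
library(lme4)
# Hypothetical data frame 'dat' with columns Nseen, temp, precip, species
m <- lmer(Nseen ~ temp + precip + (1|species), data = dat)
fixef(m)          # overall (fixed-effect) intercept and slopes
ranef(m)$species  # per-species deviations of the intercept
coef(m)$species   # per-species intercepts; the slope columns are identical
\end{lstlisting}
The last line illustrates the key point of the random-intercept model: each species receives its own intercept, while the slopes for temp and precip are shared across species.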
\section{R Programming Language and Linear Modeling}
\definecolor{codegreen}{rgb}{0,0.6,0}
\definecolor{codegray}{rgb}{0.5,0.5,0.5}
\definecolor{codepurple}{rgb}{0.58,0,0.82}
\definecolor{backcolour}{rgb}{0.95,0.95,0.92}
\lstdefinestyle{mystyle}{
backgroundcolor=\color{backcolour},
commentstyle=\color{codegreen},
keywordstyle=\color{magenta},
numberstyle=\tiny\color{codegray},
stringstyle=\color{codepurple},
basicstyle=\footnotesize,
breakatwhitespace=false,
breaklines=true,
captionpos=b,
keepspaces=true,
numbers=left,
numbersep=5pt,
showspaces=false,
showstringspaces=false,
showtabs=false,
tabsize=2
}
\lstset{style=mystyle}
For this project all of the data modeling was done through the use of the R programming language~\cite{RManual}. This is because R has a robust set of packages that provide powerful statistical analysis. Additionally, our biology expert, Professor Francis, is well versed in the use of R for biological studies. The formulas presented in the above sections are just schematic descriptions. The lme4 package of R allows us to actually build the linear models and analyze the relationships~\cite{lme4Usage}. The relationships are analyzed through the model summary, which is presented as a table. It is best to illustrate this with the continued example of the hypothetical bird species study formula:
\[ Nseen \sim temp + precip + (1|species) + \epsilon\]
The R code for constructing and summarizing the model is presented below. Notice the lack of the $\epsilon$, or the random error variable. The lmer method automatically accounts for the random error, without the explicit declaration in the parameters. However, the random error values are not of particular importance in this study and are ignored for the remainder of the analysis.
\begin{lstlisting}[language=R]
model <- lmer(NSeen ~ temp + precip + (1|species))
summary(model)
\end{lstlisting}
The output of the ``summary'' method is presented in Tables 3.2 and 3.3. As a reminder, no actual model was built as there is no data set; the tables are only for explanatory purposes. Let's first focus on Table 3.2. This table shows all of the fixed effects and their relationships to the abundance of the bird species. The ``Intercept'' row covers the y intercept of the estimated line, the standard error and the t value. The intercept is estimated through the line models when all of the predictor variables are 0. Recall, the predictor variables are ``temp'' and ``precip''. The intercept estimate is not applicable to this study. The ``Estimate'' column presents the slope estimates for each of the fixed effects. For example, there is a positive slope estimate of 2.02 associated with temp, indicating that as the temp value increased so did the Nseen value. ``SE'' is the column representing the standard error, and finally there is the ``t value'' column. The standard error is for the slope estimate value and is used to calculate the t value. The t value is crucial for this study and is covered in great detail in the Significance and t value section. The other table that is included with the model summary is the Random Effects table, represented by Table 3.3. It is important to note the variance strengths, shown in the ``Variance'' column, among the different random effects. In this case we only have one random effect, but nonetheless it has caused some variance among the data. This indicates that it was useful to account for this in the data model. The other column is the ``Std. Dev.''.
This is a measure of how much variability in the dependent measure is due to bird species~\cite{RMixedEffect:online}. For this study, a simple check was done with every constructed model to make sure the Random Effects table showed variances for the variables chosen. This is covered in more detail in the Data Models for this Study section.

% Please add the following required packages to your document preamble:
% \usepackage{longtable}
% Note: It may be necessary to compile the document several times to get a multi-page table to line up properly
\begin{longtable}[c]{|l|l|l|l|}
\caption{Model Summary for the hypothetical situation of modeling bird abundance, temperature and precipitation.}
\label{tab:hypo-fixed}\\
\hline
Fixed Effects & Estimate & SE & t value \\ \hline
\endhead
%
Intercept & 7.09 & 4.8 & 1.477 \\ \hline
temp & 2.02 & 1.6 & 1.263 \\ \hline
precip & -4.32 & 2.47 & -1.749 \\ \hline
\end{longtable}

% Please add the following required packages to your document preamble:
% \usepackage{longtable}
% Note: It may be necessary to compile the document several times to get a multi-page table to line up properly
\begin{longtable}[c]{|l|l|l|l|}
\caption{Random effects for the hypothetical situation of modeling bird abundance, temperature and precipitation.}
\label{tab:hypo-random}\\
\hline
Groups & Name & Variance & Std.Dev. \\ \hline
\endhead
%
species & Intercept & 2.24019 & 1.49009 \\ \hline
\end{longtable}

\section{Significance and t value}
The t value is very important for the analysis portion of this study. This value is calculated by dividing the ``Estimate'' by the ``SE''. The resulting value is a measure of the fixed effect strength as well as the significance of the relationship presented by the model. The t value is a more reliable measure of the effect strength than just the slope magnitude because it takes into account the standard error. The t value is a major component in comparing how different climatic factors affect the bird abundance at the feeders and at what strengths.

The second importance of the t value comes in the form of significance. We used t values as a proxy for assessing the strength of the effect of predictor variables because the lme4 package does not calculate p-values. In essence, when the magnitude of t is greater than or equal to 2, the standard error of the effect size will be less than 1/2 the absolute value of the effect. In other words, this means the 95 percent confidence interval would never overlap zero when the magnitude of t is greater than 2. The confidence intervals were calculated for each of the models constructed using the R method confint. This method takes the constructed model as a parameter and returns the lower and upper bounds of the confidence interval. The effects were considered significant if this range did not overlap zero. As explained in the previous paragraph, this non-overlap of zero is obtained if the absolute t value is greater than or equal to 2. As such, from this point on the significance of the effect is only going to be quantified by the t value.

\section{Assumptions}
As with any other model, the linear-mixed model comes with assumptions that have to be satisfied in order for the linear model to be meaningful~\cite{RLinear:online}. To check that the assumptions are satisfied, a simple test can be performed with the residual values. A residual value is the difference between the observed dependent variable and the predicted value from the line estimate~\cite{Residuals:online}.
If the histogram of the residuals follows an approximately normal curve, then one can assume the assumptions are met, as the distances of the data points from the line estimate follow a roughly normal curve when plotted~\cite{LinRegress:online, LinearAssump:online}. The residual histograms were inspected individually by eye, as the quantitative methods available in R were not well suited to detecting approximately normal distributions; too many of the roughly normal distributions were rejected by these methods, so it was decided to check every histogram by eye. In addition to the histograms, Q-Q plots for the residual values were also created and inspected. A Q-Q plot is a scatterplot created by plotting 2 sets of quantiles, or percentiles, against one another~\cite{QQPlots:online}. If both sets of quantiles came from the same distribution, then we should see the points forming a line~\cite{QQPlots:online}. For this study the residual values need to follow an approximately normal distribution; as such, the residual quantiles are plotted against the normal distribution quantiles. If the residuals follow an approximately normal distribution, then the points should roughly follow a line. These specific Q-Q plots are referred to as Q-Q normal plots. One histogram and one Q-Q normal plot were constructed and inspected for each of the data models built. This process is covered in detail in the next section.

\section{Data Models for this Study}
The R code for modeling the actual constructed data set is below. Notice that the syntax is very similar to the formulas discussed before. Besides the 3 flags at the end of the parameters, the formula schema is almost identical in format to the previous schematics.
\begin{lstlisting}[language=R]
model <- lmer(log(NSeen) ~ temp_anomalies + scale(wspeed_kmph) + precip_anomalies + (1|EFFORT_HRS_ATLEAST) + (1|LOC_ID) + (1|NHalfDays) + (1|city), REML=F, data=dat1, na.action="na.fail")
\end{lstlisting}
The linear mixed-effect model is constructed using the lmer function, with the returned data model stored in the variable model. One model was built for each of the 23 bird species, using the same R code and methods. The results of these species-specific linear models are discussed in the Results section. The key components of the lmer function are the parameters, as they determine which variables to build the linear models with. In addition to the 23 models for each of the species, one other general model was built to encompass all of the bird species. The only difference in the construction of this model is the inclusion of an additional random effect, (1\textbar BirdSpp), or bird species. This allows the data points to be grouped by bird species. The code for constructing the generalized linear-mixed effect model is below.
\begin{lstlisting}[language=R]
model <- lmer(log(NSeen) ~ temp_anomalies + scale(wspeed_kmph) + precip_anomalies + (1|EFFORT_HRS_ATLEAST) + (1|LOC_ID) + (1|NHalfDays) + (1|city) + (1|BirdSpp), REML=F, data=dat2, na.action="na.fail")
\end{lstlisting}
The first variable used in the method is log(NSeen), and this is simply the natural log of the number of individuals seen of that bird species, as discussed in the Given Attributes of the Project FeederWatch Data section. This is the dependent variable in the linear models, and it is followed by the $\sim$ symbol~\cite{RLinearModel:online}. That symbol indicates the term ``function of'' and is followed by the independent variables to the right. These independent variables are also known as predictor variables.

One may wonder why the natural log of the number of individuals at the feeders is used in the models. In order to illustrate why the natural log of the Nseen value was used instead of just the Nseen value, it is best to walk through a concrete example. Let's focus on the data set for the species White-throated Sparrow. The histogram of the values of Nseen for this species is presented in Figure 3.3. It is very clear that there is a left skew to the distribution. This results in a non-normal distribution of the residuals, as presented in Figure 3.4. This histogram does not exhibit an approximately normal distribution for the purposes of this study. This violates the major assumption of using the linear mixed-effect model, as this model assumes an approximately normal distribution of the residuals.

\begin{figure}[h]
\centering
\includegraphics[width=12cm]{figures/WhiteSparrowNSeen.png}
\caption{The histogram of the Nseen values of the White-throated Sparrow.}
\end{figure}

\begin{figure}[h]
\centering
\includegraphics[width=12cm]{figures/WSResidNolog.png}
\caption{The histogram of the residual values of the linear model constructed with the Nseen values. This is for the White-throated Sparrow.}
\end{figure}

The histogram of the natural log of Nseen is presented in Figure 3.5. There is still a left skew present; however, there is a better distribution of values. The histogram of the residuals from the linear mixed-effect model with the natural log of Nseen is presented in Figure 3.6. This time the residual values follow an approximately normal distribution, thus allowing us to draw insights from the linear mixed-effect model.

\begin{figure}[h]
\centering
\includegraphics[width=12cm]{figures/WhiteSparrowlogNseen.png}
\caption{The histogram of the natural log of the Nseen values of the White-throated Sparrow.}
\end{figure}

\begin{figure}[h]
\centering
\includegraphics[width=12cm]{figures/WSResidlog.png}
\caption{The histogram of the residual values of the linear model constructed with the log(Nseen) values. This is for the White-throated Sparrow.}
\end{figure}

The predictor variables for the models of this study are temp\_anomalies, precip\_anomalies and scale(wspeed\_kmph). These variables can also be referred to as fixed effects, as they are the main focus of the analysis. Note that the scaled wind speed value is used for the models. This is because the standardized values of the wind speed allow for better comparisons against the other independent variables. Recall that the wind speed values are in physical units, whereas the other variables, such as the temperature anomalies, are already expressed as deviations from the seasonal averages. These fixed effects are added together by the addition sign to indicate they are additive properties.
Recall that the addition sign indicates that the variables are to be modeled as additive effects. These effects are such that there is no dependence among the additive terms~\cite{AddProps:online}. In other words, the effect of temp\_anomalies on log(NSeen) does not depend on the value of scale(wspeed\_kmph) or any of the other additive effects.

The last additive effects to cover are the random effects, otherwise known as grouping variables. The random effects allow the data models to account for the grouping of certain observations. For example, the syntax (1\textbar city) tells the method lmer to fit a linear model with a varying-intercept group effect using the variable city. In other words, it specifies that all observations that have the same city attribute belong to the same group, as the weather data may have been collected from the same weather stations. As such, those tuples should not be considered independent from each other and the linear model is adjusted accordingly. Since the focus of this project is specifically body mass and the influences of climate conditions, the exact influence of city and the other random effects on log(Nseen) is ignored.

To summarize, the grouping variables are (1\textbar city), (1\textbar NHalfDays), (1\textbar LOC\_ID) and (1\textbar EFFORT\_HRS\_ATLEAST). The effort hours (EFFORT\_HRS\_ATLEAST) and number of half days (NHalfDays) are groupings based on the effort of the observer making that particular observation. This is important to take into account as different observers of PFW have varying degrees of precision in making bird feeder observations. Additionally, one observer may submit multiple observations; those observations need to be grouped. Location ID (LOC\_ID) is the location grouping. Whereas the city grouping discussed previously is based on the nearest major city, this grouping is based on the location of the actual bird feeder. This is also important as there may be multiple observations from a single bird feeder and thus those observations should be viewed as a group. As a reminder, this location ID was already provided in the original PFW data set.

% Please add the following required packages to your document preamble:
% \usepackage{longtable}
% Note: It may be necessary to compile the document several times to get a multi-page table to line up properly
\begin{longtable}[c]{|l|l|l|l|}
\caption{Random effects for the linear-mixed model of the White-throated Sparrow.}
\label{tab:wts-random}\\
\hline
Groups & Name & Variance & Std.Dev. \\ \hline
\endhead
%
LOC\_ID & Intercept & 0.24015 & 0.4901 \\ \hline
city & Intercept & 0.0834 & 0.2889 \\ \hline
NHalfDays & Intercept & 0.002411 & 0.0491 \\ \hline
EFFORT\_HRS\_ATLEAST & Intercept & 0.002052 & 0.0453 \\ \hline
\end{longtable}

To clearly illustrate the effects of the grouping variables, the random effects table for the previous example of the White-throated Sparrow is presented in Table 3.4. The most important attribute is the Variance, as this determines how much influence the random effect variable has on the model. The higher the variance, the stronger the effect of that variable. It is clear from the table that LOC\_ID and city have the most influence, while (1\textbar EFFORT\_HRS\_ATLEAST) and (1\textbar NHalfDays) have less influence, but there is influence nonetheless. For this project, all of the Random Effects tables were inspected to ensure there was at least some amount of variation among the groups. A full list of the Random Effects tables for each of the 23 species is presented in Appendix A.
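This inspection of the random-effect variances does not have to be done by reading the printed summary alone; a minimal sketch of extracting them programmatically with lme4 is given below, assuming a fitted model object named model as in the code above.
\begin{lstlisting}[language=R]
library(lme4)
# Extract the random-effect variance components as a data frame
vc <- as.data.frame(VarCorr(model))
# Relevant columns: grouping factor ('grp'), variance ('vcov')
# and standard deviation ('sdcor')
vc[, c("grp", "vcov", "sdcor")]
# Simple check that every component shows non-zero variance
all(vc$vcov > 0)
\end{lstlisting}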
The (1\textbar BirdSpp) term is used only in the generalized model, as it groups observations by species name. This means that data points which have the same bird species name are considered non-independent from each other and thus are treated accordingly in the model. The idea here is to observe any large-scale trends that occur when considering all the species' datasets together as one large dataset.

Finally, the last few parameters are flags and the source of the data. The REML=F flag forces the model to use maximum likelihood for parameter estimates rather than restricted maximum likelihood (REML). The relevance of this is that if the flag were left at its default value of REML, it may produce an unreliable model; this is especially true when comparing models with different effects, which is the case for this analysis. The data= term sets the data source. The data sources for the 23 models were the 23 data frames, one for each bird species. Data frames are a form of data representation that allows the code methods to process the data; in other words, raw data of many formats are converted to data frames to be used in code functions. For the generalized model, the data source was the combined data frame which contained all of the tuples from each of the 23 species. The data frames contain all of the attributes required for constructing the models. In the end, a total of 23 linear-mixed models were constructed and analyzed, one for each bird species; in addition, a general model was constructed with all of the species combined. The na.action=``na.fail'' flag sets the action taken when a null value is encountered in the variables passed to lmer. The ``na.fail'' option ensures that the linear-mixed model object is only returned by lmer if there are no null values in the supplied data. All of the null values were removed from the constructed datasets during the preprocessing steps before the lmer method was actually called, but the flag ensures that no null values were overlooked.

\section{Model Outputs}
The returned object from the method lmer is the constructed linear-mixed model. The R summary method is then called on this returned model. This summary method returns the slope estimates, the standard errors for the slope estimates and the t value for each of the independent variables. As a reminder, the t value is the slope estimate divided by the standard error value. Also recall that the independent variables for this study are the temperature anomalies, precipitation anomalies and scaled wind speeds. The full summary table for the White-throated Sparrow linear-mixed model, which includes all of the attributes for each fixed effect, is presented in Table 3.5.

% Please add the following required packages to your document preamble:
% \usepackage{longtable}
% Note: It may be necessary to compile the document several times to get a multi-page table to line up properly
\begin{longtable}[c]{|l|l|l|l|}
\caption{Model Summary for the White-throated Sparrow. Recall that fixed effects is another name for independent variables.}
\label{tab:wts-fixed}\\
\hline
Fixed Effects & Estimate & SE & t value \\ \hline
\endhead
%
Intercept & 7.09E-01 & 4.88E-02 & 14.522 \\ \hline
Temp. Anom. & -2.02E-02 & 1.66E-03 & -12.183 \\ \hline
scale(wind speed) & 5.18E-03 & 1.03E-02 & 0.503 \\ \hline
Precip. Anom. & 4.32E-03 & 6.47E-03 & 0.667 \\ \hline
\end{longtable}

The slope estimate, labeled ``Estimate'', of the linear model is the estimated effect of each climate attribute on the natural log of the number seen of the species.
For example, let's just focus on the temperature anomalies. If the slope is a negative value, then more individuals of the bird species were seen at the feeders when the mean daily temperatures were lower than the average winter temperature for that year. If the slope estimate is a positive value, then as the temperature anomalies became more negative, fewer individuals visited the bird feeders; in other words, as the temperature increased, more individuals of the species were observed.

Recall that slope magnitude is not an accurate measure of effect strength; the effect strength is measured by the t value. The t value is also analogous to the statistical p-value in terms of model significance, except that the t value must be above 2 or below -2 in order for the effect to be considered significant. With the p-value, results are considered significant if the p-value is below 0.05~\cite{p-value:online}. For this project, the t value is also a measure of how strong the effect of that particular climate attribute is on the number of individuals seen at the feeders. For example, let's take the anomalies in precipitation levels. If the absolute value of the t value for this fixed effect is lower than 2, then the relationship is not significant enough to warrant insights. If the absolute t value is above 2, then we can safely draw insights from the relationship, as the effect of precipitation is strong enough~\cite{t-value:online}. The magnitude of the t value is also informative, as the greater the value, the stronger the effect~\cite{t-value-interp:online}. For example, let's say one species has a t value of 2.5 while another bird species has a t value of 4.5; the second species is more affected by the anomalies in precipitation than the first species.

\section{Residual Values}
In order to use the linear models for analysis, the main assumption is that the residual values must have an approximately normal distribution. A histogram of the residuals for each of the 23 bird species' data models and for the generalized data model was created and inspected to check for an approximately normal distribution. This process was done through visual inspection; the decision to rely on visual inspection was reached after talks with Professor Francis, who has years of experience modeling biological data. Two examples of what we determined to be approximately normal distributions are presented in Figures 3.7 and 3.8. These are the residual values for the bird species White-throated Sparrow and Northern Cardinal, respectively. Finally, Figures 3.9 and 3.10 present the histograms of the residuals that we deemed not approximately normal. They are from the species Hairy Woodpecker and Red-bellied Woodpecker, respectively.

\begin{figure}[h]
\centering
\includegraphics[width=10cm]{figures/WhiteSpResid.png}
\caption{Residual values from the White-throated Sparrow data.}
\end{figure}

\begin{figure}[h]
\centering
\includegraphics[width=10cm]{figures/NorthCardResid.png}
\caption{Residual values from the Northern Cardinal data.}
\end{figure}

\begin{figure}[h]
\centering
\includegraphics[width=10cm]{figures/HairyWoodResid.png}
\caption{Residual values from the Hairy Woodpecker data.}
\end{figure}

\begin{figure}[h]
\centering
\includegraphics[width=10cm]{figures/redbwoodResid.png}
\caption{Residual values from the Red-bellied Woodpecker data.}
\end{figure}

Lastly, Q-Q normal plots for the residual values were also created and inspected to verify the approximately normal distributions.
For these scatter plots, if the points approximately form a line, then one can assume the residual values follow a roughly normal distribution. An example of a Q-Q normal plot representing an approximately normal distribution is presented in Figure 3.11; this plot is for the White-throated Sparrow. Finally, a Q-Q normal plot representing a non-normal distribution is presented in Figure 3.12; this plot is for the Hairy Woodpecker.

\begin{figure}[h]
\centering
\includegraphics[width=10cm]{figures/whtspa_qqplot.png}
\caption{Q-Q normal plot of the residual values from the White-throated Sparrow data. Observe how the majority of the points follow the red line.}
\end{figure}

\begin{figure}[h]
\centering
\includegraphics[width=10cm]{figures/haiwoo_qqplot.png}
\caption{Q-Q normal plot of the residual values from the Hairy Woodpecker data. Observe the major deviations from the red line.}
\end{figure}
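For completeness, a minimal sketch of how such diagnostic plots can be produced in R for a fitted lme4 model is shown below, assuming a model object named model as in the earlier code; the exact plotting options used for the figures in this chapter may differ.
\begin{lstlisting}[language=R]
# Residual diagnostics for a fitted linear mixed-effect model
r <- resid(model)          # extract the residuals
hist(r, breaks = 40,       # histogram: should look roughly normal
     main = "Histogram of residuals", xlab = "Residual")
qqnorm(r)                  # Q-Q normal plot of the residuals
qqline(r, col = "red")     # reference line; points should follow it
\end{lstlisting}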
\hypertarget{group__numpp__differentiation}{}\section{Differentiation}
\label{group__numpp__differentiation}\index{Differentiation@{Differentiation}}
\subsection*{Modules}
\begin{DoxyCompactItemize}
\item
\hyperlink{group__numpp__differentiation__automatic}{Automatic Differentiation}
\begin{DoxyCompactList}\small\item\em This module provides the means for performing automatic differentiation. \end{DoxyCompactList}\item
\hyperlink{group__numpp__differentiation__finite}{Finite Differentiation}
\begin{DoxyCompactList}\small\item\em This module provides the means for performing finite difference differentiation. \end{DoxyCompactList}\item
\hyperlink{group__numpp__differentiation__symbolic}{Symbolic Differentiation}
\begin{DoxyCompactList}\small\item\em This module provides the means for performing symbolic differentiation. \end{DoxyCompactList}\end{DoxyCompactItemize}
\subsection{Detailed Description}
Module provides all means for differentiation provided by numpp. It includes 3 submodules\+:
\begin{DoxyItemize}
\item automatic
\item finite difference
\item symbolic
\end{DoxyItemize}
\begin{DoxyCode}
\textcolor{preprocessor}{#include"numpp/differentiation/differentiation.hpp"}
\end{DoxyCode}
{\bfseries Check each of the modules to see provided functionality}
\section{\module{symbol} --- Constants used with Python parse trees} \declaremodule{standard}{symbol} \modulesynopsis{Constants representing internal nodes of the parse tree.} \sectionauthor{Fred L. Drake, Jr.}{[email protected]} This module provides constants which represent the numeric values of internal nodes of the parse tree. Unlike most Python constants, these use lower-case names. Refer to the file \file{Grammar/Grammar} in the Python distribution for the definitions of the names in the context of the language grammar. The specific numeric values which the names map to may change between Python versions. This module also provides one additional data object: \begin{datadesc}{sym_name} Dictionary mapping the numeric values of the constants defined in this module back to name strings, allowing more human-readable representation of parse trees to be generated. \end{datadesc} \begin{seealso} \seemodule{parser}{second example uses this module} \end{seealso}
% mnras_template.tex % % LaTeX template for creating an MNRAS paper % % v3.0 released 14 May 2015 % (version numbers match those of mnras.cls) % % Copyright (C) Royal Astronomical Society 2015 % Authors: % Keith T. Smith (Royal Astronomical Society) % Change log % % v3.0 May 2015 % Renamed to match the new package name % Version number matches mnras.cls % A few minor tweaks to wording % v1.0 September 2013 % Beta testing only - never publicly released % First version: a simple (ish) template for creating an MNRAS paper % % MTM: Added my own definitions, including dynamic Figure, Table, % Section and Equation referencing macros. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % Basic setup. Most papers should leave these options alone. \documentclass[fleqn,usenatbib,usedcolumn]{mnras} % Uncomment the following to ensure PDFLaTeX processing by arXiv %\pdfoutput=1 % Define which figures you want arXiver to pick up (see https://arxiver.wordpress.com/faq/): %@arxiver{da_vs_z.pdf,da_vs_theta.pdf} % MNRAS is set in Times font. If you don't have this installed (most LaTeX % installations will be fine) or prefer the old Computer Modern fonts, comment % out the following line \usepackage{newtxtext,newtxmath} \renewcommand{\la}{\lesssim} % for less than similar from newtxmath, not \la from mnras.cls \renewcommand{\ga}{\gtrsim} % for greater than similar from newtxmath, not \la from mnras.cls % Depending on your LaTeX fonts installation, you might get better results with one of these: %\usepackage{mathptmx} %\usepackage{txfonts} % Use vector fonts, so it zooms properly in on-screen viewing software % Don't change these lines unless you know what you are doing \usepackage[T1]{fontenc} %\usepackage{ae,aecompl} %%%%% AUTHORS - PLACE YOUR OWN PACKAGES HERE %%%%% % Only include extra packages if you really need them. Common packages are: \usepackage{graphicx} % Including figure files \usepackage{amsmath} % Advanced maths commands %\usepackage{amssymb} % Extra maths symbols \usepackage{pdflscape} % Landscape tables and figures \usepackage[usenames,dvipsnames]{color} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%% AUTHORS - PLACE YOUR OWN COMMANDS HERE %%%%% % Please keep new commands to a minimum, and use \newcommand not \def to avoid % overwriting existing commands. 
Example: %\newcommand{\pcm}{\,cm$^{-2}$} % per cm-squared % Commenting \newcommand{\MMcom}[1]{\noindent{\color{Plum}\textbf{#1}}} % Commands for proper Section, Table, Appendix, Figure and Equation reference styles \newcommand{\Sref}[1]{Section \ref{#1}} \newcommand{\Tref}[1]{Table \ref{#1}} \newcommand{\Aref}[1]{Appendix \ref{#1}} \sfcode`\.=1001\sfcode`\?=1001\sfcode`\!=1001 \newcommand{\Fref}[1]{\ifhmode \ifnum\spacefactor=1001 Figure \ref{#1}\else Fig.\ \ref{#1}\fi \else Figure \ref{#1}\fi} \newcommand{\Eref}[1]{\ifhmode \ifnum\spacefactor=1001 Equation (\ref{#1})\else equation (\ref{#1})\fi \else Equation (\ref{#1})\fi} % Symbols \newcommand{\cms}{\ensuremath{\textrm{cm\,s}^{-1}}} \newcommand{\ms}{\ensuremath{\textrm{m\,s}^{-1}}} \newcommand{\kms}{\ensuremath{\textrm{km\,s}^{-1}}} \newcommand{\pcm}{\ensuremath{\textrm{cm}^{-1}}} \newcommand{\pcmsq}{\ensuremath{\textrm{cm}^{-2}}} \newcommand{\SN}{\ensuremath{\textrm{S/N}}} \newcommand{\CN}{\ensuremath{\textrm{C/N}}} \newcommand{\chisq}{\ensuremath{\chi^2}} \newcommand{\chisqn}{\ensuremath{\chi^2_\nu}} \newcommand{\lya}{\ensuremath{\textrm{Ly-}\alpha}} \newcommand{\lyb}{\ensuremath{\textrm{Ly-}\beta}} \newcommand{\zem}{\ensuremath{z_\textrm{\scriptsize em}}} \newcommand{\zab}{\ensuremath{z_\textrm{\scriptsize abs}}} \newcommand{\NHI}{\ensuremath{N_\textsc{h\scriptsize{\,i}}}} \newcommand{\Nion}[2]{\ensuremath{N_{\rm #1\textsc{\scriptsize{\,#2}}}}} \newcommand{\lNHI}{\ensuremath{\log(N_\textsc{h\scriptsize{\,i}}/\textrm{cm}^{-2})}} %\newcommand{\ion}[2]{\ensuremath{\textrm{#1\,{\scshape{#2}}}}} \newcommand{\tran}[3]{\ensuremath{\ion{#1}{#2}\,\lambda\textrm{#3}}} \newcommand{\MH}[1]{\ensuremath{\left[\textrm{#1}/\textrm{H}\right]}} \newcommand{\ewr}[3]{\ensuremath{W_\textrm{\scriptsize r}(\ion{#1}{#2}\,\lambda{#3})}} \newcommand{\daa}{\ensuremath{\Delta\alpha/\alpha}} \newcommand{\headsort}{\ensuremath{\textsc{uves\_headsort}}} \newcommand{\popler}{\ensuremath{\textsc{uves\_popler}}} % Setup new table styles \usepackage{array} \newcolumntype{:}{>{\global\let\currentrowstyle\relax}} \newcolumntype{;}{>{\currentrowstyle}} \newcommand{\rowstyle}[1]{\gdef\currentrowstyle{#1}% #1\ignorespaces } %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%% TITLE PAGE %%%%%%%%%%%%%%%%%%% % Title of the paper, and the short title which is used in the headers. % Keep the title short and informative. \title[UVES SQUAD Data Release 1]{The UVES Spectral Quasar Absorption Database (SQUAD) Data Release 1: The first 10 million seconds} % The list of authors, and the short list which is used in the headers. % If you need two or more lines of authors, add an extra line using \newauthor \author[M. T. Murphy et al.]{Michael T. Murphy,$^{1}$\thanks{E-mail: [email protected] (MTM)} Glenn G. Kacprzak,$^{1}$ Giulia A. D. Savorgnan,$^{1}$\newauthor Robert F. Carswell$^{2}$\\ % List of institutions $^{1}$Centre for Astrophysics and Supercomputing, Swinburne University of Technology, Hawthorn, Victoria 3122, Australia\\ $^{2}$Institute of Astronomy, University of Cambridge, Madingley Road, Cambridge, CB3 0HA, UK } % These dates will be filled out by the publisher % e.g. \date{Accepted 2013 November 11. Received 2013 November 10; in original form 2013 October 30} \date{Accepted ---. Received ---; in original form ---} % Enter the current year, for the copyright statements etc. 
\pubyear{2018} % Change the volume number to some relevant information for arXiv submission %\volume{{\rm in press}} % Vertical offset, particularly for arXiv submission \voffset=-0.6in % Don't change these lines \begin{document} \label{firstpage} \pagerange{\pageref{firstpage}--\pageref{lastpage}} \maketitle % Abstract of the paper % Single paragraph, not more than 250 words (200 for Letters), no references. \begin{abstract} We present the first data release (DR1) of the UVES Spectral Quasar Absorption Database (SQUAD), comprising 467 fully reduced, continuum-fitted high-resolution quasar spectra from the Ultraviolet and Visual Echelle Spectrograph (UVES) on the European Southern Observatory's Very Large Telescope. The quasars have redshifts $z=0$--5, and a total exposure time of 10 million seconds provides continuum-to-noise ratios of 4--342 (median 20) per 2.5-\kms\ pixel at 5500\,\AA. The SQUAD spectra are fully reproducible from the raw, archival UVES exposures with open-source software, including our \popler\ tool for combining multiple extracted echelle exposures which we document here. All processing steps are completely transparent and can be improved upon or modified for specific applications. A primary goal of SQUAD is to enable statistical studies of large quasar and absorber samples, and we provide tools and basic information to assist three broad scientific uses: studies of damped Lyman-$\alpha$ systems (DLAs), absorption-line surveys and time-variable absorption lines. For example, we provide a catalogue of 155 DLAs whose Lyman-$\alpha$ lines are covered by the DR1 spectra, 18 of which are reported for the first time. The \ion{H}{i} column densities of these new DLAs are measured from the DR1 spectra. DR1 is publicly available and includes all reduced data and information to reproduce the final spectra. \end{abstract} % Select between one and six entries from the list of approved keywords: http://oxfordjournals.org/our_journals/mnrasl/for_authors/mnraskey.pdf % Don't make up new ones. \begin{keywords} line: profiles -- instrumentation: spectrographs -- quasars: absorption lines -- cosmology: miscellaneous -- cosmology: observations \end{keywords} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%% BODY OF PAPER %%%%%%%%%%%%%%%%%% \section{Introduction}\label{s:intro} The era of 8-and-10-metre telescopes has revolutionised the study of quasar absorption spectra. Before the Keck I 10-metre telescope's first light with the High Resolution Echelle Spectrometer in 1993 \citep[HIRES;][]{Vogt:1994:362}, few quasars were bright enough to be studied with reasonable signal-to-noise ratio (\SN) at resolving powers $R\ga40000$ with smaller telescopes. This new reach was extended to the southern hemisphere in 1999 with the Ultraviolet and Visual Echelle Spectrograph \citep[UVES;][]{Dekker:2000:534} on the European Southern Observatory's (ESO's) 8-metre Very Large Telescope (VLT). Since its commissioning, UVES has contributed to a wide variety of extragalactic discoveries and studies, particularly using absorption lines arising in gas clouds along quasar sight-lines. For example, UVES spectra have been used to trace the metallicity, power-spectrum and thermal history of the intergalactic medium via \lya\ forest absorption lines \citep[e.g.][]{Schaye:2003:768,Kim:2004:355,Boera:2014:1916}. 
The chemical abundances of circumgalactic environments, traced by the highest-column density clouds -- the damped \lya\ systems (DLAs) and sub-DLAs -- have been studied in detail with UVES \citep[e.g.][]{Molaro:2000:54,Pettini:2002:21,Pettini:2008:2011,Dessauges-Zavadsky:2003:447}.
UVES spectra have also been used to discover and analyse molecular hydrogen and carbon monoxide in (sub-)DLAs \citep[e.g.][]{Ledoux:2003:209,Noterdaeme:2008:327,Srianand:2008:L39} and, recently, in likely examples of the high-redshift interstellar medium \citep[e.g.][]{Noterdaeme:2015:A24,Noterdaeme:2017:A82}.
Measurements of key cosmological parameters have been made with UVES quasar spectra; for example, deuterium abundance constraints on the total energy density of baryons \citep[e.g.][]{Pettini:2008:1499,Pettini:2012:2477,Riemer-Sorensen:2017:3239} and the redshift evolution of the cosmic microwave background temperature \citep[e.g.][]{Noterdaeme:2011:L7}.
UVES quasar spectra have even been used to constrain cosmological variations in the fundamental constants of nature \citep[e.g.][]{Quast:2004:L7,King:2008:251304,King:2012:3370,Rahmani:2013:861,Molaro:2013:A68,Murphy:2016:2461}.

It is notable that most of the above studies utilised a UVES spectrum of a single quasar.
While this demonstrates the high scientific value of such spectra, large samples are often required to enable some scientific projects, or to make a meaningful measurement or improvement over previous ones\footnote{A crude illustration of the latter point is that, according to NASA's Astrophysical Data System, all but two of the 15 most cited UVES quasar absorption papers used considerable samples of spectra.}.
One difficulty is that reducing raw high-resolution (i.e.\ echelle) spectroscopic data is challenging and can require considerable experience, even with observatory-supplied data reduction pipelines.
Combining the spectra from many quasar exposures and continuum-fitting the final spectrum are almost always required, but these steps are not straight-forward and usually fall outside the scope of reduction pipelines.
Therefore, most studies using UVES quasar spectra have not made general-purpose, combined spectra publicly available.
Doing so can be time-consuming and low priority compared to the immediate, specific scientific purpose for which the UVES observations were proposed.
This has severely limited the availability and use of large samples of high-resolution quasar spectra.

Considerable efforts have already been invested to address these limitations: \citet{Zafar:2013:A140} presented a database of 250 UVES quasar spectra and \citet{OMeara:2015:111,OMeara:2017:114} have provided 300 HIRES quasar spectra.
To further assist, we provide here the UVES Spectral Quasar Absorption Database (SQUAD) first data release (DR1): 467 ``science-ready'' UVES quasar spectra at redshifts $\zem=0$--5.
Importantly, the processing steps for each quasar spectrum in DR1 are fully transparent and repeatable.
That is, all the steps to reduce and combine the multiple exposures of a quasar, and ``clean'' and continuum fit its combined spectrum, are fully visible and can be repeated by executing a few commands using public, open-source software.
This end-to-end transparency and reproducibility ensures that scientific applications for which certain aspects of the data are important (e.g.\ the wavelength calibration accuracy) have an unbroken record of their treatment, from raw data to final, combined spectrum.
Our public software ensures that each spectrum can be improved by its users as it is employed for different purposes (each with a different scientific focus), or modified to suit a particular scientific application, and that all changes can easily be made transparent and reproducible to others.
We have also attempted to process the DR1 spectra as uniformly as possible so they may be most useful for statistical studies of large quasar samples.

The UVES SQUAD differs in several ways from the database of \citet{Zafar:2013:A140}.
The latter drew on ESO's Advanced Data Products (ADP) archive: automatic reductions of point-source exposures, in settings for which standardised (``master'') calibration files were available.
This used the original {\sc eso-midas} data reduction pipeline which has now been superseded by a pipeline with superior spectral extraction quality and which we optimise for better wavelength calibration accuracy.
\citeauthor{Zafar:2013:A140} combined the ADP-reduced exposures of a quasar using custom software.
Our experience suggests that this combination process can be very important, even critical, for some scientific applications, which further motivates our fully transparent and reproducible approach, and the ability for users to modify the parameters of the reduction and/or combination process easily.
Finally, the ADP-reduced exposures are redispersed (re-gridded) onto a linear wavelength grid.
Given that UVES is a grating cross-dispersed echelle, the resolving power does not vary strongly with wavelength, so a linear wavelength grid is inappropriate: it will undersample the resolution element at the bluest wavelengths and/or oversample it at the reddest wavelengths.
Further, combining multiple exposures onto a common wavelength grid entails redispersing them again (e.g.\ to accommodate different heliocentric velocities).
This introduces further correlations between the flux (and uncertainty) in neighbouring pixels, and slightly lowers the resolving power.
We avoid these problems in the UVES SQUAD by reducing the raw data and redispersing all extracted exposures once to a common log-linear, vacuum--heliocentric grid.
Combining all (extracted) exposures in this way provides the highest \SN, highest-resolution, appropriately sampled final spectrum of each quasar.

This paper is organised as follows.
\Sref{s:selection} describes how DR1 is defined, how the quasars were identified in ESO's UVES data archive, and presents the DR1 quasar catalogue (\Tref{t:cat}).
\Sref{s:reduction} details how appropriate calibration data were identified for each quasar exposure, and the data reduction process.
\Sref{s:popler} documents our \popler\ software for combining multiple (extracted) UVES exposures of a quasar to produce the ``science-ready'', final DR1 spectra.
\Sref{s:database} describes the basic properties of the DR1 spectra and the main remaining artefacts that most or all spectra contain.
In \Sref{s:use} we illustrate three examples of the many applications for the DR1 sample: DLA studies, absorption-line surveys and studies of time-variable absorption lines.
In particular, we present a catalogue of 155 DLAs where the \lya\ line is covered by the DR1 spectra, 18 of which have not been reported before.
We measure \ion{H}{i} column densities for these new DLAs directly from the DR1 spectra.
\Sref{s:conclusion} summarises the paper and discusses future SQUAD data releases.
The DR1 database, including all reduced data and files required to produce the final DR1 spectra, is publicly available in \citet{Murphy:2018:UVESSQUADDR1}.

\section{Quasar selection and catalogue}\label{s:selection}

\Tref{t:cat} catalogues the DR1 quasar and spectrum properties.
This first data release is defined as containing the 475 quasars in the ESO UVES archive whose first exposure (longer than 100\,s) was observed before 30th June 2008.
All exposures of these quasars (longer than 100\,s) observed before 17th November 2016 were included in the final, combined spectra in DR1.
In total, 3088 exposures were selected and successfully processed, with a total exposure time of $10.09\times10^6$\,s (2803\,h, an average of 5.9\,h per quasar).
The quasar candidates satisfying these date criteria were selected by cross-matching the coordinates of all ``science'' observations in the ESO UVES archive (i.e.\ with DPR.CATG set to ``SCIENCE'') with the MILLIQUAS quasar catalogue \citep[][updated to version 5.2\footnote{See {\urlstyle{rm}\url{http://quasars.org/milliquas.htm}}}]{Flesch:2015:e010}.
While this catalogue aims to include all quasars from the literature (up to August 2017), it will not include unpublished quasars.
To identify such cases, we checked the ESO proposal titles and observed object names (as labelled by the observers) for all programs that observed any MILLIQUAS quasar with UVES and searched for any objects observed in those programs that may be quasars (and not already reported in MILLIQUAS).
This approach identified 9 of the final 475 quasars selected for DR1, and a further 18 objects that, upon data reduction and exposure combination, were clearly not quasars (17 stars and one galaxy).
While it is possible that some quasars were not selected by our approach, our manual checking of the proposal titles and object names should ensure this number is very small or zero.
All quasar candidates were identified in the SuperCosmos Sky Survey database \citep{Hambly:2001:1279} to determine a complete set of J2000 coordinates.
In cases where a spectrum is available from the Sloan Digital Sky Survey

\newlength{\oldtabcolsep}
\setlength{\oldtabcolsep}{\tabcolsep}
\setlength{\tabcolsep}{0.27em}
\begin{landscape}
\begin{table}
\caption{The UVES SQUAD DR1 sample of 475 quasars, of which we provide final spectra for 467. The first 20 columns provide the quasar names, coordinates, redshifts and optical/infra-red photometry sourced from several databases: SDSS, SuperCosmos, NED and SIMBAD (see text). The next seven columns specify the important observational information: the number of UVES exposures, their total duration, the ESO Program IDs, the UVES wavelength settings, slit widths and on-chip binnings used, and the prevailing seeing values reported in the ESO Science Archive (when available, the minimum, median and maximum seeing are reported, and ``NA'' is reported when seeing information is not available). The final five columns summarise the final spectrum properties (see text for full explanations): the ``Spec.\ status'' flag indicates whether we produced a final spectrum (values 0, 1 and 2), or whether a very high redshift, multiple objects in the slit, or a lack of calibration exposures precluded this (values 3, 4 and 5); the dispersion of the final spectrum (\kms\ per pixel); its wavelength coverage (\Sref{s:database}); the continuum-to-noise ratio and nominal resolving power calculated at five representative wavelengths (see \Sref{s:database}).
The full table is available as Supporting Information online.} \label{t:cat} \input{tab_cat_exerpt} \end{table} \end{landscape} \setlength{\tabcolsep}{\oldtabcolsep} \noindent \citep[SDSS DR14;][]{Abolfathi:2018:42,Paris:2018:A51}, the SDSS coordinates were used in preference. These coordinates were used to name all quasars in DR1; this results in the unique ``DR1 Name'' field for each quasar in \Tref{t:cat}. Quasar emission redshifts were taken from a hierarchy of cross-matched databases: SDSS, NED, SIMBAD and, if the quasar appeared in none of these databases, our own approximate measurement from our final spectrum. The latter was required in 13 cases, but in one of these (J031257$-$563912), no emission line could be identified from which a redshift could be estimated (its redshift is set to zero in \Tref{t:cat}). This provides a nominal, adopted redshift for each quasar, named ``$z_{\rm em,Adopt}$'' in \Tref{t:cat}. The sky position and redshift distributions of the quasars are plotted in \Fref{f:sky+zem}. \begin{figure*} \begin{center} \centerline{\hbox{ \includegraphics[width=1.16\columnwidth]{DR1_skymap.pdf} \hspace{0.01\columnwidth} \includegraphics[width=0.83\columnwidth]{DR1_zemdist.pdf} }} \caption{Left panel: Sky distribution of the 467 DR1 quasars in \Tref{t:cat} for which final spectra were produced. The colour/shading of the points encodes the quasar redshift as indicated. The light blue shaded stripe and bulge represent the Galactic plane and centre. Right panel: Distribution of emission redshifts ($z_{\rm em,Adopt}$ in \Tref{t:cat}) for the DR1 quasars with final spectra.} \label{f:sky+zem} \end{center} \end{figure*} DR1 contains final spectra for 467 of the 475 quasars in \Tref{t:cat}. For two of these 467 (J000322$-$260318 and J030722$-$494548), not all available exposures could be successfully processed for lack of appropriate calibration exposures; these have been assigned ``Spec.\ status'' values of 1 in \Tref{t:cat}. The wavelength coverage of their final spectra is significantly reduced as a result. J000322$-$260318 also has a ``Spec.\ status'' value of 2, indicating that the wavelength calibration of at least some of its exposures is very obviously distorted; this spectrum should be used with caution. There are 8 DR1 quasars for which we were unable to produce a final spectrum: \begin{itemize} \item Three quasars were at redshifts $\zem\ge5.8$, so the extremely thick \lya\ forest left very little flux in individual exposures (J130608$+$035626, J103027$+$052455, J104433$-$012502; ``Spec.\ status'' value of 3). Combining the exposures using our approach (particularly the order scaling step) is not effective in such cases (\Sref{sss:combination}); the exposures would need to be spectrophotometrically flux calibrated to allow reliable combination. \item Two quasars had more than one object with similar magnitude aligned in the slit (the lensed quasars J110633$-$182124 and J145433$-$401232; ``Spec.\ status'' value of 4). Producing separate, successfully resolved spectra would require non-standard reduction steps not implemented here. The observations of another lensed quasar, J091127$+$055054, had a second, much fainter object aligned in the slit; this did not affect the data reduction steps so we provide a final spectrum for this object but urge caution in using it. 
\item Three quasars lacked appropriate calibration exposures in the ESO UVES archive so the basic data reduction steps could not be undertaken (J030449$-$000813, J223337$-$603329, J033032$-$270438; ``Spec.\ status'' value of 5). \end{itemize} \section{Data calibration and reduction}\label{s:reduction} \subsection{Science and calibration data selection}\label{ss:selection} UVES is a two-arm, grating cross-dispersed echelle spectrograph mounted on the Nasmyth platform of Unit Telescope 2 of the VLT \citep{Dekker:2000:534}. Combined, the two arms can cover a very broad wavelength range ($\sim$3050--10500\,\AA), albeit with gaps depending on the wavelength settings chosen. The blue arm camera contains a single CCD chip, while the red arm camera contains a two-chip mosaic. Most observations use both arms simultaneously, with the quasar light split into the two arms by a dichroic mirror, in two of nine standard wavelength settings named according to the central wavelength, in nm: 346, 390 and 437 for the blue arm; 520, 564, 580, 600, 760 and 860 for the red arm. However, some observations use a single arm only, and many different non-standard wavelength settings. The wavelength settings used for the DR1 quasar observations are specified in \Tref{t:cat}. Each setting is characterised by a different, nominal wavelength coverage. We only consider exposures taken through an entrance slit; UVES has an image slicer option but we exclude such data from DR1. The slit width and on-chip binning determine the nominal resolving power, i.e.\ that expected for a fully illuminated slit, as is the case for ThAr exposures. However, the quasar exposure's resolving power will be somewhat larger than this, especially if the seeing FWHM is significantly less than the slit width. \Tref{t:cat} therefore provides the range of slit widths, binnings and seeing during the observations as a guide (\Sref{s:database} discusses the nominal resolving power reported in \Tref{t:cat} for the final spectra). We do not include quasar observations made through UVES's iodine absorption cell; these require additional calibration exposures and cannot be combined with non-absorption cell observations of the same quasars. Finally, we exclude exposures taken with the Fibre Large Array Multi Element Spectrograph (FLAMES) mode of UVES. Given the above spectrograph details, a range of calibration exposures are required to reduce each quasar exposure. The default operations model for UVES is that all calibration exposures are taken in the morning after each night's observations. This means that some exposures are used to calibrate more than one quasar exposure, and that associating calibrations with exposures requires a matching algorithm. We requested all available UVES ``science'' exposures of the DR1 quasars, within 1 arcminute search boxes of their adopted coordinates (from SDSS or SuperCosmos), plus matching calibration exposures, from the ESO Science Archive. However, for many quasars the calibration matching algorithm was clearly imperfect so additional, manual requests for a large number of calibration exposures around the observation dates of many quasars were made as well. This resulted in a large database of potential calibration files. We used a custom-written code, \headsort\ \citep{Murphy:2016:UVESheadsort}, to ensure that the best-matching calibrations were selected within a specified ``calibration period'' before and after each quasar exposure. 
This generally meant selecting the calibration exposure(s) closest in time to the corresponding quasar exposure for five different calibration types: \begin{itemize} \item Wavelength calibration: A single thorium-argon (ThAr) exposure with the same spectrograph settings (i.e.\ wavelength setting, on-chip binning and slit-width), was generally selected. Given the UVES operations model, in most cases the ThAr exposure was taken at least several hours after the quasar exposure. Indeed, the median time difference for all 3088 processed DR1 exposures is 5.4\,h. However, preference was given to ``attached'' ThAr exposures, i.e.\ those taken immediately after quasar exposures without any grating angle changes. An attached ThAr exposure was identified as having the same grating encoder value as the corresponding quasar exposure. In a very small number of cases, particularly for exposures taken before 2001, a slightly different slit width was allowed for the matched ThAr exposure compared to the quasar exposure. \item Order format and definition: ThAr and quartz lamp exposures taken through a short slit are used to identify the echelle orders and define a baseline trace across the CCD. A single exposure of each type with the same spectrograph settings (except for the much shorter slit), was selected in all cases. \item Flat field: Five quartz lamp exposures with the same spectrograph settings were selected. In a small number of cases, especially for early UVES data (before 2003), some quasar exposures only had 3 or 4 matching flat field exposures; rarely, only a single flat field exposure could be found for quasar exposures taken before 2002. \item Bias: The five bias (zero-duration) exposures taken on the same CCD as the quasar exposure were selected in all but rare cases from early UVES operations (before 2002). \end{itemize} \subsection{Reduction with \headsort\ and the ESO Common Pipeline Library}\label{ss:cpl} After determining the best set of calibration exposures for a given quasar exposure, \headsort\ outputs a reduction script for use with ESO's Common Pipeline Library ({\sc cpl}, version 4.7.8\footnote{{\urlstyle{rm}\url{http://www.eso.org/observing/dfo/quality/UVES/pipeline/pipe_reduc.html}}}) of UVES data reduction routines, specifically via the ESO Recipe Execution Tool ({\sc esorex}) command-line interface. This provides a highly streamlined data reduction pipeline -- typically, a quasar exposure can be matched with calibrations and fully reduced within several minutes -- while allowing low-level access to the data reduction parameters for improving the reduction if required. Most of the reduction steps are standard for UVES data and are explained in detail in the UVES {\sc cpl} pipeline manual\footnote{\urlstyle{rm}\url{https://www.eso.org/sci/software/pipelines/uves/uves-pipe-recipes.html}}. Briefly, these standard steps are: \begin{enumerate} \item ThAr lines are identified on the format definition frame and used to constrain a physical model of the UVES echellogram. This identifies the diffraction order numbers and spectral setup of the exposure which assists the order definition [step (ii)] and enables the automatic wavelength calibration in step (iv) below. \item The order definition exposure is used to establish a baseline trace for object light along each echelle order. This acts as an initial guide for extracting the quasar flux. 
\item The bias and flat-field exposures are combined to form masters which are used to correct the quasar exposure for bias and dark-current offsets and pixel-to-pixel sensitivity variations in the subsequent steps. \item The ThAr flux is extracted along the default trace in the wavelength calibration exposure (corrected for the blaze function using the master flat) and the ThAr lines are automatically, iteratively matched with those in the list carefully selected for UVES in \citet{Murphy:2007:221}. This allows a polynomial (air) wavelength solution to be established for the entire CCD (i.e.\ air wavelength versus pixel position for each echelle order). \item The quasar flux is optimally extracted, with weights determined by averaging the quasar flux along small spectral sections (normally 32 pixels) and either fitting a Gaussian function to this average profile or using it directly, depending on its \SN. The sky flux is extracted simultaneously in this process and is subtracted from the quasar flux in each extracted spectral pixel. The 1$\sigma$ flux uncertainty is also determined from the quasar flux, sky flux and CCD noise characteristics. The flux and uncertainty spectra are corrected for the blaze function using the master flat. \end{enumerate} Step (iv) was then repeated for the DR1 spectra to improve their wavelength calibration. The optimal extraction weights from step (v) were used to re-extract the ThAr spectra and perform a refined wavelength calibration process. This ensures that the same pixels, with the same statistical weights, are being used to establish the wavelength scale for the quasar spectrum (e.g.\ it naturally negates the effects of spatially tilted ThAr lines on the CCD). \headsort's reduction scripts also modify {\sc cpl}'s defaults for the wavelength polynomial degree, the number of ThAr lines to search for and select before performing the iterative polynomial fitting, and the tolerance allowed between the fitted and expected wavelength of ThAr lines. Typically, these new defaults simultaneously increase the number of lines used in the wavelength calibration, reduce the residuals around the final wavelength solution, and marginally improve the accuracy of the solution (due to increasing the polynomial degree). In some cases, particularly with the very blue wavelength settings (e.g.\ the standard 346 and 390-nm settings), these new defaults were modified manually to achieve a more robust wavelength solution (i.e.\ to increase the number of ThAr lines used). \Fref{f:wavcal} shows the resulting root-mean-square deviation from the mean (RMS) of the wavelength calibration residuals for each CCD chip for all DR1 quasar exposures taken with a 1-arcsecond-wide slit and 2$\times$2 on-chip binning in the six most commonly-used wavelength settings. Together, these exposures comprise 53\% of all DR1 quasar exposures. In all but the bluest two settings, our approach to the wavelength calibration produced very similar residuals for almost all exposures. For the 390 and particularly the 346-nm settings, some exposures had substantially larger residuals. This is mainly due to the strong variation in UVES's total efficiency across the wavelength ranges covered by these settings. This causes a deficiency in the number of ThAr lines found above the intensity threshold set by the {\sc cpl} pipeline in the bluest orders. 
Although the {\sc cpl} pipeline addresses this problem in most cases, in some cases the tolerance for accepting calibration lines had to be increased so that enough lines could be found to provide a robust wavelength solution.
This, in turn, causes the observed increase in the RMS of the wavelength calibration residuals in such cases.

\begin{figure}
\begin{center}
\includegraphics[width=0.95\columnwidth]{DR1_wavcalrms_1p0_2x2.pdf}
\vspace{-1em}
\caption{Typical RMS wavelength calibration residuals. The distributions show the calibration results for each DR1 quasar exposure taken with a 1-arcsecond-wide slit and 2$\times$2 on-chip binning in the six most common wavelength settings (as labelled).}
\label{f:wavcal}
\end{center}
\end{figure}

After step (v) above, the {\sc cpl} pipeline redisperses the flux and uncertainty arrays onto a linear wavelength grid (i.e.\ all pixels have the same size in wavelength), merges the spectra from adjacent spectral orders, and corrects the spectral shape using an estimate of the instrument response curve.
However, because the resolving power remains reasonably constant across the wavelength range of grating cross-dispersed echelle spectrographs, and UVES covers more than a factor of three in wavelength range ($\sim$3050--10500\,\AA), a constant dispersion in wavelength is inappropriate; it inevitably undersamples the resolution element in the bluest parts of the spectrum and/or oversamples it in the reddest parts.
Also, merging adjacent orders should account for small instrument response and/or blaze correction imperfections and variations by scaling their relative flux before averaging, but the accuracy of this is severely limited in a single exposure due to lack of \SN.
However, almost all quasars in DR1 were observed in multiple exposures, so there is an opportunity to improve the merging of adjacent orders by considering all exposures together.
And, finally, if the spectra from multiple exposures are to be combined, they will have to be redispersed, again, onto a common wavelength grid after correction for heliocentric motions.
For these reasons, we use the extracted flux and uncertainty arrays of each order (neither redispersed nor flux calibrated), from every exposure, to produce each quasar's final spectrum.
This was performed using the custom-written code, \popler, described below.
All relevant pipeline products are provided for every DR1 exposure in \citet{Murphy:2018:UVESSQUADDR1}.

\section{\popler: UVES Post-pipeline Echelle Reduction}\label{s:popler}

\popler\ \citep{Murphy:2016:UVESpopler} was designed specifically for combining the UVES data reduction pipeline products to produce a final, continuum-fitted spectrum (however, it can also use products from many other data reduction pipelines, including those often used for Keck/HIRES and Subaru/HDS high-resolution spectra).
Below we summarise the overall approach of \popler\ (\Sref{ss:popler_summary}) and how it was applied to create the DR1 spectra (\Sref{ss:poplerDR1}).

\subsection{Summary of \popler\ operation}\label{ss:popler_summary}

\popler\ reads the extracted flux and uncertainty arrays for each echelle order of each quasar exposure and the wavelength calibration polynomials derived from their corresponding ThAr exposures.
Operation then proceeds in two phases: the automatic and manual phases.
It is important to note that both phases are entirely reproducible and transparent: all parameters of the automatic phase, and relevant details of all manual ``actions'' subsequently performed in the manual phase, are recorded in a \popler\ log (UPL) file; any user can understand how a spectrum has been formed and modified, and re-run the entire process themselves.
UPL files for all DR1 quasars are provided in \citet{Murphy:2018:UVESSQUADDR1}.

The automatic phase attempts to combine the spectra from all orders in all exposures and perform a basic continuum fit.
Its main steps are:
\begin{enumerate}
\item Data validation: Reject pixels whose uncertainty indicates problems in the extraction (e.g.\ negative or extremely small uncertainties). This normally occurs near the order edges for UVES {\sc cpl}-reduced spectra.
\item Residual cosmic ray rejection: Reject pixels, and their immediate neighbours, whose flux is much larger than the mean flux for their neighbouring 34 pixels. This rejects ``cosmic rays'' and/or bad pixels not already rejected in the optimal extraction step of UVES {\sc cpl}-reduced spectra.
\item Vacuum and heliocentric corrections: The wavelength scales for the individual exposures are converted from air wavelengths to vacuum, and their correction for heliocentric motion is calculated and applied.
\item Redispersion: A common log-linear, vacuum--heliocentric wavelength scale is established, with a constant velocity dispersion specified by the user, that covers the remaining pixels in the contributing exposures. The flux and uncertainty spectra from all exposures are linearly redispersed onto this common grid.
\item Order scaling and combination: The spectra in all echelle orders are combined in an iterative process starting from the highest \SN\ order. It is combined with the next highest ``rank'' order: that with the highest combination of \SN\ and wavelength overlap. The next highest rank order is combined with the previous two, and so on until all orders are combined. The flux (and uncertainty) in each order is optimally scaled to match the combined spectrum from the previous iteration. For each spectral pixel, the combined flux is the weighted mean of the fluxes from the contributing spectra, which is determined through an iterative clipping process to remove discrepant values.
\item Continuum fitting: Each contiguous section of the combined spectrum is broken into ``chunks'', typically 20000\,\kms\ wide below the quasar \lya\ emission line and 2500\,\kms\ above it, which overlap half of the adjacent chunks. An iterative polynomial fit is performed to each chunk: at each iteration, pixels with flux significantly below (typically $>$1.4$\sigma$) or above (typically $>$3.0$\sigma$) the current fit are rejected for the next iteration. To form a smooth, final continuum, the final fits from adjacent chunks are averaged with a weight that decreases linearly from unity at the chunk's centre to zero at its edge.
\end{enumerate}

The automatic phase of \popler\ generally produces excellent ``quick-look'' spectra that are entirely adequate for many scientific goals, particularly those focussing on individual absorption systems whose transitions collectively occupy only a small fraction of the pixels.
However, individual UVES exposures nearly always contain some artefacts that inhibit larger, statistical studies (and are often a nuisance to others as well) because, for example, they can mimic real absorption features in blind searches.
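To make steps (iv) and (v) of the automatic phase more concrete, the following minimal sketch illustrates the construction of a log-linear (constant velocity per pixel) grid and an inverse-variance weighted, iteratively clipped combination of exposures already redispersed onto that grid.
It is an illustration only, written in Python for brevity: \popler\ itself is implemented in C, its rejection and order-scaling schemes differ in detail, and all function and variable names below are ours rather than part of any released code.
\begin{verbatim}
# Illustrative sketch only (not the uves_popler implementation):
# build a log-linear wavelength grid and combine exposures on that
# grid with an inverse-variance weighted mean, iteratively rejecting
# pixels discrepant with the current combined value.
import numpy as np

C_KMS = 299792.458  # speed of light [km/s]

def loglinear_grid(wl_min, wl_max, dv_kms=2.5):
    """Wavelength grid in which every pixel spans dv_kms in velocity."""
    ratio = 1.0 + dv_kms / C_KMS
    npix = int(np.ceil(np.log(wl_max / wl_min) / np.log(ratio))) + 1
    return wl_min * ratio**np.arange(npix)

def combine_exposures(flux, error, nsig=3.0, maxiter=10):
    """Combine (n_exposures, n_pixels) arrays on a common grid.

    Invalid pixels should carry non-positive or non-finite errors;
    they receive zero weight. Returns combined flux and 1-sigma error.
    """
    flux = np.asarray(flux, dtype=float)
    error = np.asarray(error, dtype=float)
    good = np.isfinite(flux) & np.isfinite(error) & (error > 0.0)
    npix = flux.shape[1]
    mean = np.full(npix, np.nan)
    for _ in range(maxiter):
        safe_err = np.where(good, error, np.inf)  # zero weight if bad
        w = 1.0 / safe_err**2
        wsum = w.sum(axis=0)
        mean = np.divide((w * np.where(good, flux, 0.0)).sum(axis=0),
                         wsum, out=np.full(npix, np.nan),
                         where=wsum > 0.0)
        # Reject contributions discrepant with the combined value
        resid = np.abs(np.where(good, flux, 0.0) - mean) / safe_err
        new_good = good & (resid < nsig)
        if np.array_equal(new_good, good):
            break
        good = new_good
    wsum = (1.0 / np.where(good, error, np.inf)**2).sum(axis=0)
    cerr = np.divide(1.0, np.sqrt(wsum), out=np.full(npix, np.nan),
                     where=wsum > 0.0)
    return mean, cerr
\end{verbatim}
In \popler\ the weighting, rejection and scaling are, of course, applied order by order and exposure by exposure, as described in step (v); the sketch is intended only to convey the basic combination logic.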
The automatic continuum fits redwards of the \lya\ forest are generally very reliable, except in the vicinity of absorption features wider than the chunk size or across very narrow quasar emission lines. However, the automatic continuum in the \lya\ forest is not generally useful; reliable automatic continuum placement is a notorious problem in quasar spectroscopy that limits the speed with which high-resolution spectra can be analysed. Unfortunately, we have not solved that problem here. For these reasons, a manual phase of operation is required. The manual phase of \popler\ allows interactive ``actions'' to be performed on the contributing echelle orders or combined spectrum to improve the quality of the latter and its continuum fit. These actions include: \begin{itemize} \item Clip (and unclip) pixels from contributing orders or the combined spectrum. \item Manually fit or draw (spline) a new continuum to part of the combined spectrum. \item Automatically fit the continuum for the entire spectrum again. \item Manually fit or draw (spline) a continuum to (part of) a contributing order to reshape its flux (and uncertainty) array to that of the combined continuum. \item Scale an order's flux and uncertainty array by a constant factor. \item Rerun the automatic order scaling algorithm starting from the highest rank order not manually scaled by the user. \end{itemize} In general, a user will select portions of a spectrum to manually improve using the above actions based on their specific scientific goals. For example, for studying the intergalactic medium, it will be important to remove artefacts and re-fit the continuum in the \lya\ forest region. In \Sref{ss:poplerDR1} below we describe the approach to improving the DR1 quasar spectra for use towards as many different scientific goals as possible, particularly large statistical studies of DLAs, the intergalactic medium and metal absorption systems. \subsection{Creation of UVES SQUAD DR1 spectra with \popler}\label{ss:poplerDR1} For DR1, \popler\ (version 1.00) was used to create the final quasar spectra. We provide the complete record of parameters used for the automatic phase, and all subsequent manual actions for all DR1 quasars as UPL files in \citet{Murphy:2018:UVESSQUADDR1}. The detailed, specific treatment of each quasar is therefore transparent and any user may reproduce a quasar's final DR1 spectrum with a single execution of \popler\ (with the UPL as its argument). A key aspect of this approach is that users may further improve the DR1 spectrum by using \popler\ to add manual actions to the UPL file. Indeed, we welcome improved UPL files from the user community for inclusion in subsequent data releases. To make each DR1 quasar spectrum useful for as many scientific goals as possible, our approach was to ``clean'' it to at least a minimum standard in the manual phase of \popler. Clearly, this cleaning process is the most time-consuming stage, and all authors contributed to it, so ensuring a strictly uniform standard for all DR1 quasars was not practical. Nevertheless, the following cleaning steps were taken for each quasar in DR1 with a view to making the final spectrum as useful as possible. \subsubsection{Artefact and bad data removal}\label{sss:artefacts} The {\sc cpl}-reduced UVES spectra often contain very obvious artefacts that are similar, though not identical, in different spectra. 
Thus, they are not removed by the iterative clipping process when the contributing order spectra are combined [step (v) in \Sref{ss:popler_summary}] and can corrupt the final spectrum. Manually removing them from the contributing spectra can often leave a relatively uncorrupted, contiguous region in the final, combined spectrum. A prominent and common example occurs in the bluest 4--5 orders of the red arm spectra due to several bad pixel rows in the corresponding CCD. An example of this problem is shown in \Fref{f:baddata}. For each quasar, we visually scanned the spectrum in \popler\ to identify such artefacts. Clearly, the flux spectrum is one important guide here, as can be seen in \Fref{f:baddata}, and we removed artefacts that obviously affected the flux spectrum. However, \popler\ also displays the $\chi_\nu^2$ spectrum: for each pixel, this is the $\chi_\nu^2$ of the contributing pixel fluxes around their weighted mean value. This assists in identifying regions where the contributing exposures do not match as closely as expected (given their uncertainties); it tends to help find artefacts that have a more subtle effect on the final flux spectrum. \Fref{f:baddata} contains an example at $\approx$4905--4910\,\AA: the significant increase in the $\chi_\nu^2$ spectrum here corresponds to only a small effect on the final flux spectrum. However, to reduce the time for cleaning all DR1 spectra, in many cases we did not remove some of these more subtle artefacts from contributing exposures if they did not affect an obvious absorption feature. \begin{figure} \begin{center} \includegraphics[width=0.95\columnwidth]{DR1_J014333-391700_baddata.pdf} \vspace{-1em} \caption{Example of manual cleaning of obvious artefacts and bad data from the spectrum of J014333$-$391700. Lower panel: The orange (background) spectrum is the result of the automatic phase of \popler\ and clearly shows the effect of bad CCD rows in the first 4 orders of red arm spectra (580-nm wavelength setting; not all contributing exposures were affected). The blue (foreground) spectrum shows the result after the manual artefact removal stage in \popler: the most obvious artefacts were removed, but there are still some low-level effects from other contributing exposures that were not manually removed (e.g.\ at 4840, 4880 and 4920\,\AA). Upper panel: The $\chi^2$ spectra before (orange, background) and after (blue, foreground) artefact removal. $\chi^2$ for a given pixel is that of the contributing pixels' flux values relative to their combined value (weighted mean). The $\chi^2$ spectra are used in \popler\ to help identify regions where artefacts may be present.} \label{f:baddata} \end{center} \end{figure} Another, less common, artefact in {\sc cpl}-reduced UVES spectra is that of ``bends'': echelle order spectra that have different shapes where they overlap. This can occur for several reasons, e.g.\ time-evolution in the flat-field lamp spectral shape, or poor extractions of the quasar flux near order edges, perhaps due to poorly constrained object traces. When very severe, these affected the final flux spectrum, and so were corrected. More subtle cases were still evident in the $\chi_\nu^2$ spectra and were corrected if they affected an obvious absorption line. Bends in contributing orders were corrected either by removing the bent section or by fitting a continuum to the order (or part thereof) and re-normalising it to match the combined spectrum's continuum shape. 
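For reference, the per-pixel statistic referred to above can be written explicitly.
Up to its precise normalisation, which we have not verified against the \popler\ source code, the value displayed for combined pixel $j$ is
\begin{equation*}
\chi^2_{\nu,j}=\frac{1}{N_j-1}\sum_{i=1}^{N_j}\frac{\left(f_{ij}-\bar{f}_j\right)^2}{\sigma_{ij}^2}\,,\qquad
\bar{f}_j=\frac{\sum_{i=1}^{N_j}f_{ij}/\sigma_{ij}^2}{\sum_{i=1}^{N_j}1/\sigma_{ij}^2}\,,
\end{equation*}
where $f_{ij}$ and $\sigma_{ij}$ are the flux and 1$\sigma$ uncertainty contributed to pixel $j$ by exposure (or order) $i$, $N_j$ is the number of contributing values and $\bar{f}_j$ is their inverse-variance weighted mean.
Runs of pixels with $\chi^2_{\nu,j}$ substantially above unity indicate that the contributing exposures disagree by more than their uncertainties imply, as in the example at $\approx$4905--4910\,\AA\ discussed above.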
\subsubsection{Order rescaling and combination}\label{sss:combination} In spectral regions with very low \SN, or in echelle orders affected by severe artefacts, the relative scaling between an echelle order's spectrum and the combined spectrum [step (v) in \Sref{ss:popler_summary}] can be very poorly or spuriously determined. This occurs frequently in the bluest orders of the 346 and 390-nm settings. It also occurs if the broad trough of a DLA straddles two echelle orders, and below the Lyman limit in the rest frame of DLAs and Lyman limit systems. In these latter examples, there is simply no flux to allow a relative scaling between adjacent orders; this is certainly a disadvantage of the order-scaling algorithm in \popler. To address this in DR1 spectra we manually adjusted the scaling of the highest-ranked order with an obvious scaling problem and re-ran the automatic scaling algorithm starting at that order. This process was repeated for lower-ranked orders to achieve a final spectrum that, visually, appears properly scaled. For the extreme blue orders, where the \SN\ degrades significantly, the best manual scaling factor to choose is often quite unclear, so there may be significant scaling differences between orders in regions of final spectra with $\SN\la5$ per pixel. \subsubsection{Continuum fitting}\label{sss:continuum} As discussed in \Sref{ss:popler_summary}, the continuum fit in \popler's automatic phase is generally not useful in the \lya\ forest, near wide absorption features or over narrow emission lines. For the wide absorption and narrow emission features, we manually fit a new continuum only around the problematic region. This was relatively straight-forward, except for broad absorption-line quasars (BALs), because there are many pixels that are clearly not absorbed so the true continuum is easily discerned by the human eye. However, for the \lya\ forest, our approach was to manually fit the continuum in the entire region below the \lya\ emission line of all DR1 spectra. The well-known problem is that few \lya\ forest pixels are unabsorbed (except perhaps at redshifts $z\la2$), so the true continuum level is usually not at all clear. Our fitting approach is to manually select seemingly unabsorbed ``peaks'' in the \lya\ forest and interpolate between them with a low-order polynomial. This is done in chunks of spectrum ranging from $\sim$2000 to $\sim$50000\,\kms\ wide, depending on how variable the true continuum appears to be. The continuum fits to neighbouring chunks are blended together in a user-defined overlap region to ensure a smoothly-varying final continuum. In some chunks it is not possible to perform a polynomial fit in this way; for example, if BALs or DLAs fall near an emission line (most often the \lya\ emission line) the human eye can discern an approximate shape for a continuum fit but there clearly may be no pixels without substantial absorption to enable a fit. In these chunks, a continuum was simply drawn using a cubic spline function. Our \lya\ continuum fits are, therefore, necessarily subjective and uncertain; however, we expect that they are likely more accurate, and more predictably biased, than algorithmic approaches (certainly the ones currently available in \popler). \Fref{f:Lyacont} compares the automatic and manual continuum fits in part of the \lya\ forest in two $\zem\approx3$ quasars. 
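As a concrete illustration of the chunk-by-chunk fitting described above, the sketch below implements a single chunk's iterative polynomial fit with the asymmetric rejection thresholds quoted in \Sref{ss:popler_summary}.
It is a simplified stand-in for, not a copy of, the algorithm in \popler; the function name and defaults are ours.
\begin{verbatim}
# Sketch: iterative, asymmetrically clipped polynomial fit to one
# continuum "chunk", using the default thresholds quoted in the text
# (reject pixels >1.4 sigma below or >3.0 sigma above the current
# fit). Illustration only, not the uves_popler implementation.
import numpy as np

def fit_chunk_continuum(wave, flux, error, degree=4, lo=1.4, hi=3.0,
                        maxiter=30):
    """Return a continuum estimate for one chunk of spectrum."""
    wave, flux, error = (np.asarray(a, dtype=float)
                         for a in (wave, flux, error))
    x = (wave - wave.mean()) / (wave.max() - wave.min())  # conditioning
    safe_err = np.where(np.isfinite(error) & (error > 0.0),
                        error, np.inf)
    keep = np.isfinite(flux) & (safe_err < np.inf)
    cont = np.full_like(flux, np.nan)
    if keep.sum() <= degree + 1:
        return cont
    for _ in range(maxiter):
        coeffs = np.polynomial.polynomial.polyfit(
            x[keep], flux[keep], degree, w=1.0 / safe_err[keep])
        cont = np.polynomial.polynomial.polyval(x, coeffs)
        resid = (flux - cont) / safe_err
        new_keep = keep & (resid > -lo) & (resid < hi)
        # Stop when the accepted-pixel set is stable or too small
        if new_keep.sum() <= degree + 1 or np.array_equal(new_keep, keep):
            break
        keep = new_keep
    return cont
\end{verbatim}
In \popler, adjacent chunks additionally overlap and their fits are blended with linearly declining weights [step (vi) of \Sref{ss:popler_summary}] to form a smooth final continuum.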
In \Fref{f:Lyacont}, while the automatic continuum fit to the very high-\SN\ spectrum of J224708$-$601545 appears reasonable, on close inspection it is clearly too low in most regions and obviously too high around 4300\,\AA.
However, for the lower-\SN\ spectrum of J013301$-$400628, the automatic continuum is completely inadequate.
This is caused mainly by the emission line at 4160\,\AA.
The manual fits shown in \Fref{f:Lyacont} are clearly more accurate and useful for statistical studies of the \lya\ forest, and even for more detailed studies of these individual lines of sight.
However, even by eye, one can identify potential problems with our manual fits.
For example, the manual continuum redwards of $\sim$4300\,\AA\ in \Fref{f:Lyacont} for J013301$-$400628 may be too high in general, perhaps by as much as $\sim$2\%.
We discuss the biases in our continuum fits in \Sref{sss:art_cont}.

\begin{figure}
\begin{center}
\includegraphics[width=0.95\columnwidth]{DR1_Lya_cont.pdf}
\vspace{-1em}
\caption{Example of automatic (blue dashed lines) and manual (orange solid lines) continuum fits in the \lya\ forest of two DR1 quasar spectra. For J224708$-$601545 (upper panel), the automatic and manual fits are very similar; in this case the parameters of the automatic continuum fitting algorithm of \popler\ were highly tuned to produce a reasonable fit, and this is aided by the very high \SN\ of the spectrum. For the lower-\SN\ spectrum of J013301$-$400628 (lower panel), no combination of fitting parameters could achieve a reasonable fit to the region shown here; the automatic fit shown reflects \popler's default \lya\ forest fitting parameters: individual chunks of $\approx$250\,\AA\ are fit with a fourth-order polynomial. However, six separate manual continuum fits, in overlapping chunks, contributed to the final manual fit for the section of spectrum shown, with polynomial orders varying between 4 and 15; higher orders were needed to fit a reasonable continuum over the emission line at 4160\,\AA.}
\label{f:Lyacont}
\end{center}
\end{figure}

\subsubsection{Quality control}\label{sss:quality}

All the authors, and several others (see Acknowledgements), contributed to the manual cleaning and continuum fitting steps outlined above.
Of course, this may lead to varying quality and homogeneity among the final spectra.
To reduce this, one author (MTM) reviewed all DR1 spectra and modified or added manual actions to improve and homogenise them, where necessary.
While the purpose of the general cleaning steps above is to ensure a minimum quality and usefulness for all DR1 spectra, some spectra -- or, most often, certain aspects of some spectra -- have received much more extensive attention, including manual changes to the spectrum not described above.
These are generally spectra that have already been published elsewhere.
One example is the very detailed study of J051707$-$441055 to constrain cosmological variations in the fine-structure constant by \citet{Kotus:2017:3679}.
Beyond the basic cleaning steps outlined above, this study focussed on correcting the individual exposures for known, long-range distortions of the UVES wavelength scale \citep[e.g.][]{Rahmani:2013:861,Whitmore:2015:446} and velocity shifts between exposures caused by varying alignment of the quasar within the UVES slits.
Such improvements are included in the DR1 versions of the spectra when available.

\section{Database of final spectra}\label{s:database}

The final DR1 spectral database is available in \citet{Murphy:2018:UVESSQUADDR1}.
Each quasar's final spectrum is provided in standard FITS format \citep{Wells:1981:363}, with several FITS headers containing extensive information about the spectrum itself, the exposures that contributed to it and information about their extraction and calibration. We therefore expect that, for almost all scientific uses, only these final spectrum FITS files will be needed. However, for each quasar the database also contains the \popler\ log (UPL) file and {\sc cpl} pipeline products from each contributing exposure. This allows any user to reproduce or modify the final spectrum using \popler. Furthermore, we also provide all the reduction scripts and lists of raw input science and calibration data; this allows the entire reduction procedure to be reproduced or modified if desired. \subsection{Basic spectral properties}\label{ss:properties} \Tref{t:cat} summarises information about the final spectra relevant for most scientific uses. These properties were determined in the following ways. \subsubsection{Dispersion}\label{sss:dispersion} The log-linear dispersion per pixel, expressed as a velocity in \kms, was chosen according to the on-chip binning used for the contributing exposures. The native pixel scale of UVES is $\approx$1.3\,\kms\,pix$^{-1}$ so this was the dispersion set for spectra for which most or all contributing exposures were unbinned in the spectral direction (i.e.\ 1$\times$1 binning). However, most quasars had all, or almost all, 2$\times$2 or 2$\times$1-binned (spectral\,$\times$\,spatial) contributing exposures, so we set a 2.5\,\kms\,pix$^{-1}$ dispersion for these spectra. In more mixed cases we set intermediate dispersion values. \subsubsection{Wavelength coverage}\label{sss:coverage} Each quasar spectrum is accompanied by a pixel status spectrum whose integer value encodes whether each pixel is valid or the reason it is invalid. On a pixel-by-pixel basis, this array defines the detailed wavelength coverage map of the spectrum. However, for absorption-line searches, a more useful definition ignores single invalid pixels within larger, contiguous valid regions. Each DR1 FITS header therefore includes a wavelength coverage map in which valid chunks must be at least 100\,\kms\ wide and contain gaps (runs of invalid pixels) no wider than 10\,\kms. \Tref{t:cat} shows an abridged version of this wavelength coverage map. The upper panel of \Fref{f:wlcov} illustrates the total wavelength coverage of all 467 DR1 quasars with final spectra. The many detailed features in this map generally reflect the different wavelength settings used in the UVES observations. For example, the broad bump at $\sim$3800\,\AA\ is where the 390 and 437\,nm settings overlap, while the dip at $\sim$4500\,\AA\ is where the 390-nm wavelength coverage ends and where that of the 564-nm setting begins. The series of narrow dips redwards of $\sim$9500\,\AA\ are due to gaps in wavelength coverage between neighbouring echelle orders (i.e.\ where the free spectral range exceeds the CCD width). The lower panel of \Fref{f:wlcov} shows the total wavelength coverage in the common quasar rest frame. Here the focus on rest wavelengths $\la$2800\,\AA\ is evident, which is driven by the relative lack of strong absorption lines redwards of the Mg{\sc \,ii} doublet ($\lambda\lambda$2796/2803). 
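The coverage maps recorded in the FITS headers can be reproduced from the pixel status arrays with logic along the following lines.
The sketch below is a simplification: it treats the status array as a plain boolean validity mask rather than the integer codes actually stored in the DR1 files, assumes a 2.5-\kms\ dispersion, and uses the 10 and 100\,\kms\ thresholds quoted above; the function and variable names are ours.
\begin{verbatim}
# Sketch: simplified wavelength coverage map from a boolean validity
# mask on a log-linear grid with pixel size dv_kms. Runs of invalid
# pixels no wider than max_gap_kms are bridged, then only valid
# chunks at least min_chunk_kms wide are kept. The integer status
# codes in the DR1 files are reduced to a boolean mask here.
import numpy as np

def coverage_map(wave, valid, dv_kms=2.5, max_gap_kms=10.0,
                 min_chunk_kms=100.0):
    valid = np.asarray(valid, dtype=bool).copy()
    max_gap = int(round(max_gap_kms / dv_kms))
    min_chunk = int(round(min_chunk_kms / dv_kms))
    idx = np.flatnonzero(valid)
    if idx.size == 0:
        return []
    # Bridge short gaps between consecutive valid pixels
    for a, b in zip(idx[:-1], idx[1:]):
        if 1 < b - a <= max_gap + 1:
            valid[a:b] = True
    # Collect contiguous valid chunks; apply the minimum-width rule
    chunks, start = [], None
    for i, v in enumerate(np.append(valid, False)):
        if v and start is None:
            start = i
        elif not v and start is not None:
            if i - start >= min_chunk:
                chunks.append((wave[start], wave[i - 1]))
            start = None
    return chunks
\end{verbatim}
Applied to a DR1 spectrum, this yields wavelength ranges analogous to the abridged coverage entries in \Tref{t:cat}.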
\begin{figure}
\begin{center}
\includegraphics[width=0.95\columnwidth]{DR1_wlcoverage.pdf}
\vspace{-1em}
\caption{Total wavelength coverage of the 467 final DR1 quasar spectra in the observed frame (upper panel) and common quasar rest frame (lower panel). The wavelength coverage of an individual spectrum is determined by requiring that valid chunks (contiguous runs of valid pixels) must be at least 100\,\kms\ wide and contain gaps (runs of invalid pixels) no wider than 10\,\kms.}
\label{f:wlcov}
\end{center}
\end{figure}

\subsubsection{Continuum-to-noise ratio (\CN)}\label{sss:CNR}

Each DR1 FITS header provides the median \CN\ of the spectrum in bins of 1000\,\kms.
\Tref{t:cat} presents these \CN\ values for the bins with wavelength centres closest to 3500, 4500, 5500, 6500 and 7500\,\AA.
The left panel of \Fref{f:cnr} shows the \CN\ distribution for the DR1 quasars at these wavelengths.
Here, for uniform treatment of quasars with different dispersions, the \CN\ has been converted to a per-2.5-\kms-pixel value for all quasars.
Most DR1 quasar spectra have \CN\ in the range 5--60.
However, a substantial number of spectra (26 at 5500\,\AA\ and 28 at 6500\,\AA) have $\CN>100$\,per 2.5-\kms\ pixel.
Of course, the \CN\ in the most sensitive region of the spectrograph ($\sim$5500--6500\,\AA) has a non-zero minimum, but the minimum extends to essentially zero for the bluer regions, particularly at wavelengths $\sim$3500\,\AA.
The right panel of \Fref{f:cnr} shows the \CN\ distributions at five wavelengths in the common quasar rest-frame which characterise the data quality in the \lya\ forest near the Lyman limit (935\,\AA) and \lya\ emission line (1170\,\AA), and in three regions redwards of \lya\ -- 1450, 1690, 2200\,\AA\ -- which are known to be relatively free of quasar emission lines \citep[e.g.][]{vandenBerk:2001:549,Murphy:2016:1043}.

\begin{figure}
\begin{center}
\includegraphics[width=0.95\columnwidth]{DR1_CNR.pdf}
\vspace{-1em}
\caption{Distribution of continuum-to-noise ratio (\CN) per 2.5-\kms\ pixel for the 467 DR1 quasars with final spectra. The left panel shows the \CN\ distributions at a series of representative observed wavelengths. The right panel shows the \CN\ distributions in the common quasar rest frame at two representative \lya\ forest wavelengths (935 and 1170\,\AA) and three relatively emission-line free wavelengths redwards of \lya. The \CN\ is measured as the median value within a 1000-\kms\ window around the nominated wavelength. The inset in each panel shows the high-\CN\ tail of each distribution. Note that a small number of spectra have even higher \CN\ at some wavelengths than the insets cover. For example, J051707$-$441055 has $\CN=342$ per 2.5-\kms\ pixel at 5500\,\AA, the highest in the DR1 sample.}
\label{f:cnr}
\end{center}
\end{figure}

\subsubsection{Nominal resolving power ($R_{\rm nom}$)}\label{sss:Rnom}

$R_{\rm nom}$ is the mean resolving power of the contributing exposures, in 1000\,\kms-wide bins, determined from their slit widths, assuming the slit is uniformly illuminated.
For quasar exposures where the seeing was similar to, or smaller than, the slit width, the real resolving power will be somewhat higher than the nominal value; for example, \citet{Kotus:2017:3679} found that an $\approx$10\% increase in resolving power is typically expected.
Each DR1 FITS header provides $R_{\rm nom}$ for all bins (within the wavelength coverage of the spectrum), while \Tref{t:cat} includes values only for the bins with wavelength centres closest to 3500, 4500, 5500, 6500 and 7500\,\AA.
$R_{\rm nom}$ was modelled as a second-order polynomial of slit width, $d_{\rm slit}$, in arcseconds: $R_{\rm nom}=a_0+a_1d_{\rm slit}+a_2d_{\rm slit}^2$.
The polynomial coefficients, $a_i$, shown in \Tref{t:nom_resol} were derived by fitting the resolving power against slit width of the ThAr exposures in ESO's UVES quality control database\footnote{See \urlstyle{rm}\url{http://archive.eso.org/bin/qc1_cgi}} for years 2010--2016.
Separate sets of coefficients were derived for the blue arm (from the 390-nm setting) and red arm (580-nm setting), and for unbinned and 2$\times$2-binned ThAr exposures (with 0.4--1.2 and 0.8--1.4\,arcsecond slit widths, respectively).
The two CCD chips in the red arm were found to have very similar resolving powers in all cases, so they have been treated together and assigned the same coefficients.
Some or all of the contributing exposures to 11 DR1 quasars were binned 3$\times$2; however, their nominal resolving powers were assumed to be the same as for 2$\times$2-binned exposures.

\begin{table}
\caption{Polynomial coefficients used to determine the nominal resolving power (i.e.\ for a uniformly illuminated slit), as a function of slit width: $R_{\rm nom}=a_0+a_1d_{\rm slit}+a_2d_{\rm slit}^2$ for $d_{\rm slit}$ in arcseconds. See text in \Sref{s:database} for details.}
\begin{center}
\label{t:nom_resol}
\begin{tabular}{lcccc}
\hline
\multicolumn{1}{c}{Arm} & Binning & $a_0$ & $a_1$ & $a_2$ \\
\hline
Blue & None        & 10033  & 63237 & $-$24986 \\
Blue & 2$\times$2  & 22011  & 50563 & $-$22803 \\
Red  & None        & 8533.3 & 52709 & $-$16005 \\
Red  & 2$\times$2  & 28846  & 28505 & $-$9533.3 \\
\hline
\end{tabular}
\end{center}
\end{table}

\subsection{Remaining artefacts, systematic effects and limitations}\label{ss:artefacts}

While we invested significant effort to ensure that at least a minimum standard of cleaning and quality control was applied to each spectrum (see \Sref{ss:poplerDR1}), there are numerous remaining artefacts in all DR1 spectra.
We discuss the most prominent of these below.
It is important that users of the DR1 spectra consider the effect these remaining artefacts may have on their analyses; we expect that most statistical analyses of the spectra will be somewhat sensitive to one or more of these artefacts.
Users are encouraged to discuss these and other potential (or more subtle) systematic effects with the corresponding author (MTM).

\subsubsection{Continuum errors}\label{sss:art_cont}

\begin{figure}
\begin{center}
\includegraphics[width=0.95\columnwidth]{DR1_cont_overestimate.pdf}
\vspace{-1em}
\caption{Examples of overestimated automatic continuum level. The top panel shows a very high \SN\ region while the bottom panel shows a very low \SN\ region, i.e.\ the extreme blue end of the 390-nm standard wavelength setting. The spectra (blue) have been smoothed by a Gaussian filter (with a 20-pixel kernel) to aid the eye in discerning the bias in the continuum level. In both cases, the continuum is generally overestimated by up to $\sim$0.25 times the flux RMS.
That is, the magnitude of the bias scales with the \SN; analyses of lower-\SN\ DR1 spectra are more likely to be affected by this bias.} \label{f:cont_overest} \end{center} \end{figure} As discussed in \Sref{ss:popler_summary} and \Sref{sss:continuum}, the continuum in most regions redwards of the \lya\ emission line is automatically fit, while it is manually fit in the \lya\ forest. In addition to the inherent inaccuracy in defining a \lya\ forest continuum (see \Sref{sss:continuum}), these two approaches cause two significant systematic continuum errors in the DR1 spectra: \begin{enumerate} \item Overestimated automatic continuum fit: Even in (by-eye) completely unabsorbed regions redwards of the \lya\ emission line, the automatic continuum level is generally slightly overestimated. The mean continuum-normalised flux in such regions lies 0.05--0.25$\bar{\sigma}$ below unity, for $\bar{\sigma}$ the mean normalised flux uncertainty. This continuum overestimate is caused by the asymmetric prejudice built in to our iterative continuum fitting algorithm (see \Sref{ss:popler_summary}): absorption features are far more common than (spurious) emission features (e.g.\ cosmic rays), so the algorithm rejects pixels with flux (default values) $>$3.0$\sigma$ above and $>$1.4$\sigma$ below the current fit for the next iteration. For high \SN\ DR1 spectra, this effect is clearly a small fraction of the continuum level and therefore unlikely to cause problems for most analyses. The upper panel of \Fref{f:cont_overest} shows an example. However, the overestimate will be a larger proportion of the continuum level for lower-\SN\ regions of all DR1 spectra; for example, the lower \SN\ at the blue extreme of many spectra causes a noticeable overestimate, as illustrated in the lower panel of \Fref{f:cont_overest}. \item Redshift-dependent bias in manual continuum fits of the \lya\ forest: As described in \Sref{sss:continuum}, our approach to manually fitting a continuum in the \lya\ forest involved visually identifying ``seemingly unabsorbed peaks'' and interpolating between them with the same iterative polynomial fitting algorithm used redwards of the \lya\ emission line. Upon very close inspection of the DR1 spectra, there appear to be convincingly unabsorbed continuum regions of the \lya\ forest at $z\la3$; i.e.\ the lower number density of forest lines leaves more truly unabsorbed regions to fit. Therefore, we expect at least the same overestimation of the continuum in the $z\la3$ \lya\ forest as described in point (i) above. However, the increasing \lya\ forest line density means that few (or no) truly unabsorbed regions exist at higher redshifts, so our fits will underestimate the true continuum. While it is likely to increase with redshift, it is difficult to estimate the magnitude of this bias. However, we refer users of DR1 spectra to the study by \citet{Becker:2007:72} as a guide: using a similar continuum fitting approach, fitting theoretical models of the \lya\ flux probability distribution required upward adjustments to their continuum levels by $\la$3\% at $z\sim3$, and by $\sim$7--17\% at $z\sim4$--5. \end{enumerate} Finally, the incidence of Lyman-limit systems (where there is significant remaining flux bluewards of the limit) and broad absorption line (BAL) features makes the definition of the continuum dependent on the scientific question being addressed: for example, what constitutes the continuum is very different when studying a Lyman limit or the \lya\ forest bluewards of it. 
Generally, in such cases, we attempted to fit or interpolate a continuum that would be useful for most users. However, we urge those studying Lyman-limit systems and BALs in DR1 spectra to redefine the continuum placement accordingly. \subsubsection{Telluric features}\label{sss:telluric} \Fref{f:telluric} illustrates the strongest common telluric features in the DR1 spectra. In general, no attempt was made to remove telluric features in individual spectra. While, in principle, variation of the heliocentric velocity among a large number of exposures, plus our iterative removal of outlying data when combining exposures, can remove many telluric features from the final spectrum, these criteria are rarely met in practice. This means that most DR1 spectra contain many telluric absorption lines -- particularly the O$_2$ $\gamma$, B and A bands ($\sim$6300, 6880 and 7620\,\AA, respectively) and the H$_2$O bands at $\approx$6470--6600, 6830--7450, 7820--8620 and 8780--10000\,\AA\ -- and residuals from imperfectly subtracted sky emission lines, particularly at $\ga$7000\,\AA, the strong \ion{O}{i}\,$\lambda$5578 sky emission line and the Na{\sc \,i}\,$\lambda$5891/5897 doublet. In the few spectra with high \SN\ at $\la$3400\,\AA, the broad Huggins ozone bands are visible. If these bands occurred redwards of the \lya\ forest (i.e.\ for quasars at $\zem\la1.5$), they were reasonably well fit by our continuum-fitting process, but our fits are not likely to reflect the complex shapes of individual bands. However, for higher redshift quasars, our continuum fitting approach generally ignored the Huggins bands; users of such spectra should be cautious of the continuum fit and corresponding normalised flux spectrum below $\approx$3300\,\AA. \begin{figure} \begin{center} \includegraphics[width=0.95\columnwidth]{DR1_telluric.pdf} \vspace{-1em} \caption{Common, strong telluric features in the DR1 quasar spectra. The top panel shows two spectra (J051707$-$441055: lower, purple; J083052$+$241059: upper, orange; both are Gaussian-smoothed with a 6-pixel kernel for clarity) affected by the broad Huggins O$_3$ bands at $\la$3300\,\AA. This is easily seen in the upper spectrum which does not cover the \lya\ forest: our undulating continuum fit (dashed line) largely matches the band shapes (though not completely; e.g.\ the broad dips below the continuum fit near $\approx$3156, 3202 and 3225\,\AA). These bands are not fitted in the lower spectrum because it covers the \lya\ forest. The other panels show poorly subtracted telluric emission lines (\ion{O}{i}\,$\lambda$5578 in J124913$-$055919 and \ion{Na}{i}\,$\lambda$5891/5897 in J001306$+$000431) and strong molecular absorption bands in J051707$-$441055.} \label{f:telluric} \end{center} \end{figure} \subsubsection{Cosmic rays and bad pixels}\label{sss:cosmic} The {\sc cpl} data reduction suite, and the post-reduction processing and combination of exposures in \popler, both attempt to identify and mask `cosmic rays' and bad pixels. However, many remain unidentified and, at least, imperfectly removed from individual exposures. DR1 spectra with fewer contributing exposures therefore contain many, generally narrow ($\la$5 pixels wide) remaining cosmic rays and bad pixel residuals. These are much less common in spectra with more than $\sim$5 contributing exposures. 
Nevertheless, even in such cases, users should be cautious of residual cosmic ray and bad pixel artefacts in deep absorption lines: as \Fref{f:cosmic} illustrates, \popler\ does not remove sharp, positive flux spikes in regions of low local relative flux because these can be real velocity structure in metal-line absorption systems. These were generally not removed in our manual cleaning process. \begin{figure} \begin{center} \includegraphics[width=0.95\columnwidth]{DR1_cosmic.pdf} \vspace{-1em} \caption{Example of a `cosmic ray' (at 3894.2\,\AA) in the saturated core of a \lya\ forest line in the DR1 spectrum of J033108$-$252443. \popler\ does not remove such artefacts because similar, real spectral features are often seen in saturated metal lines, and our manual cleaning process did not attempt to identify and remove all such artefacts.} \label{f:cosmic} \end{center} \end{figure} \subsubsection{Unidentified absorption artefacts}\label{sss:unidentified} During the manual cleaning process, several features were noticed in many spectra that, in some cases, were clearly not due to real absorption systems: they had slightly different positions and shapes in different exposures of the same quasar, similar to the artefacts from bad rows of CCD pixels discussed in \Sref{sss:artefacts}, but much narrower and weaker in general. When these features were clearly spurious they were manually removed. However, in many spectra, the author cleaning the spectrum either did not notice these artefacts -- they vary in strength considerably from quasar to quasar, and can be weak (or apparently absent) and not obvious to visual inspection -- or there was not enough evidence to confirm they were not real absorption lines (e.g.\ there was not a significant difference between the features in different exposures). Upon completion of the cleaning of all DR1 spectra, it was clear that similar features were often found at similar wavelengths in different quasar spectra, confirming their spurious nature. However, their origin is not currently clear. Simple checks for bad pixel runs, ThAr remnants and flat-field features did not reveal a clear cause. To more systematically reveal these unidentified features, and other common remaining artefacts, we combined the final spectra of the 131 DR1 quasars at $z<1.5$. The redshift criterion ensures that the composite is not contaminated by the \lya\ forest. The spectra were redispersed onto a common vacuum--heliocentric wavelength scale with 2.5-\kms\,per pixel dispersion and combined using a clipped mean for each pixel. A contributing pixel with flux more than 3$\sigma$ below, or 4$\sigma$ above, the mean was removed ($\sigma$ is its flux uncertainty) to avoid real absorption lines or sky-line emission residuals and reveal features common to many spectra. \Fref{f:unidentified} shows the main unidentified artefacts revealed by the composite spectrum at 4716, 4744, 5240, and 5580--5800\,\AA. The width and shape of the composite features reflect those found in individual spectra. However, they do seem to appear at slightly different (vacuum--heliocentric) wavelengths in individual spectra, so the composite features may be somewhat broadened. The composite spectrum also reveals many weaker features. We provide the clipped mean composite DR1 spectrum in \citet{Murphy:2018:UVESSQUADDR1} so that users can utilise it directly to identify and mask spurious spectral features that may affect their absorption line surveys.
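For reference, the short Python sketch below illustrates the asymmetric clipping rule described above for combining the redispersed, continuum-normalised spectra into a composite; it is not the code actually used to produce the DR1 composite, and the function and array names are purely illustrative.
\begin{verbatim}
import numpy as np

def clipped_mean_composite(flux, error, low=3.0, high=4.0):
    """Asymmetrically clipped mean of spectra already redispersed onto a
    common 2.5 km/s per pixel vacuum-heliocentric wavelength grid.
    flux, error: arrays of shape (N_spectra, N_pixels)."""
    flux = np.array(flux, dtype=float)
    err = np.asarray(error, dtype=float)
    mean = np.nanmean(flux, axis=0)
    # Reject pixels more than `low` sigma below, or `high` sigma above, the
    # mean, where sigma is each pixel's own flux uncertainty, then recompute.
    dev = (flux - mean) / err
    flux[(dev < -low) | (dev > high)] = np.nan
    return np.nanmean(flux, axis=0)
\end{verbatim}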
\begin{figure} \begin{center} \includegraphics[width=0.95\columnwidth]{DR1_unidentified.pdf} \vspace{-1em} \caption{Main unidentified artefacts revealed by the clipped mean composite spectrum of $\zem<1.5$ DR1 quasars. These features are detectable in many, but certainly not all, individual DR1 spectra. In some cases they were removed in the manual cleaning process, but not in most cases, as the composite spectrum shows.} \label{f:unidentified} \end{center} \end{figure} \subsubsection{Underestimated uncertainties at low flux levels}\label{sss:low_flux} Common to all DR1 spectra is that the flux uncertainty arrays of individual {\sc cpl}-reduced exposures are underestimated when the quasar flux is low. This is easily noticed in \popler\ as peaks in the $\chi^2_\nu$ spectrum (see \Sref{sss:artefacts}) in strong and, especially, saturated absorption lines, or where the \SN\ of individual exposures is $\la$5\,per pixel. It is therefore most noticeable in the \lya\ forest. \Fref{f:unc_underestimate} illustrates two examples in the \lya\ forest of one DR1 quasar spectrum. $\chi^2_\nu$ typically reaches $\sim$2 in such regions, indicating that the uncertainty array is underestimated by a factor of $\sim$1.4. However, this factor depends on the \SN\ of individual exposures: it tends to be larger for lower \SN\ exposures. We suspect that the {\sc cpl} reduction pipeline underestimates the noise contribution from the sky flux during the optimal extraction. This problem also existed in the previous, {\sc eso-midas} data reduction code for UVES. To compensate for some of its effects on absorption line studies, some authors have increased the flux uncertainty estimate in the cores of deep/saturated absorption lines in UVES spectra \citep[e.g.][]{King:2012:3370}. \begin{figure} \begin{center} \includegraphics[width=0.95\columnwidth]{DR1_unc_underestimate.pdf} \vspace{-1em} \caption{Example of the underestimated uncertainty spectrum in regions of low flux. Top two panels: Final DR1 spectrum (blue) of J033108$-$252443 in a region where all 14 contributing exposures have low \SN\ ($\la$5\,per pixel) and, therefore, where the uncertainty spectrum (orange; shown as 3$\sigma$ for clarity) is dominated by sky photon noise; this is also indicated by its low contrast between areas of unabsorbed continuum and saturated lines. The corresponding $\chi^2_\nu$ spectrum ($\chi^2$ per degree of freedom of the flux in the contributing exposures around the weighted mean flux) is generally higher than expected -- i.e.\ $\approx$2 (cf.\ the expectation of $\approx$1) -- demonstrating that the uncertainty spectrum is underestimated. Lower two panels: Same as upper two panels but for a higher \SN\ region. Here, the quasar photon noise dominates and $\chi^2_\nu$ is $\approx$1 in unabsorbed regions. However, in strong or saturated lines, where the sky photon noise dominates, $\chi^2_\nu$ increases to $\approx$2. This indicates that the sky noise contribution is underestimated.} \label{f:unc_underestimate} \end{center} \end{figure} \subsubsection{Bad data in individual exposures}\label{sss:bad_data} As noted in \Sref{sss:artefacts}, our approach for removing bad data from contributing exposures was to do so when they affected an obvious absorption feature. Artefacts from remaining bad data may still affect, or even mask, very weak absorption features that were not noticed by eye in the manual cleaning process.
Users aiming to detect weak absorption features in the individual DR1 spectra are advised to inspect the flux, uncertainty and $\chi^2_\nu$ spectra -- of both the combined spectra and their contributing exposures -- in detail. Indeed, \popler\ was specifically designed to display these details to allow such quality control steps. \subsubsection{Blaze function variations and remnants}\label{sss:blaze} The blaze function for each echelle order of an exposure is approximated using the master flat field by the {\sc cpl} reduction software. This, in principle, can change from exposure to exposure as the alignment of optical elements changes slightly with time or wavelength setting, or with changes in the flat field lamp spectrum (e.g.\ as it ages) or illumination pattern on the CCDs. As explained in \Sref{sss:artefacts}, these effects (and possibly others) cause ``bends'' between the spectral shapes of spectra from overlapping orders, the most obvious of which we attempted to identify and correct in the manual cleaning process. Our priority was to address this problem when it affected an obvious absorption line. However, for very weak absorption features (especially broader ones) that may not have been noticed by eye, weak bends may not have been removed. And, where obvious absorption lines were not found, noticeable bends will still be present in the DR1 spectra, particularly in high \SN\ cases. An example of remaining bends in the spectrum of the bright quasar J133335$+$164903 is shown in the upper panels of \Fref{f:bends}. \begin{figure} \begin{center} \includegraphics[width=0.95\columnwidth]{DR1_bends.pdf} \vspace{-1em} \caption{Examples of artefacts remaining from the blaze function removal. Top panels: Spectra from overlapping echelle orders may have different shapes in the overlap region -- ``bends'' -- causing higher $\chi^2_\nu$ values and, potentially, spurious effects on weak absorption features in the combined spectrum. The $N_{\rm pix}$ spectrum shows the number of contributing echelle orders. The increase from $N_{\rm pix}\approx11$ to $\approx$22 indicates the overlap region of neighbouring echelle orders; the increase in $\chi^2_\nu$ in these overlap regions is due to bends. In this example, no adverse effects on the normalised flux spectrum are apparent. Lower panels: Imperfect blaze function removal leaves broad undulations in the flux spectrum that coincide with the overlap region between echelle orders (indicated by the $N_{\rm pix}$ spectrum). In this example, note the sharp drop in the $N_{\rm pix}$ spectrum at $\approx$5031\,\AA: the low-\SN\ red ends of the echelle orders covering $\approx$4990--5040\,\AA\ have been manually removed to ensure they did not affect the strong absorption line system at $\approx$5031--5045\,\AA.} \label{f:bends} \end{center} \end{figure} The correction for the blaze function also appears to be imperfect in systematic ways. Many DR1 spectra therefore contain remnants of the blaze function that appear as ripples or undulations in the flux spectrum over echelle-order scales. An example is shown in the lower panels of \Fref{f:bends}. These undulations have amplitudes $\la$5\% of the continuum level, and are usually substantially smaller. In non-\lya\ forest regions, our continuum fitting approach will largely correct for these blaze remnants, as is evident in \Fref{f:bends}.
However, in \lya\ forest regions, where individual continuum fits cover a larger range of wavelengths, and where the forest obscures such broad, shallow undulations, these blaze remnants may still significantly affect the final normalised flux spectrum. \subsubsection{Zero level errors}\label{sss:zero} Inspection of {\sc cpl}-extracted UVES exposures with relatively low \SN\ often reveals imperfect zero levels: the average flux in saturated line cores is significantly different to zero. This indicates that the sky flux level is inaccurately measured in the optimal extraction process. \Fref{f:zero} shows some typical examples of this problem in DLA absorption troughs (upper panel) and saturated \lya\ forest line cores (lower two panels). As the figure illustrates, the zero-level error in some final spectra can be of order $\sim$2--4\%; in extreme cases -- exposures with very low \SN, where the trace of the quasar is not well determined -- the error can be up to $\sim$10\%. \Fref{f:zero} also shows that the zero level can be overestimated or underestimated (compare the lower two panels) and that, in some cases, it can vary from too low to too high over relatively short wavelength ranges (upper panel); however, in most cases the zero level error appears to have the same sign and not vary substantially in magnitude over much larger wavelength ranges (typically $\ga$300\,\AA). We have not attempted to correct for these zero-level errors in the DR1 spectra so users should account for them when, for example, modelling strong or saturated absorption lines (e.g. DLAs). \begin{figure} \begin{center} \includegraphics[width=0.95\columnwidth]{DR1_zero_level.pdf} \vspace{-1em} \caption{Examples of zero-level errors in the DR1 spectra. The spectra (blue histograms) have been smoothed (black line) to show a running average flux, compared with the 1$\sigma$ uncertainty array (orange line). Top panel: A DLA trough in the spectrum of J020900$-$455026 where the zero level varies from $\approx$3\% too high ($\approx$4061--4068\,\AA) to $\approx$2\% too low ($\approx$4072--4084\,\AA) over a relatively short wavelength range. Middle panel: Saturated \lya\ forest line cores where the zero level is overestimated by $\approx$1--3\% (same spectrum as top panel). Bottom panel: Saturated \lya\ forest lines in the spectrum of J231359$-$370446 where the zero level is underestimated by $\approx$2--3\%.} \label{f:zero} \end{center} \end{figure} \subsubsection{Wavelength scale shifts and distortions}\label{sss:wave} The wavelength calibration accuracy of UVES has been the specific focus of many quasar absorption studies, particularly those seeking to constrain possible variations in the electromagnetic fine-structure constant and proton-to-electron mass ratio (using metal-line and H$_2$ absorption, respectively). 
The wavelength scale is set by comparison with a ThAr lamp exposure, and several effects shift and/or distort the true quasar wavelength scale with respect to this: \begin{enumerate} \item Mechanical drifts and changes in the refractive index of air are, by design, compensated for by resetting the grating angles \citep{Dekker:2000:534}, which, in practice, is limited to $\sim$0.1--0.2\,\kms\ accuracy; \item Differences in alignment of the quasar in a slit between exposures, and/or between the two slits (i.e.\ between the spectrograph arms), produce (approximately) velocity-space shifts of, typically, up to $\sim$0.4\,\kms\ \citep[e.g.][]{Molaro:2013:A68,Rahmani:2013:861,Evans:2014:128,Kotus:2017:3679} and up to $\sim$2\,\kms\ in extreme cases; \item Intra-order distortions (i.e.\ wavelength-dependent shifts on echelle order scales), which have a largely (but not entirely) repeated shape and amplitude from order to order, have amplitudes of typically $\sim$0.1\,\kms\ \citep[up to 0.4\,\kms\ in extreme cases; e.g.][]{Whitmore:2010:89,Whitmore:2015:446}; \item Long-range distortions between the science object and ThAr wavelength scales have been identified that have magnitudes of typically $\sim$0.1\,\kms\ per 1000\,\AA\ and up to $\sim$4 times larger in extreme cases \citep[e.g.][]{Rahmani:2013:861,Bagdonaite:2014:10,Whitmore:2015:446}. \end{enumerate} The magnitude, sign and shape of the latter two distortion effects are quite variable, and can change substantially over $\sim$1--3 day periods. In general, the individual exposures and final DR1 spectra are not corrected for the above effects. However, they are relatively small -- typically $\la$20\,\% of an (unbinned) pixel -- so are not likely to significantly affect most applications. A small number of DR1 spectra have been corrected using asteroid or iodine-cell stellar observations \citep[e.g.][]{Evans:2014:128}, solar twin stars \citep[e.g.][]{Dapra:2015:489,Dapra:2016:192} or spectra of the same object on better-calibrated spectrographs \citep[e.g.][]{Kotus:2017:3679}. The UPL files include these corrections in such cases. \section{Scientific uses}\label{s:use} As described in \Sref{s:intro}, numerous scientific questions can be addressed with the DR1 quasar spectra. In this section we seek to highlight and assist the large-scale statistical studies of quasar absorption systems that are possible with such a large sample of high-resolution spectra. Specifically, we illustrate how the DR1 spectra will be useful for detailed DLA studies, absorption line surveys, and studies of time-variable absorption lines. \subsection{Damped \lya\ system studies}\label{ss:dlas} DLAs contain most of the neutral hydrogen in the universe at all epochs currently probed, from $z\sim5$ down to 0 \citep[see review by][]{Wolfe:2005:861}. Their high \ion{H}{i} column densities -- $\NHI\ge2\times10^{20}$\,\pcmsq, by definition -- shield the gas from ionising radiation, allowing it to remain highly neutral, presumably making DLA gas available for later star formation. DLAs also contain a large proportion of the universe's metals; studies of their chemical abundances and metallicities -- and of how these evolve with redshift -- are therefore important elements in understanding galaxy formation and evolution. DLA metal abundances and metallicities can be very accurately measured, owing to the simple relationship between optical depth and column density, and the neutrality of the DLA gas (i.e.\ no corrections for ionised hydrogen or metals are generally required).
The most accurate and precise DLA metal-line measurements are possible in high-resolution spectra because the metal line velocity structures can be resolved. For these reasons, the DR1 quasar spectra offer an excellent opportunity for detailed studies of a large sample of DLAs. To assist such work, we have identified 155 DLAs towards the 467 DR1 quasars with final spectra and catalogued them in \Tref{t:dla}. While 137 of the DLAs in \Tref{t:dla} have previously been reported in the literature, the other 18 are reported here for the first time (to our knowledge). \setlength{\tabcolsep}{0.29em} \input{tab_dla} \setlength{\tabcolsep}{\oldtabcolsep} All DLA candidates were identified by visually checking the final DR1 spectrum; the very broad, damped \lya\ line-shape is easily identified in such high-resolution spectra. Note that we only report DLAs where the \lya\ transition is covered by the DR1 spectrum, even when a DLA has been previously reported\footnote{For example, the following DLAs were identified in the UVES DLA compilation of \citet{Zafar:2013:A141} from previous literature, but are not covered by the DR1 spectra: $\zab=3.776$ towards J000651$-$620803; $\zab=3.385$ towards J020346$+$113445; $\zab=3.178$ towards J033755$-$120404; and $\zab=3.448$ towards J142107$-$064356.}, and we do not report systems with $\NHI<2\times10^{20}$\,\pcmsq. The literature was then searched for previous reports of each DLA (primarily using the NASA Extragalactic Database and NASA Astrophysics Data System) and \NHI\ measurements. The \NHI\ measurement from the highest resolution, highest quality spectrum is provided in \Tref{t:dla} for these 137 literature DLAs. This sample builds on the 97 DLAs (with \lya\ coverage) identified in the ESO UVES ``advanced data products'' sample of 250 quasar spectra by \citet{Zafar:2013:A141} (which contains the original DLA references). Note that we identified two absorbers \citep[included in][]{Zafar:2013:A141}, originally discovered in low-resolution spectra, that the DR1 spectra clearly show are not DLAs; they have much lower column densities than the DLA threshold: $\zab=4.060$ towards J033829$+$002156 \citep{Peroux:2001:1799} and $\zab=1.947$ towards J034957$-$210247 \citep{Ellison:2001:393}. These are excluded from the DR1 DLA catalogue. We also found the previously reported redshift to be inaccurate (by more than $\sim$200\,\kms) for three DLAs and have corrected these in \Tref{t:dla}: $\zab=2.302$ towards J043403$-$435547 \citep{Ellison:2001:393}; $\zab=2.338$ towards J123437$+$075843 \citep{Zafar:2013:A141}; and $\zab=2.574$ towards J234625$+$124743 \citep{Zafar:2013:A141}. For the 18 new DLAs, \Tref{t:dla} provides the \NHI\ and representative uncertainty measured directly from the final DR1 spectra. To determine \NHI\ we overlaid a single-component Voigt profile over the spectrum with {\sc vpguess} \citep{Liske:2014:VPGUESS}. The redshift was fixed at the approximate optical-depth weighted mean centroid of unsaturated metal lines associated with the DLA, and the Doppler $b$ parameter was fixed to 20\,\kms. \NHI\ was then adjusted to best match the damped \lya\ profile shape. \Fref{f:egDLA} shows the \lya\ and strongest associated metal line transitions for one new DLA ($\zab=1.886$ towards J000443$-$555044). The Voigt profile with the best matching \NHI\ value is shown for comparison with the \lya\ transition. The Supporting Information online provides similar plots for all 18 new DLAs. 
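As a rough guide to this procedure, the Python sketch below generates a single-component damped \lya\ profile for a chosen column density using the Voigt function from {\sc scipy}; it is only a simplified illustration, not the {\sc vpguess} implementation -- the atomic data are approximate and the function and variable names are not from any DR1 software -- with parameters matching the example DLA in \Fref{f:egDLA}.
\begin{verbatim}
import numpy as np
from scipy.special import wofz

# Approximate Lya atomic data (illustrative values)
LAM0 = 1215.6701e-8   # rest wavelength [cm]
F_OSC = 0.4164        # oscillator strength
GAMMA = 6.265e8       # damping constant [1/s]
C = 2.998e10          # speed of light [cm/s]

def tau_lya(wave_A, z_abs, logN, b_kms):
    """Voigt-profile Lya optical depth for N(HI) = 10**logN per cm^2."""
    nu = C / (wave_A * 1e-8 / (1.0 + z_abs))   # absorber-frame frequency [Hz]
    nu0 = C / LAM0
    dnu_D = (b_kms * 1e5 / C) * nu0            # Doppler width [Hz]
    a = GAMMA / (4.0 * np.pi * dnu_D)
    x = (nu - nu0) / dnu_D
    voigt = wofz(x + 1j * a).real              # Voigt function H(a, x)
    sigma = 2.654e-2 * F_OSC * voigt / (np.sqrt(np.pi) * dnu_D)
    return 10.0**logN * sigma                  # optical depth

# Model flux for the new z_abs = 1.886 DLA with log N(HI) = 20.4, b = 20 km/s
wave = np.linspace(3480.0, 3540.0, 2000)       # observed wavelength [Angstrom]
flux_model = np.exp(-tau_lya(wave, 1.886, 20.4, 20.0))
\end{verbatim}
Adjusting the column density by hand until such a model envelope matches the observed damping wings mimics the approach described above.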
\begin{figure*} \begin{center} \includegraphics[width=0.8\textwidth]{DR1_J000443_DLA.pdf} \caption{Example of a DLA in the DR1 spectrum of J000443$-$555044 that has not been previously reported (to our knowledge). Each panel shows the portion of spectrum (light grey histogram) covering the labelled transition in velocity space around a common redshift, $\zab=1.886$. To aid visual inspection, the spectrum has been Gaussian-smoothed (shown in the black line). A single-component Voigt profile with an \ion{H}{i} column density that best matches the \lya\ profile, $\NHI=10^{20.4\pm0.1}$\,\pcmsq, is shown in the upper panel (green dashed line; shading indicates the \NHI\ uncertainty). The ``F'' in the \tran{O}{i}{1302} panel indicates that it falls within the \lya\ forest.} \label{f:egDLA} \end{center} \end{figure*} The DR1 sample of 155 DLAs is the largest sample from a single high-resolution spectrograph to date. In this context it offers unprecedented opportunities for studying the range of chemical properties of DLAs, where the high resolution allows the detection and detailed modelling of the associated metal lines. However, it is important to note that a substantial proportion of UVES quasar observations specifically target quasars with known DLAs: \citet{Zafar:2013:A141} searched the proposal titles and abstracts for references to DLAs for their sample of 250 UVES quasar spectra, finding that $\sim$45 were targeted due to specific, known DLAs. While this ensures a large DLA sample from the DR1 spectra, it also ensures the sample is biased: DLAs will be over-represented in the DR1 spectra and their column density, redshift and metallicity distributions should be interpreted cautiously as a result. \Fref{f:DLAdists} illustrates the distribution of \ion{H}{i} column densities and absorption redshifts in the DR1 DLA sample. The \zab\ distribution in the lower panel is clearly a strong function of the emission redshift distribution (\Fref{f:sky+zem}), but also of the wavelength coverage and detectability of broad \lya\ lines; the following section (\Sref{ss:surveys}) details these aspects. The upper panel of \Fref{f:DLAdists} compares the shape of the DR1 \lNHI\ distribution with the Gamma function fit to that of the SDSS DLA survey by \citet{Noterdaeme:2009:1087}. The fit has been scaled to yield a cumulative number of DLAs equal to the number in DR1. Overall, the relative representation of low and high \NHI\ DLAs in DR1 approximately reflects the expectation from a blind survey. However, very high \NHI\ DLAs are somewhat over-represented: there are 4 systems with $\lNHI\ge21.7$ in DR1, while 0.8 are expected from the scaled fit from SDSS in \Fref{f:DLAdists}. \begin{figure} \begin{center} \includegraphics[width=0.95\columnwidth]{DR1_DLAdists.pdf} \vspace{-1em} \caption{\ion{H}{i} column density and absorption redshift distributions for the 155 DLAs identified in the final DR1 spectra. Because the original UVES observations often targeted known DLAs, they will be significantly over-represented in the DR1 spectra. The upper panel compares the \lNHI\ distribution (histogram) shape with the Gamma function fit of the frequency distribution, $f(\NHI)$, derived from the SDSS survey by \citet{Noterdaeme:2009:1087}, scaled so that its integral for $\lNHI\ge20.3$ is 155 (orange line).
The absorption redshift (\zab) distribution shown in the lower panel should be interpreted in conjunction with the emission redshift distribution in \Fref{f:sky+zem} and the \lya\ detection sensitivity functions in Figs.\ \ref{f:trancov} and \ref{f:gz}.} \label{f:DLAdists} \end{center} \end{figure} \subsection{Absorption line surveys}\label{ss:surveys} \Fref{f:trancov} illustrates the total DR1 spectral coverage of strong absorption lines that are commonly surveyed in cosmological studies: \ion{H}{i} \lya, \tran{O}{vi}{1031}, \tran{C}{iv}{1548}, \tran{Mg}{ii}{2796}, \tran{Si}{iv}{1393} and \tran{Ca}{ii}{3934}. The metal lines occur in doublets and so are easily identified, especially in high-resolution spectra because their (identical) velocity structures are resolved. In \Fref{f:trancov} only the strongest member of the doublets, as listed above, is considered. For \ion{H}{i} \lya, \Fref{f:trancov} includes only the \lya\ forest, i.e.\ the redshift path between the \lya\ and \lyb\ quasar emission lines, and for \ion{O}{vi}, which occurs only in the \lya\ forest, all redshifts up to the emission redshift are included. For \ion{C}{iv}, \ion{Mg}{ii}, \ion{Si}{iv} and \ion{Ca}{ii}, the redshift path extends down to the \lya\ emission line (or zero redshift, for \ion{Ca}{ii}). \Fref{f:trancov} also shows the total number of DR1 spectra that cover each transition at any redshift (in parentheses in the legends). For example, 68\% of the spectra cover some part of the \lya\ forest, while almost all DR1 spectra cover \ion{Mg}{ii} (457 of 467) and \ion{Ca}{ii} (all but one of 467). Even with these simple considerations, it is clear from \Fref{f:trancov} that the DR1 spectra provide an extensive dataset in which to survey absorption lines at high resolution. \begin{figure} \begin{center} \includegraphics[width=0.95\columnwidth]{DR1_trancoverage.pdf} \vspace{-1em} \caption{Number of DR1 quasar spectra covering the most commonly surveyed transitions as a function of redshift: \ion{H}{i} \lya, \tran{O}{vi}{1031}, \tran{C}{iv}{1548}, \tran{Mg}{ii}{2796}, \tran{Si}{iv}{1393} and \tran{Ca}{ii}{3934}. The total number of spectra covering each transition, integrated over all redshifts, is given in parentheses after the species label. Note the different vertical axis ranges for the top and bottom panels.} \label{f:trancov} \end{center} \end{figure} However, there are usually many other considerations required for statistical absorption line surveys in order to account for selection effects and biases. Typically, the redshift sensitivity function, $g(z)$, is a key metric: this is the total number of spectra in which the spectral feature being considered can, in principle, be detected once known selection effects are accounted for. That is, $g(z)$ is the spectral coverage of a given transition, or doublet, once additional detection effects are included, such as the \SN, proximity to quasar emission lines, and telluric features or broad absorption lines that may preclude detection. \Fref{f:gz} presents $g(z)$ for the \lya\ forest, and the most commonly surveyed strong metal-line doublets outside the forest, i.e.\ \ion{C}{iv}\,$\lambda\lambda$1548/1550, \ion{Mg}{ii}\,$\lambda\lambda$2796/2803, \ion{Si}{iv}\,$\lambda\lambda$1393/1402 and \ion{Ca}{ii}\,$\lambda\lambda$3934/3969. Here, $g(z)$ masks out the strongest regions of telluric absorption (the O$_2$ and H$_2$O bands with $<$80\% transmitted flux in \Fref{f:telluric}).
For all species we excluded the region 3000\,\kms\ bluewards of the quasar emission redshift to avoid proximity effects from the quasar. For the metal doublets, $g(z)$ extends down to 3000\,\kms\ redwards of the \lya\ emission line, while for \lya\ it extends down to 3000\,\kms\ redwards of the \lyb\ emission line to avoid including \lyb\ absorption. \begin{figure} \begin{center} \includegraphics[width=0.95\columnwidth]{DR1_gz.pdf} \vspace{-1em} \caption{Total redshift sensitivity function, $g(z)$, of the DR1 spectra for \ion{H}{i} \lya\ and the most commonly surveyed metal-line doublets outside the \lya\ forest: \ion{C}{iv}\,$\lambda\lambda$1548/1550, \ion{Mg}{ii}\,$\lambda\lambda$2796/2803, \ion{Si}{iv}\,$\lambda\lambda$1393/1402 and \ion{Ca}{ii}\,$\lambda\lambda$3934/3969. The strongest telluric absorption features (\Fref{f:telluric}) and the proximity zones near the relevant emission lines are excluded from $g(z)$, causing its shape to differ from the corresponding wavelength coverage function in \Fref{f:trancov}. $g(z)$ is calculated for four representative continuum-to-noise ratio (\CN) values for each species. This illustrates the large number of spectra in which relatively weak absorption lines can be detected.} \label{f:gz} \end{center} \end{figure} The $g(z)$ functions in \Fref{f:gz} have broadly similar shapes and magnitudes to their raw wavelength coverage counterparts in \Fref{f:trancov}. However, avoiding the telluric and proximity regions reduces the number of sight-lines at all redshifts and reduces the sensitivity to zero for significant path lengths in the metal doublets, particularly \ion{Mg}{ii} and \ion{Ca}{ii}. To demonstrate how absorption line surveys will depend on the data quality, $g(z)$ is calculated for four representative \CN\ values for each species. This utilised the 1000-\kms-binned \CN\ maps of the quasar spectra described in \Sref{sss:CNR}. Note that the $g(z)$ functions in \Fref{f:gz} do not avoid other selection effects, such as broad absorption-line quasars, which some users should take into account for specific surveys. Finally, as is the case for DLAs (\Sref{ss:dlas} above), many DR1 quasars were targeted for observations with UVES due to the presence of metal-line absorbers. We therefore urge caution in determining the incidence rates of such systems with the DR1 sample. However, the proposal titles and abstracts for the original observations do not indicate targeting of specific, weak absorbers. That is, there is likely little or no such bias for absorbers with equivalent widths below the detection threshold for lower-resolution surveys (e.g.\ $\la$0.2\,\AA\ for SDSS spectra). The $g(z)$ functions in \Fref{f:gz} indicate that very large redshift path lengths can be surveyed with the DR1 spectra, even for weak systems where higher \CN\ spectra are required (i.e.\ $\CN\ge20$). \subsection{Time-variability studies using sub-spectra} The DR1 spectra were taken over a 16-year period, so they are potentially useful in constraining the time-variability of absorption systems. This is particularly relevant for both narrow and broad metal absorption lines associated with outflows from the quasar central engines and/or host galaxies, as these lines are often observed to vary on time-scales shorter than $\sim$10 years \citep[e.g.][]{Hamann:2011:1957,RodriguezHidalgo:2013:14}. Amongst the 467 quasars with final DR1 spectra, 92 comprise exposures taken more than a year apart, and 11 were observed over an interval of more than a decade.
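As a minimal illustration of the kind of epoch-to-epoch comparison that motivates the sub-spectra described below, the following Python sketch computes the per-pixel significance of the flux difference between two combined epoch spectra on the same wavelength grid; it is not part of \popler, and the function and array names are purely illustrative.
\begin{verbatim}
import numpy as np

def variability_significance(flux1, err1, flux2, err2):
    """Per-pixel significance of the flux difference between two epoch
    spectra (e.g. sub-spectra) on the same wavelength grid."""
    diff = np.asarray(flux1, dtype=float) - np.asarray(flux2, dtype=float)
    sigma = np.sqrt(np.asarray(err1, dtype=float)**2 +
                    np.asarray(err2, dtype=float)**2)
    return diff / sigma
\end{verbatim}
Runs of pixels with significance well beyond $\sim$3 flag candidate variable absorption, provided the artefacts discussed in \Sref{ss:artefacts} have been excluded.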
Significantly time-variable absorption lines can be easily identified in the final spectra using the same methods used to identify artefacts in \Sref{sss:artefacts} (see \Fref{f:baddata}), particularly by using \popler\ to simultaneously view the flux and $\chi^2_\nu$ spectra. Once time-variable absorption has been identified -- or if it is suspected but higher \SN\ is required for detection -- \popler\ includes a facility to create ``sub-spectra'' for comparative analyses: subsets of the original exposures can be selected for combination after the manual phase. This allows, for example, the best possible combination of exposures taken in one year to be compared with that from another year, and to identify or study time-variable absorption with the highest possible \SN\ in each case. The advantage of this approach is that sub-spectra are only split into separate spectra after being processed, cleaned, and continuum-fitted together; their treatment has been identical and observed differences between them are therefore more easily studied and interpreted. \popler\ sub-spectra have previously been employed in isolating instrumental systematic effects in UVES and HIRES spectra for studies of possible variations in the fine-structure constant and proton-to-electron mass ratio \citep[e.g.][]{Dapra:2015:489,Murphy:2016:2461,Murphy:2017:4930}. However, \Fref{f:broadCIV} illustrates their utility in studying time-variability in a broad absorption-line system that is highly blueshifted with respect to the quasar, presumably due to a high-velocity outflow. A systematic search for, and study of, time-variable absorption lines in, e.g., BAL quasars could therefore be undertaken using the full DR1 data products in \citet[][i.e.\ both the final spectrum and the extracted, contributing exposures]{Murphy:2018:UVESSQUADDR1}. \begin{figure} \begin{center} \includegraphics[width=0.95\columnwidth]{DR1_broadCIV.pdf} \vspace{-1em} \caption{Example of a time-variable \ion{C}{iv} broad absorption line feature $\sim$21000\,\kms\ bluewards of the emission redshift of J005758$-$264314. Two ``sub-spectra'' are shown (see text), comprising 11 and 13 exposures from Sep.\ 2000 and 2013, respectively. All exposures were combined to form the DR1 final spectrum of this quasar, but its $\chi^2_\nu$ spectrum (not shown) increases dramatically in the spectral region shown, sign-posting potential time-variable absorption. Each sub-spectrum (histograms) has been smoothed with a 10-pixel-wide Gaussian kernel for clarity (lines). Note that the absorption redwards of 6865\,\AA\ is the telluric O$_2$ B band.} \label{f:broadCIV} \end{center} \end{figure} \section{Conclusions}\label{s:conclusion} We have presented the first data release of the UVES SQUAD: 475 quasars, 467 of which have final, combined spectra (see \Tref{t:cat}). This is the largest database of high-resolution quasar spectra. The DR1 spectra are fully reproducible -- from the raw archival UVES exposures to final, combined and continuum-fitted spectra -- with a few commands using open-source software. All reduced exposures and files required to produce the final spectra and figures in this paper are provided in \citet{Murphy:2018:UVESSQUADDR1}. We also documented software tools for preparing and executing the UVES data reduction pipeline \citep[\headsort;][]{Murphy:2016:UVESheadsort}, and for combining the extracted exposures of a quasar, and cleaning and continuum fitting the result \citep[\popler;][]{Murphy:2016:UVESpopler}.
The latter tool enables users of the DR1 spectra to improve the spectra, or modify them for their particular projects; we encourage and welcome improvements in this way and can make them available in future data releases. A primary motivation for constructing DR1 was to enable statistical analyses of large quasar and absorber samples, particularly those requiring high-resolution spectra (e.g.\ weak metal lines). We highlighted three broad areas where the DR1 spectra may be especially useful: DLA studies, absorption-line surveys, and studies of variable absorption, particularly outflows from quasars. To assist DLA studies, we presented a catalogue of 155 DLAs whose \lya\ lines are recorded in the DR1 spectra, 18 of which have not been reported previously (see \Tref{t:dla}). The \ion{H}{i} column densities were compiled from the literature, while values for the 18 new DLAs were measured directly from the DR1 spectra. For absorption line surveys, the redshift sensitivity functions of the DR1 sample are presented in \Fref{f:gz} for the most commonly surveyed ionic species (\ion{H}{i}, \ion{Si}{iv}, \ion{C}{iv}, \ion{Mg}{ii} and \ion{Ca}{ii}). Indeed, several absorption-line surveys have already been conducted using earlier, preliminary versions of the UVES SQUAD spectra \citep[e.g.][]{Richter:2011:A12,BenBekhti:2012:A110,Nielsen:2013:114,Mas-Ribas:2018:50,Mathes:2018}. To assist studies of variable absorption lines, the \popler\ software can be used to construct ``sub-spectra'': subsets of exposures combined in the same way, with the same continuum, as the final spectrum. The highest \SN\ sub-spectra from different epochs can then be compared as accurately as possible. Finally, while we have invested considerable effort in cleaning all DR1 spectra to a minimum standard, many artefacts remain and are important to consider for such applications -- see \Sref{ss:artefacts}. The second data release of UVES SQUAD is currently in preparation. It is anticipated to include spectra of another $\sim$180 quasars whose first exposure was taken between 30 June 2008 (the end date for first exposures in DR1) and early 2018. DR1 quasars with new exposures taken after 30 June 2008 will also be updated in DR2. While we have attempted to identify every quasar observed by UVES in the DR1 acquisition period (see \Sref{s:selection}), it remains possible that a small number may have escaped our selection criteria. We welcome knowledge of such quasars from the community and will include spectra of these in DR2 where possible. \section*{Acknowledgements} We thank Julija Bagdonaite, Tyler Evans, Sr\dj an Kotu\v{s}, Adrian Malec, Max Spolaor and Jonathan Whitmore for assistance in preliminary reduction and cleaning of several spectra, and Lachlan Thomson and Luka Giles for initial assistance with the DLA catalogue. We thank John Webb for work and discussions on the earlier database of {\sc midas}-reduced UVES spectra that contributed to \citet{King:2012:3370}. MTM thanks the Australian Research Council for \textsl{Discovery Projects} grants DP0877998, DP110100866 and DP130100568, and GGK and MTM thank it for \textsl{Discovery Projects} grant DP170103470, which supported this work. The UVES SQUAD spectra are based on observations collected at the European Organisation for Astronomical Research in the Southern Hemisphere under ESO programmes listed in \Tref{t:cat}.
This research made use of: the NASA/IPAC Extragalactic Database (NED), which is operated by the Jet Propulsion Laboratory, California Institute of Technology, under contract with the National Aeronautics and Space Administration; NASA's Astrophysics Data System; {\sc astropy} \citep{Astropy:2013:A33}; and {\sc matplotlib} \citep{Hunter:2007:90}. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%% REFERENCES %%%%%%%%%%%%%%%%%% % The best way to enter references is to use BibTeX: \bibliographystyle{mnras} \bibliography{ref} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%% APPENDICES %%%%%%%%%%%%%%%%%%%%% %\appendix %\section{Appendix name}\label{a:name} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section*{Supporting Information}\label{sec:supp} Additional Supporting Information may be found in the online version of this article:\vspace{-0.5em}\newline \noindent \textbf{DR1\_quasars\_master.csv.} Complete version of \Tref{t:cat}, incorporating the DLA information from \Tref{t:dla}.\vspace{-0.1em}\newline \noindent \textbf{DR1\_DLAs.pdf.} Velocity plots of all 18 new DLAs reported in \Tref{t:dla}, similar to the example in \Fref{f:egDLA}.\vspace{-0.5em}\newline \noindent Please note: Oxford University Press are not responsible for the content or functionality of any supporting materials supplied by the authors. Any queries (other than missing material) should be directed to the corresponding author for the paper. % Don't change these lines \bsp % typesetting comment \label{lastpage} \end{document} % End of mnras_template.tex
{ "alphanum_fraction": 0.7852221949, "avg_line_length": 163.028189911, "ext": "tex", "hexsha": "f8937ab56003d4381c930e06ba76a8c7b72b8683", "lang": "TeX", "max_forks_count": 7, "max_forks_repo_forks_event_max_datetime": "2021-01-23T11:35:16.000Z", "max_forks_repo_forks_event_min_datetime": "2018-10-08T00:13:46.000Z", "max_forks_repo_head_hexsha": "a0cdc8e7b99f2b01a45d919988af9d60e6447d19", "max_forks_repo_licenses": [ "CC-BY-4.0" ], "max_forks_repo_name": "MTMurphy77/UVES_SQUAD_DR1", "max_forks_repo_path": "Paper/src/paper_submitted_2018-08-10.tex", "max_issues_count": 1, "max_issues_repo_head_hexsha": "a0cdc8e7b99f2b01a45d919988af9d60e6447d19", "max_issues_repo_issues_event_max_datetime": "2018-10-18T13:15:48.000Z", "max_issues_repo_issues_event_min_datetime": "2018-10-18T00:48:03.000Z", "max_issues_repo_licenses": [ "CC-BY-4.0" ], "max_issues_repo_name": "MTMurphy77/UVES_SQUAD_DR1", "max_issues_repo_path": "Paper/src/paper_submitted_2018-08-10.tex", "max_line_length": 1994, "max_stars_count": 4, "max_stars_repo_head_hexsha": "a0cdc8e7b99f2b01a45d919988af9d60e6447d19", "max_stars_repo_licenses": [ "CC-BY-4.0" ], "max_stars_repo_name": "MTMurphy77/UVES_SQUAD_DR1", "max_stars_repo_path": "Paper/src/paper_submitted_2018-08-10.tex", "max_stars_repo_stars_event_max_datetime": "2021-06-16T14:41:10.000Z", "max_stars_repo_stars_event_min_datetime": "2019-05-28T10:46:39.000Z", "num_tokens": 26703, "size": 109881 }
\chapter*{Abstract} This thesis deals with the problem of generating test data for indoor localization and mapping systems using acoustic sources (speakers) and receivers (microphones) in a given environment. In particular, methods of ray tracing and ultrasonic path evaluation will be explored and presented. The suggested methods have been implemented in an Acoustic Ray Tracing Simulator (ARTS). \newline ARTS is able to generate test sample data for a set of receivers by approximating the propagation of sound waves from acoustic sources as rays in a 3D modeled environment. The geometry of the environment is provided by 3D data from tools that support the Wavefront OBJ file format. Using this data and the geometric ray tracing method, reflections up to third order are computed and sampled at the receivers' end. The sampled data can then be used in localization or mapping systems to evaluate different environments and setups.
{ "alphanum_fraction": 0.8239660657, "avg_line_length": 104.7777777778, "ext": "tex", "hexsha": "7e82f1f11f0de68ed46ca9a72e670f564b831d85", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "1e14a5ad0c5f309afe92777bac7850dec2fbc7ba", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "mguc/arts", "max_forks_repo_path": "thesis/chapters/0_2-abstract.tex", "max_issues_count": 1, "max_issues_repo_head_hexsha": "1e14a5ad0c5f309afe92777bac7850dec2fbc7ba", "max_issues_repo_issues_event_max_datetime": "2021-10-10T22:31:41.000Z", "max_issues_repo_issues_event_min_datetime": "2021-10-10T22:31:41.000Z", "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "mguc/arts", "max_issues_repo_path": "thesis/chapters/0_2-abstract.tex", "max_line_length": 188, "max_stars_count": null, "max_stars_repo_head_hexsha": "1e14a5ad0c5f309afe92777bac7850dec2fbc7ba", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "mguc/arts", "max_stars_repo_path": "thesis/chapters/0_2-abstract.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 179, "size": 943 }
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % Twenty Seconds Resume/CV % LaTeX Template % Version 1.0 (14/7/16) % % Original author: % Carmine Spagnuolo ([email protected]) with major modifications by % Vel ([email protected]) and Harsh ([email protected]) % % License: % The MIT License (see included LICENSE file) % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %---------------------------------------------------------------------------------------- % PACKAGES AND OTHER DOCUMENT CONFIGURATIONS %---------------------------------------------------------------------------------------- \documentclass[letterpaper]{twentysecondcv} % a4paper for A4 % Command for printing skill overview bubbles \newcommand\skills{ \\ ~ \\ \smartdiagram[bubble diagram]{ \textbf{Image}\\\textbf{Processing}, \textbf{Computer}\\\textbf{Vision}, \textbf{Machine}\\\textbf{~~~Learning~~~}, \textbf{Validation}, \textbf{Data}\\\textbf{Analysis}, \textbf{Medical}\\\textbf{Imaging}, \textbf{Data}\\\textbf{~~~Science~~~} } } % Plot programming skill bars \programming{{ SQL $\textbullet$ Java / 2.5}, {R $\textbullet$ Matlab $\textbullet$ \large \LaTeX / 4}, {Python $\textbullet$ C++ / 5.5}} % Projects text \languages{ \noindent\begin{tabular}{@{}p{12mm}@{\hspace{3mm}}p{5cm}@{}} \textbf{English} & - Fluent \\ \textbf{Swedish} & - Fluent \\ \textbf{French} & - Intermediate \end{tabular} } %---------------------------------------------------------------------------------------- % PERSONAL INFORMATION %---------------------------------------------------------------------------------------- % If you don't need one or more of the below, just remove the content leaving the command, e.g. \cvnumberphone{} \cvname{Jenny} % Your name \cvlastname{Folkesson} \cvjobtitle{Data Scientist} % Job \cvlinkedin{/in/jennyfolkesson} \cvmail{[email protected]} % Email address \cvgithub{jennyfolkesson} % Github account \cvnumberphone{(415) 205 8913} % Phone number \cvsite{goo.gl/CxJHcB} % Publications website %---------------------------------------------------------------------------------------- \begin{document} % Print the sidebar \makeprofile %---------------------------------------------------------------------------------------- % EDUCATION %---------------------------------------------------------------------------------------- Motivated researcher with ten-plus years of experience in image processing and familiar with all aspects of the product development cycle. \\ I'm passionate about using technology to improve the way we live and strive to keep learning. I enjoy working independently and in collaborations and believe most problems can be solved with a combination of curiosity and dedication. \section{Experience} \begin{twenty} %\twentyitem{<dates>}{<title>}{<location>}{<description>} \twentyitem {2015 - Now} {} {Data Scientist} {\href{http://www.3scan.com/}{\textbf{3Scan}}} {} { \begin{itemize} \begin{raggedright} \item Develop image processing and machine learning algorithms for analysis of TB-scale knife edge scanning microscopy data processed in a highly distributed environment (AWS). Algorithms include convolutional neural networks for object recognition and segmentation, flatfield and chromatic aberration correction, clustering and classification. \item Main analytics contributor to an image quality control project for detection and quantification of imaging artifacts. \item Work closely with other departments (biology, hardware, optics) to identify and quantify key performance indicators. 
\end{raggedright} \end{itemize} } \\ \twentyitem {2011 - 2015} {} {Senior Scientist} {\href{https://theranos.com/}{\textbf{Theranos}}} {} { \begin{itemize} \begin{raggedright} \item Primary contributor to the fluorescence microscopy image processing software. Methods included image segmentation, stitching, feature extraction and selection, and classification. The software passed strict accuracy and precision benchmarks. \item Developed machine vision production code for inspection and validation of device robotics using object detection and tracking, texture detection and change point analysis in or near real time. \item Wrote software related to user interfaces, including methods such as image registration, text detection and optical character recognition. \end{raggedright} \end{itemize} } \\ \twentyitem {2008 - 2011} {} {Postdoctoral Researcher} {\href{https://radiology.ucsf.edu/research/labs/musculoskeletal-quantitative-imaging}{\textbf{UCSF}}} {} { \begin{itemize} \begin{raggedright} \item Leading role in a magnetic resonance imaging based pilot study of a potential osteoporosis treatment for \href{http://www.merck.com/}{\textbf{Merck}}. It was the largest longitudinal MRI-based study of the treatment at the time, using statistical analysis to show significant changes in bone parameters over time. \item Developed and evaluated a framework for trabecular bone analysis for osteoporosis research, including fuzzy clustering, texture recognition, nonparametric regression, and image registration. The resulting publications showed significantly improved sensitivity and precision compared to previous methods. \item Management and maintenance of laboratory data, in-house developed software, and hardware resources. \end{raggedright} \end{itemize} } \end{twenty} %---------------------------------------------------------------------------------------- % EXPERIENCE %---------------------------------------------------------------------------------------- \section{Education} \begin{twenty} % Environment for a list with descriptions \twentyitem {2004 - 2007} {} {Ph.D. in Computer Science} {\href{http://www.diku.dk/english/}{\textbf{University of Copenhagen}}} {} { \begin{itemize} \begin{raggedright} \item Developed one of the first fully automatic cartilage segmentation methods, based on statistical classifiers trained using feature selection of local geometry descriptors. The research resulted in a number of publications, patents and commercially used software. \item Part of the research was performed at the \href{https://lmi.med.harvard.edu/}{\textbf{Laboratory for Mathematics in Imaging}}. \item Close collaboration with clinicians, biochemists, radiologists and statisticians at \href{http://www.nordicbioscience.com/}{\textbf{Nordic Bioscience}} for establishment of novel biomarkers that proved to be clinically relevant and increased sensitivity to disease progress. \end{raggedright} \end{itemize} } \end{twenty} \end{document}
{ "alphanum_fraction": 0.6312148725, "avg_line_length": 44.1974522293, "ext": "tex", "hexsha": "3139e2d12bbcebd0d10d3530f5b8e5a997362c0f", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "a5ada0f068ea63ad1b28c279299600bcbf1dba11", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "jennyfolkesson/Data-Engineer-Resume-LaTeX", "max_forks_repo_path": "resume.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "a5ada0f068ea63ad1b28c279299600bcbf1dba11", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "jennyfolkesson/Data-Engineer-Resume-LaTeX", "max_issues_repo_path": "resume.tex", "max_line_length": 352, "max_stars_count": null, "max_stars_repo_head_hexsha": "a5ada0f068ea63ad1b28c279299600bcbf1dba11", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "jennyfolkesson/Data-Engineer-Resume-LaTeX", "max_stars_repo_path": "resume.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1517, "size": 6939 }
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Twenty Seconds Resume/CV
% LaTeX Template
% Version 1.1 (8/1/17)
%
% This template has been downloaded from:
% http://www.LaTeXTemplates.com
%
% Original author:
% Carmine Spagnuolo ([email protected]) with major modifications by 
% Vel ([email protected])
%
% License:
% The MIT License (see included LICENSE file)
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%----------------------------------------------------------------------------------------
%	PACKAGES AND OTHER DOCUMENT CONFIGURATIONS
%----------------------------------------------------------------------------------------

\documentclass[letterpaper]{style} % a4paper for A4

%----------------------------------------------------------------------------------------
%	 PERSONAL INFORMATION
%----------------------------------------------------------------------------------------

% If you don't need one or more of the below, just remove the content leaving the command, e.g. \cvnumberphone{}

\profilepic{foto} % Profile picture

\cvname{Alan Fernando\\Rincón Vieyra} % Your name
\cvjobtitle{Computer Systems Engineer} % Job title/career

\cvdate{23-Dec-1995} % Date of birth
\cvnationality{Mexican}
\cvaddress{Del. Miguel Hidalgo, Mexico City, Mexico.} % Short address/location, use \newline if more than 1 line is required
\cvnumberphone{(+52) 5591250145} % Phone number
\cvmail{[email protected]} % Email address
\cvsite{} % Personal website
\cvlinkedin{https://linkedin.com/in/AlanVieyra333} % LinkedIn account
\cvgithub{http://github.com/AlanVieyra333} % Personal website
\cvfb{} % Facebook account

%----------------------------------------------------------------------------------------

\begin{document}

%----------------------------------------------------------------------------------------
%	 ABOUT ME
%----------------------------------------------------------------------------------------

\aboutme{} % To have no About Me section, just remove all the text and leave \aboutme{}

%----------------------------------------------------------------------------------------
%	 SKILLS
%----------------------------------------------------------------------------------------

% Skill bar section, each skill must have a value between 0 and 10 (float)
\skills{
{Java, C++/9.5},
{Linux (Debian, Centos, Arch)/9.5},
{Docker, OpenShift/9.5},
{Angular 8, JavaScript, HTML5, CSS3/9.5},
{Git, Web Services REST and SOAP/9.5},
{Jenkins, Maven, Ansible, Shell/9},
{MySQL, SQL Oracle/8.5},
{Mobile apps Android, iOS/8},
{Multiplatform mobile apps - Flutter/8}}

% Skill bar section, each skill must have a value between 0 and 10 (float)
\skillsSecond{
{Python, ML frameworks/8},
{AWS, Google Cloud Platform/7.5},
{WebLogic and WebSphere servers/6.5},
{UML, LaTeX/6.5},
{PHP, C\#, Golang/6},
{Video game development - Unity/5},
{Lisp, TensorFlow, Prolog/4}}

%------------------------------------------------

% Skill text section, each skill must have a value between 0 and 6
\skillstext{{Drone programming,},{Microcontroller programming,},{Teamwork,},{Good programming practices.}}

%----------------------------------------------------------------------------------------
%	 LANGUAGES
%----------------------------------------------------------------------------------------

% Languages bar section, each language must have a value between 0 and 10 (float)
\languages{{Spanish - Native/10}, {English - Intermediate/7}}

%----------------------------------------------------------------------------------------

\makeprofile % Print the sidebar
%----------------------------------------------------------------------------------------
%	EDUCATION
%----------------------------------------------------------------------------------------

\cvsection{Education}

\begin{twenty} % Environment for a list with descriptions
	%\twentyitem{<dates>}{<title>}{<location>}{<description>}
	\twentyitem{2019-2020}{Master of Computer Science | A.I. | Incomplete}{México}{CINVESTAV - IPN }
	\twentyitem{2014-2018}{Computer Systems Engineering - Intern}{México}{Escuela Superior de Cómputo - Instituto Politécnico Nacional}
	\twentyitem{2010-2014}{Technical Baccalaureate Certificate}{México}{CECyT 9 “Juan de Dios Bátiz” - Instituto Politécnico Nacional}
	\twentyitem{2007-2010}{High school certificate}{México}{Escuela Secundaria Anexa a la Normal Superior}
\end{twenty}

%----------------------------------------------------------------------------------------
%	 COURSES
%----------------------------------------------------------------------------------------

\section{Courses}

\begin{twentyshort} % Environment for a short list with no descriptions
	%\twentyitemshort{<dates>}{<title/description>}
	\twentyitemshort{2018}{Mobile app development with Flutter}{Udemy}
	\twentyitemshort{2018}{Scrum Fundamentals}{SCRUMstudy}
	\twentyitemshort{2016}{Neural networks, Stanford University}{Coursera}
	\twentyitemshort{2015}{Competitive programming and algorithm analysis}{ESCOM - IPN.}
\end{twentyshort}

%----------------------------------------------------------------------------------------
%	 CERTIFICATES
%----------------------------------------------------------------------------------------

\section{Certificates}

\begin{twentyshort}
	\twentyitemshort{2013}{Java SE (\emph{\href{https://www.youracclaim.com/badges/583548b1-06fa-4dbf-889d-419e6c6c2ec1/public_url}{show}})}{Oracle}
\end{twentyshort}

%----------------------------------------------------------------------------------------
%	 AWARDS
%----------------------------------------------------------------------------------------

\section{Achievements}

\begin{twentyshort}
	\twentyitemshort{2019}{Participant in the \textbf{Code Jam} programming competition}{Google}
	\twentyitemshort{2015-2017}{Member of a competing team in the selection stage of \textbf{ACM-ICPC}\space\space\space\space}{International Collegiate Programming Contest}
	\twentyitemshort{2014}{2nd place in the Coding Rush competition}{ITAM}
	\twentyitemshort{2013}{2nd place in the 18th Mexican Computer Science Olympiad}{CDMX}
\end{twentyshort}

%----------------------------------------------------------------------------------------
%	 EXPERIENCE
%----------------------------------------------------------------------------------------

\cvsection{Experience}

\cvevent{Full Stack Developer}{Interfell}{January 2021 -- Present}{Remote work}
\begin{itemize}
	\item Development of web solutions with \textbf{React JS}.
	\item Development of microservices with \textbf{Java Spring Boot}.
	\item Use of \textbf{PostgreSQL} databases.
	\item Use of \textbf{GIT Flow}.
\end{itemize}

\divider

\cvevent{DevOps engineer semi Sr}{EVERIS BPO MÉXICO, S. DE R.L. DE C.V. - TELCEL}{August 2019 -- December 2020}{Plaza Carzo, CDMX, México}
\begin{itemize}
	\item Building and handling of \textbf{Docker} containers orchestrated by \textbf{OpenShift}.
	\item Infrastructure as code with \textbf{Ansible} to automate the configuration of environments in the \textbf{OpenShift} platform.
	\item Development of pipelines in \textbf{Jenkins} for CI/CD, with deployment to a \textbf{Nexus} repository.
	\item Microservices development using \textbf{Spring} and \textbf{Java}.
	\item Anomaly detection in \textbf{Elastic} using \textbf{Python} and the \textbf{Pandas} library.
	\item Process optimization using C++.
\end{itemize}

%----------------------------------------------------------------------------------------
%	SECOND PAGE EXAMPLE
%----------------------------------------------------------------------------------------

\newpage % Start a new page

\makeprofileSecond % Print the sidebar

\cvevent{Architect ESB}{HITSS CONSULTING S.A. DE C.V. - TELCEL}{March 2019 -- August 2019}{Plaza Carzo, CDMX, México}
\begin{itemize}
	\item Development of integration services between applications of the various systems of the organization using the \textbf{IBM Integration Bus} tool, \textbf{Websphere MQ}, \textbf{Spring}, \textbf{Java} and the communication protocols \textbf{SOAP} and \textbf{REST}.
	\item Handling of \textbf{Docker} containers orchestrated with \textbf{OpenShift}.
	\item Management of \textbf{Jenkins} and \textbf{Maven} for continuous integration.
\end{itemize}

\divider

\cvevent{Systems Analyst Sr}{HITSS CONSULTING S.A. DE C.V. - TELMEX}{June 2018 -- March 2019}{Coyoacán, CDMX, México}
\begin{itemize}
	\item Development of a mobile application for \textbf{iOS} using \textbf{Swift}.
	\item Development of web applications using \textbf{Java}, \textbf{PrimeFaces}, \textbf{Spring}, \textbf{JSF}, \textbf{JPA}, \textbf{DAO} and the \textbf{WebLogic} and \textbf{Tomcat} servers.
	\item Management of \textbf{MySQL} and \textbf{Oracle SQL} databases using JOINs, stored procedures and triggers.
\end{itemize}

%\divider

%\cvevent{Systems Analyst Jr}{Coordinación de Desarrollo Tecnológico – ESCOM - IPN}{January 2018 -- June 2018}{Zacatenco, CDMX, México}
%\begin{itemize}
%	\item Perform the lifting of \textbf{requirements} with the user.
%	\item Perform requirements management, including control and \textbf{documentation}.
%	\item Generate the project deliverables of the \textbf{Analysis} and \textbf{Design}.
%	\item Diagram handling \textbf{UML} for modeling the project.
%\end{itemize}

\divider

\cvevent{Bachelor's thesis}{Escuela Superior de Cómputo - IPN}{January 2018 -- December 2018}{Zacatenco, CDMX, México}
\begin{itemize}
	\item Development of a prototype for home delivery by programming a drone with the Ardupilot library in \textbf{C++}, using \textbf{microcontrollers} and the \textbf{SCRUM} methodology.
	\item Development of a web application for user interaction with the drone, using \textbf{Angular 6} and \textbf{Python} with \textbf{Django}.
\end{itemize}

\divider

\cvevent{Developer Front Jr}{Nova Solutions Systems S.A. DE C.V. - MULTIVA}{March 2017 -- December 2017}{Polanco, CDMX, México}
\begin{itemize}
	\item Microservices development using \textbf{Spring} and \textbf{Java}.
	\item Development of modules and components for the web application using \textbf{Angular} as a development tool, as well as \textbf{HTML5}, \textbf{CSS3} and \textbf{JavaScript}.
	\item Use of \textbf{Git} for version control and teamwork.
\end{itemize}

%\section{Other information}

%\subsection{Review}

%Alice approaches Wonderland as an anthropologist, but maintains a strong sense of noblesse oblige that comes with her class status. She has confidence in her social position, education, and the Victorian virtue of good manners. Alice has a feeling of entitlement, particularly when comparing herself to Mabel, whom she declares has a ``poky little house," and no toys.
%\section{Other information}

%\subsection{Review}

%Alice approaches Wonderland as an anthropologist, but maintains a strong sense of noblesse oblige that comes with her class status. She has confidence in her social position, education, and the Victorian virtue of good manners. Alice has a feeling of entitlement, particularly when comparing herself to Mabel, whom she declares has a ``poky little house," and no toys. Additionally, she flaunts her limited information base with anyone who will listen and becomes increasingly obsessed with the importance of good manners as she deals with the rude creatures of Wonderland. Alice maintains a superior attitude and behaves with solicitous indulgence toward those she believes are less privileged.

%----------------------------------------------------------------------------------------


\end{document}
{ "alphanum_fraction": 0.6115563054, "avg_line_length": 48.5860655738, "ext": "tex", "hexsha": "0673dcf39a7c01bbe874b62b8d4f7b36bed026f0", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "8cd43f768a7c59a0ec299a8b96f8d292cd27833d", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "AlanVieyra333/curriculum-vitae", "max_forks_repo_path": "cv.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "8cd43f768a7c59a0ec299a8b96f8d292cd27833d", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "AlanVieyra333/curriculum-vitae", "max_issues_repo_path": "cv.tex", "max_line_length": 696, "max_stars_count": null, "max_stars_repo_head_hexsha": "8cd43f768a7c59a0ec299a8b96f8d292cd27833d", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "AlanVieyra333/curriculum-vitae", "max_stars_repo_path": "cv.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 2797, "size": 11855 }
\documentclass[main.tex]{subfiles} \begin{document} \marginpar{Wednesday\\ 2021-12-1} Last time we reached the expression for the plane-wave solution to GR. Consider a wave propagating along the \(x^{1}\) direction. Our equation is \(\square _F \overline{h}_{\mu \nu } = 0 \), to be solved together with our gauge condition \(\partial_{\mu } \overline{h}^{\mu }{}_\nu = 0\). The solution will only depend on \(t - x /c\), which we denote as \(w\). We will have % \begin{align} \pdv{}{x} \overline{h}^{\mu }{}_{\nu } &= - \frac{1}{c} \pdv{}{w } \overline{h}^{\mu }{}_\nu \\ \pdv{}{t} \overline{h}^{\mu }{}_{\nu } &= \pdv{}{w } \overline{h}^{\mu }{}_\nu \,, \end{align} % so, the full condition reads % \begin{align} \partial_{\mu } \overline{h}^{\mu }{}_{\nu } = \frac{1}{c} \pdv{}{t} \overline{h}^{0}{}_\nu + \pdv{}{x} \overline{h}^{x }{}_\nu = \frac{1}{c} \pdv{}{w} \qty[h^{0}{}_{\nu } - h^{x}{}_{\nu }] = 0 \,. \end{align} The constant the difference of these perturbations are equal to is inessential --- it can be recovered with a rescaling of, say, background time --- so we have % \begin{align} h^{0}{}_{\nu } = h^{x}{}_{\nu } \,. \end{align} The gauge we chose did not fully determine our transformation: equation \eqref{eq:christoffel-gauge-transformation} will not change if we use an additional \(\xi \) such that \(\square \xi =0 \). It is possible to choose four more conditions thanks to this: % \begin{align} 0 = \overline{h}^{0}{}_{x} = \overline{h}^{0}{}_{y} = \overline{h}^{0}{}_{z} = \overline{h}^{y}{}_{y} + \overline{h}^{z}{}_{z} \,. \end{align} Together with the ones from before, we find % \begin{align} 0 = \overline{h}^{x}{}_{x} = \overline{h}^{x}{}_{y} = \overline{h}^{x}{}_{z} = \overline{h}^{0}{}_{0} \,, \end{align} % which also means \(h = 0 = \overline{h}\) (since \(h = - \overline{h}\)), therefore \(\overline{h}_{\mu \nu } = h_{\mu \nu } \). The only two nonvanishing components left are \(h^{y}{}_{y} = - h^{z}{}_z\) and \(h^{x}{}_{y} = h^{y}{}_{x}\). We finally have % \begin{align} h_{\mu \nu } = \left[\begin{array}{cccc} 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 \\ 0 & 0 & h_{+} & h_{\times } \\ 0 & 0 & h_{\times } & -h_{+} \end{array}\right] \,. \end{align} This is the \textbf{transverse-traceless gauge}. \subsection{The quadrupole approximation} We will assume that \(T_{\mu \nu } \neq 0\), but that the source of the GW is all contained within a source such that \(\abs{x'} < \epsilon\), and such that \(\epsilon \ll \lambda_{GW} = 2 \pi c / \omega \). This means that \(\omega \epsilon / 2 \pi \sim v _{\text{source}} \ll c\). This line of reasoning assumes that \(\omega _{\text{source}} \sim \omega _{\text{GW}} \), which we do not know yet --- we will later find that it is in fact true within a factor 2. The GW solution reads % \begin{align} \overline{h}_{\mu \nu }(t, \vec{x}) &= \frac{4 G}{c^{4}} \int_{V} \frac{T_{\mu \nu } (t - \abs{x - x'} / c, c') \dd[3]{x'}}{\abs{x - x'}} \,, \end{align} % while in Fourier space we can expand % \begin{align} T_{\mu \nu } (t, \vec{x}) = \int \widetilde{T}_{\mu \nu } (\omega , \vec{x}) e^{-i \omega t} \dd{\omega } \,. 
\end{align} We then get % \begin{align} \int \overline{h}_{\mu \nu } (\omega , \vec{x}) e^{-i \omega t} \dd{\omega } = \frac{4 G}{c^{4}} \int_{V} \frac{ \dd[3]{x'}}{\abs{x - x'}} \int T_{\mu \nu } (\omega , x') e^{-i \omega (t - \abs{x-x'} / c) } \dd{\omega } \,, \end{align} % which means that % \begin{align} h_{\mu \nu } (\omega , x) = \frac{4G}{c^{4}} \int \frac{\dd[3]{x'}}{\abs{x - x'}} T_{\mu \nu }(\omega , x') e^{i \omega \abs{x-x'} / c} \,, \end{align} % where we factored out the \(e^{- i \omega t}\) and the integral in \(\dd{\omega }\). With our assumption of slow speed we can expand: % \begin{align} \frac{e^{i \omega \abs{x-x'} / c}}{\abs{x-x'}} \approx \frac{e^{i \omega r}}{r} \,, \end{align} % where \(r = \abs{x}\). This then yields % \begin{align} h_{\mu \nu }(\omega , r) = \frac{4 G}{c^{4}} \frac{e^{i\omega r}}{r} \int_{V} T_{\mu \nu } (\omega , x') \dd[3]{x'} \,, \end{align} % so we can come back to % \begin{align} \overline{h}_{\mu \nu } (t, r) = \frac{4 G}{c^{4}r} \int T_{\mu \nu } (t- r/c, x') \dd[3]{x'} \,. \end{align} We can simplify this thanks to the expression \(T^{\mu \nu }{}_{, \nu } = 0\), which means we have conservation laws in the form \(\int T^{\mu 0} \dd[3]{x}\). We can put these constants to zero (since we are not interested in any non-wavelike behavior, like the stationary Kerr-like metric due to the source). We can do an integral of \(\partial_{\mu } T^{\mu \nu }= 0\): % \begin{align} \frac{1}{c} \pdv{}{t} \int _V T^{n 0} x^{k} \dd[3]{x} &= - \int \pdv{T^{ni}}{x^i} x^{k} \dd[3]{x} \\ &= \underbrace{\int \dd{S^{i}} (T^{ni} x^{k})}_{ \to 0} + \int T^{nk} \dd[3]{x} \\ \frac{1}{c} \pdv{}{t} \int x^{k} T^{n0} \dd[3]{x} &= \int T^{nk} \dd[3]{x} \,. \end{align} We can write this as % \begin{align} \frac{1}{2} \pdv{}{t} \int \qty[ T^{n0} x^{k} + T^{k0} x^{n}] = \int T^{nk} \dd[3]{x} \,. \end{align} The time component of the conservation law can be multiplied by \(x^{n} x^{k}\): % \begin{align} \frac{1}{c} \pdv{}{t} \int T^{00} x^{n} x^{k} \dd[3]{x} &= - \int \pdv{}{x^{i}} T^{0i} x^{n} x^{k} \dd[3]{x} \\ &= + \int T^{0i} \pdv{}{x^{i}} (x^{n} x^{k}) \dd[3]{x} \\ &= \int T^{0n} x^{k} + T^{0k} x^{n} \dd[3]{x} \,. \end{align} We then take a second derivative: % \begin{align} \frac{1}{c^2} \pdv[2]{}{t} \int T^{00} x^{n} x^{k} \dd[3]{x} &= \frac{1}{c} \pdv{}{t} \int T^{n0} x^{k} + T^{k0} x^{n} \dd[3]{x} \\ &= 2 \int T^{nk} \dd[3]{x} \,. \end{align} This is the \textbf{virial theorem} in GR. We will assume we are working on a \(t = \text{const}\) hypersurface. The metric is purely Euclidean there. The object % \begin{align} \frac{1}{c^2} \int T^{00} x^{n} x^{k} \dd[3]{x} = q^{n k} (t) \,, \end{align} % the quadrupole tensor. We then have % \begin{align} \frac{1}{2} \ddot{q}^{n k} (t) = \int T^{nk} \dd[3]{x} \,. \end{align} The result is therefore \(h^{\mu 0} = 0\), and % \begin{align} \overline{h}^{n k} = \frac{2G}{c^{4} r} \ddot{q}^{n k}(t) \,. \end{align} \end{document}
{ "alphanum_fraction": 0.5642530985, "avg_line_length": 29.2, "ext": "tex", "hexsha": "6f2f484383da853e1559db432352f2d22674fdf9", "lang": "TeX", "max_forks_count": 3, "max_forks_repo_forks_event_max_datetime": "2021-08-06T16:11:07.000Z", "max_forks_repo_forks_event_min_datetime": "2019-10-03T16:20:19.000Z", "max_forks_repo_head_hexsha": "805ebe1be49bbd14c6b46b24055f9fc7d1cd2586", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "jacopok/notes", "max_forks_repo_path": "phd_courses/theoretical_gravitation_cosmology/dec01.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "805ebe1be49bbd14c6b46b24055f9fc7d1cd2586", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "jacopok/notes", "max_issues_repo_path": "phd_courses/theoretical_gravitation_cosmology/dec01.tex", "max_line_length": 208, "max_stars_count": 6, "max_stars_repo_head_hexsha": "805ebe1be49bbd14c6b46b24055f9fc7d1cd2586", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "jacopok/notes", "max_stars_repo_path": "phd_courses/theoretical_gravitation_cosmology/dec01.tex", "max_stars_repo_stars_event_max_datetime": "2022-01-13T14:52:50.000Z", "max_stars_repo_stars_event_min_datetime": "2019-10-10T13:10:57.000Z", "num_tokens": 2570, "size": 6132 }
% PLEASE USE THIS FILE AS A TEMPLATE FOR THE PUBLICATION % Check file IOS-Book-Article.tex % preferred topic area: algorithms % % at most 5 keywords % % relevance % % originality \documentclass{IOS-Book-Article} %[seceqn,secfloat,secthm] \usepackage{mathptmx} %\usepackage[T1]{fontenc} %\usepackage{times}% % %%%%%%%%%%% Put your definitions here \usepackage{amsmath,amssymb} \usepackage{cite} \usepackage{color} \usepackage{graphicx} \usepackage{url} \usepackage{xspace} \newcommand{\comment}[1]{\textcolor{green}{[DAC: #1]}\xspace} \newcommand{\todo}[1]{\textcolor{red}{[TODO: #1]}\xspace} \newcommand{\sR}{\mathbb{R}} \newcommand{\dt}{\mathrm{d}t} \newcommand{\clawpack}{{\sc Clawpack}\xspace} \newcommand{\forestclaw}{Forestclaw\xspace} \newcommand{\pforest}{\texttt{p4est}\xspace} \newcommand{\manyclaw}{Manyclaw\xspace} % \newcommand{\plotbox}[1]{\fbox{#1}} \newcommand{\plotbox}[1]{#1} \newcommand{\Fig}[1]{Figure~\ref{fig:#1}} %%%%%%%%%%% End of definitions \begin{document} \begin{frontmatter} % The preamble begins here. % %\pretitle{} \title{\forestclaw: Hybrid forest-of-octrees AMR for hyperbolic conservation laws} \runningtitle{\forestclaw} %\subtitle{} % Two or more authors: %\author[A]{\fnms{} \snm{}}, %\author[B]{\fnms{} \snm{}} %\runningauthor{} %\address[A]{} %\address[B]{} % \author[A]{\fnms{Carsten} \snm{Burstedde}% \thanks{Corresponding author. E-mail: \texttt{[email protected]}}}, \author[B]{\fnms{Donna} \snm{Calhoun}}, \author[C]{\fnms{Kyle} \snm{Mandli}} and \author[C]{\fnms{Andy R.} \snm{Terrel}} \runningauthor{C.\ Burstedde et al.} \address[A]{Institut f\"ur Numerische Simulation, Universit\"at Bonn, Germany} \address[B]{Boise State University, Idaho, USA} \address[C]{Institute for Computational Engineering and Sciences,\\ The University of Texas at Austin, USA} \begin{abstract} % We present a new hybrid paradigm for parallel adaptive mesh refinement (AMR) that combines the scalability and lightweight architecture of tree-based AMR with the computational efficiency of patch-based solvers for hyperbolic conservation laws. The key idea is to interpret each leaf of the AMR hierarchy as one uniform compute patch in $\sR^d$ of dimensions $m^d$, where $m$ is customarily between 8 and 32. Thus, computation on each patch can be optimized for speed, while we inherit the flexibility of adaptive meshes. In our work we choose to work with the \pforest AMR library since it allows us to compose the mesh from multiple mapped octrees and enable the cubed sphere and other nontrivial multiblock geometries. % \end{abstract} \begin{keyword} adaptive mesh refinement, hyperbolic conservation laws, clawpack, HPC, manycore \end{keyword} \end{frontmatter} %%%%%%%%%%% The article body starts: \section{Introduction} With the advent of many-core chips, such as GPUs and the MIC architecture, comes the opportunity to sustain unprecedented rates of floating point operations at comparably high integration density and low cost. These architectures, however, require careful structuring of the data layout and memory access patterns to exhaust their multithreading and vectorization capabilities. Consequently, it is not clear a priori how to accelerate PDE solvers that use adaptive mesh refinement. %, especially when working with %unstructured meshes \comment{This sounds we are now going to talk % about unstructured meshes.}. % CB: Good point, took it out. 
Of course, it was realized early that it helps to aggregate degrees of freedom (DOF) at the element level, as has been done with high-order spectral element \cite{TufoFischer99}, low order continuous Galerkin via methods that accumulate many elements simultaneously \cite{knepleyterrel:2013}, %finite volume (element?) or discontinuous Galerkin \cite{HesthavenWarburton02} methods. GPU implementations of the latter have been proposed recently \cite{KlocknerWarburtonBridgeEtAl09, BursteddeGhattasGurnisEtAl10}. The finite volume method has typically been implemented using a single degree of freedom per cell on % either structured \cite{ppm, clawpack} or unstructured meshes \cite{openfoam}, where higher order methods have been constructed by widening the stencil, for instance in WENO methods \cite{Shu:2009bi}. % \comment{Are you looking for % references to parallel AMR codes? Or more general AMR codes based % on the FVM?} \todo{No, I was looking for unstructured FV % references. Would you have any, and can we add the block-AMR % references later?} \comment{But most finite volume methods % (structured or unstructured) only have a single degree of freedom % per mesh cell (``element''?) Only recently have people increased % the order by extending the stencil (without increase the DOFs per % mesh cell).} % CB: Can you fix some of the above citations? % For example, SEs optimized for computational speed have originally been % implemented on unstructured conforming meshes \cite{TufoFischer99} and later % been extended to non-conforming adaptive meshes \cite{FischerKruseLoth02, % BursteddeGhattasGurnisEtAl10}. To facilitate hardware acceleration for parallel dynamic AMR, we build upon the forest-of-octrees paradigm because of its low overhead and proven scalability \cite{BursteddeWilcoxGhattas11}. This approach identifies each octree leaf with a mesh element. In our work, we go beyond the traditional high-order element and define each element to be a dense computational patch with $m^d$ DOFs. In fact, this approach resembles block-structured AMR \cite{be-ol:1984, be-co:1989, % be-le:1991 (cited below) ColellaGravesKeenEtAl07, BerzinsLuitjensMengEtAl10} except that the patches are not overlapping, % (except in ghost cell regions), % CB: we comment on this later in Section 4. which enables us to capitalize on our previous experience with scalable FE solvers for PDEs \cite{BursteddeStadlerAlisicEtAl13}. The \clawpack software \cite{LeVeque97} provides a popular implementation of such a patch. It has been designed to solve hyperbolic conservation laws % on a uniform compute patch and successfully used in the context of block-structured AMR \cite{be-le:1991, amrclaw, Berger:2011du}. In this paper we describe our design for the coupling of forest-of-octree AMR with \clawpack at the leaf level. We comment on challenges that arise in enabling multiblock geometries and efficient parallelism and conclude with a range of numerical examples that demonstrate the conceptual improvements in relation to other approaches. \section{Design principles} The starting point of our work is defined by the \pforest algorithms for forest-of-octrees AMR on the one hand, and the \clawpack algorithms for the numerical solution of hyperbolic conservation laws % on uniformly gridded domains on the other. 
Both are specialized codes with the following characteristics: % \comment{I am not sure about the Python dependency; the latest version of % \clawpack relies on %Python to generate an input file, but the code itself is F77 (or maybe some %F90). } % CB: I made it optional. % DAC: Good. \begin{center} \begin{tabular}{l|l|l} & \multicolumn{1}{c|}{\pforest} & \multicolumn{1}{c}{\clawpack} \\ \hline subject & hexahedral nonconforming mesh & hyperbolic PDE on $[0, 1]^d$ \\ toplevel unit & forest of octrees & patch of $m^d$ FV cells \\ atomic unit & octree leaf & one DOF in each cell \\ parallelization & MPI & threads (\manyclaw variant) \\ memory access & distributed & shared on each MPI rank \\ data type & integers & floating point values \\ language & C & Fortran 77 \\ % CB: dropped dependencies. % dependencies & none & Python (optional) \\ %\hline \end{tabular} \end{center} Each leaf as the atomic unit of \pforest houses a toplevel unit of \clawpack. The term cell is used to identify a single DOF within a \clawpack patch. The proposed 1:1 correspondence between a leaf and a patch thus combines two previously disjoint models in a modular way: \begin{enumerate} \item We permit the reuse of existing, verified, and performant codes. % if the integration is done well \item We preserve the separation between the mesh on one hand and the discretization and solvers on the other. \item The AMR metadata (\pforest: under 1k bytes per octree, 8 bytes per MPI rank, 24 bytes per leaf. \forestclaw: $84 + 28d$ bytes per patch) % \comment{?? : in 3d: 5 ints, 9 floats; in 2d: 4 ints, 6 floats} % CB: thanks, I have added it. is insignificant compared to the numerical data ($m^d$ floating point values per patch). \item The resulting parallel programming model is a hybrid (often referred to as MPI+X). Only rank-local leaves/patches are stored and computed on. \end{enumerate} % yields periodicity as a special case A particular feature of \forestclaw is that the generic handling of multiblock geometries is inherited from \pforest, identifying each octree as a block. % and each leaf as a patch. Each block is understood as a reference unit cube with its own geometric mapping. The connectivity of the blocks can be created by external hexahedral mesh generators, eliminating the need to encode it by hand. A main challenge is presented by the fact that the patch neighborhood is only known to \pforest. This patch connectivity information needs to be propagated to the numerical code in \forestclaw that implements the interaction with neighbor patches via the use of a layer of ghost cells surrounding each patch. To this end, we define an interface that allows read-only access to the sequence of blocks, the list of patches for each, and a lookup of neighbor patches (and their relative orientation which is nontrivial between blocks). Suitably informed by \pforest, \forestclaw stores only the patches local to each MPI rank. The exchange of ghost data and parallel repartitioning is provided transparently by \pforest. Mesh modification directives, such as adaptive refinement and coarsening, are called from \forestclaw and relayed to \pforest. % Forestclaw would support swapping out p4est or \clawpack % AMR Metadata $\ll$ numerical data % Experience from integration with large-scale adaptive-mesh PDE solvers. % Design goals: Lightweight, modular, reuse. % Mesh information is discrete (tree nodes have integer coordinates in % $(0, 2^L($ where $L$ is the maximum allowed refinement level of the tree. 
% Dimension independence % rank as is common with parallelization using the MPI framework % \cite{Forum94, SnirOttoHuss-LedermanEtAl96}. \section{Parallelization} \begin{figure} \begin{center} \includegraphics[width=.4\columnwidth]{../figs_misc/forest_stacked} \hspace{.05\columnwidth} \includegraphics[width=.4\columnwidth]{../figs_misc/cubed3} \end{center} \caption{Left: Forest of two quadtrees, partitioned among three MPI processes. Each quadtree has its own coordinate orientation. The situation in 3D (octrees) is analogous. Right: The leaves in a forest of six quadtrees that serves as the computational domain for the cubed sphere. An adhoc refinement pattern has been 2:1 balanced in terms of neighbor sizes and partitioned between five MPI processes (the three middle ones are shown from black to white).} \label{fig:cubed3} \end{figure} The MPI layer is addressed from within \pforest and not exposed to the \forestclaw code. The order of leaves is maintained in \pforest according to a space filling curve. Each MPI rank has a local view on its own partition, augmented where necessary with information about one layer of ghost leaves (see \Fig{cubed3}). \forestclaw uses iterators over all rank-local leafs, optionally restricted to a given level. Random access is possible and used when executing $O(1)$ time neighbor lookups. Looping over the patches in the order prescribed by the forest % \comment{tree?} % CB: I think forest is fine here. and accessing neighbors only relative to the current patch leads to a high percentage of cache reuse \cite{BursteddeBurtscherGhattasEtAl09}. % since the ghost patches are likely closeby with respect to the $z$-order. When \forestclaw accesses neighbor patches, they can be on the same or a different block. In the latter case, coordinate transformations are carried out. The structure of \forestclaw is oblivious to the fact that it only has a local view of the distributed mesh and data which relieves the developer from programming to the MPI interface. Parallel neighbor exchanges are hidden inside \pforest and called by \forestclaw at well-defined synchronization points. There is one such parallel data exchange per time step for a global value of $\dt$, or one exchange per discretization level per time step if $\dt$ is chosen per-patch depending on its size (this is sometimes called sub-cycling). When using sub-cycling, the load balance is attained per level as it is done for example in Chombo \cite{ColellaGravesKeenEtAl07} or recent geometric adaptive multigrid schemes \cite{SundarBirosBursteddeEtAl12}. The threaded parallelism over the degrees of freedom of a patch can be handled by \forestclaw alone without the need to involve \pforest. Additionally many-core implementations, such as \manyclaw \cite{manyclaw}, can be used for the hyperbolic integration on a leaf-patch. The design of leaf-patches allows for the smart management of data local to accelerator and the host. % Trees have different coordinate systems. % neighbor block: permutation of patch points due to non-aligned coordinate % systems % neighbor rank: data needs to be fetched over the network first. % $L$ is 30 for 2D and 19 for 3D, so it allows for deep hierarchies. % fast (O(log N/P)) neighbor and parent/child lookups % horizontal/vertical tree traversal % \todo{What is BearClaw (tree-based / S.\ Mitran)?} \section{Patch-based numerics at the leaf level} % CB: I'm trying to avoid "mesh" cell here since in \pforest FE simulations % that would mean a whole leaf. 
For hyperbolic problems, we integrate the solution on a single uniform patch, containing $m^d$ cells, using the wave propagation algorithm described by R. J. LeVeque \cite{LeVeque:1997eg} and implemented in \clawpack \cite{le:2002, clawpack}. We assume a single degree of freedom per cell and reconstruct a piecewise constant solution to obtain left and right states at cell edges. At each edge, we solve Riemann problems to obtain left and right going waves propagating at speeds determined from the solution to the Riemann problem. For scalar advection, the speed of each wave is simply the local advection speed at the cell interface. For non-linear problems and systems, an approximate Riemann solver, such as a Roe solver \cite{roesolver}, is typically used. Since much of the physics of an application can be contained in the Riemann solver, \forestclaw adopts \clawpack's interface to Riemann solvers effectively allowing problems solved with \clawpack to be solvable in \forestclaw. In order to achieve second order accuracy wave limiters are used to suppress spurious oscillations at sharp gradients in the flow. Data exchanges between neighboring patches are done via layers of ghost cells extending the dimensions along the edges of each patch. The interior edge values of a given patch overlap the ghost cell region of a neighboring patch. For the second order wave propagation algorithm, two layers of ghost cells are sufficient. This implies that one layer of ghost patches is sufficient for $m \ge 4$. Neighboring patches at the same level of refinement simply copy their interior edge values into a neighbors' ghost cells. Neighboring fine grid patches average their interior edge data to a coarser neighbor's ghost cell values. And neighboring coarse grid patches interpolate data from their interior edge cells to their fine grid neighbor's ghost cell values. To avoid loss of conservation and the creation of spurious extrema, we use a standard conservative, limited interpolation scheme to interpolate values from the coarse grid to fine grid coarse cells \cite{amrclaw, chombo}. When sub-cycling, time accurate data between coarse grids is used to fill in ghost cells for fine grids. As mentioned in the previous section, this procedure can be extended transparently to distributed parallelism by defining an abstract exchange routine for ghost patch data. Grid refinement and coarsening requires interpolation from coarse grids to newly created fine grids, and the averaging of fine grid data to a newly created underlying coarse grid. This operation is rank-local, analogous to the general dynamic AMR procedures used in \pforest-based FE codes \cite[Fig.\ 4]{BursteddeGhattasStadlerEtAl08}. \section{Numerical results} \begin{figure} \begin{center} \plotbox{\includegraphics[width=.4\columnwidth]{quadtree}} \hspace{.05\columnwidth} \plotbox{\includegraphics[width=.4\columnwidth]{sphere_plot167}} \end{center} \caption{Numerical results for solving the advection equation. Left: Unit square (single quadtree) with a twisting velocity field. Right: Spherical ball with a rotational velocity field constructed from two mapped quadtrees. In both cases the concentration is color-coded with a sharp gradient shown in red. The adaptive mesh refinement follows the location of the gradient (the patches are not shown where they become too fine for display). Here we use $m = 8$ cells per \clawpack patch.} \label{fig:results2d} \end{figure} We provide two examples that demonstrate the \forestclaw code. 
The numerical results to date have been designed to verify that the
interface layer between \pforest and \forestclaw is sufficiently flexible and
robust.  Of particular importance was ensuring that all ghost cell transfers
(including averaging and interpolation) have been implemented correctly.   The
basic \clawpack algorithm and corresponding code have been thoroughly tested
and need no further verification in our context.

In both sets of numerical results, we solve a scalar advection equation,
\begin{equation}
q_t + ({\bf u}\cdot \nabla) q = 0 ,
\end{equation}
where the velocity field ${\bf u} = (u(\xi,\eta,t), v(\xi,\eta,t))$ is a
prescribed function of computational coordinates.  The relevant numerical
parameters that are set in each case include the size of the patch on each
leaf ($m=8$ in each case), and the minimum and maximum refinement levels,
which in turn fix minimum and maximum effective grid resolutions.  In both
examples, a patch is tagged for refinement if the difference between its
maximum and minimum values exceeds a prescribed threshold.  A family of $2^d$
patches is coarsened to a parent patch if this patch would not meet the
criteria for refinement.

\begin{description}
\item{\bf Example 1:} An initial concentration field $q$ is set to 0 in the
  left half of a computational square and to 1 in the right half.  A
  time-dependent flow field is prescribed that distorts the interface between
  the 0 and 1 concentration values.   The minimum and maximum levels of
  refinement are set to 3 and 6, respectively.  This results in a minimum
  resolution of $64 \times 64$ and a maximum resolution of $512 \times 512$.
  \Fig{results2d} shows the results at an intermediate time step.

% \begin{figure}
% \begin{center}
% \plotbox{\includegraphics[height=0.485\textwidth,clip=true,trim=3cm 0cm 2cm 0cm]{quadtree.png}} \hfil
% \plotbox{\includegraphics[height=0.485\textwidth,clip=true,trim=6cm 0cm 7cm 0cm]{sphere_plot167.png}}
% \end{center}
% \caption{Results for scalar advection using \forestclaw.}
% \label{fig:results}
% \end{figure}
We also acknowledge David Ketcheson and the KAUST sponsored HPC$^3$ numerics workshop at which the initial phases of this project were first discussed. The second author would like to also acknowledge the Isaac Newton Institute (Cambridge, UK), where much of the preliminary development work for \forestclaw was done. The leaf/patch paradigm was independently presented by B.\ as part of a talk at the SCI Institute, Salt Lake City, Utah, USA in July 2011. %%%%%%%%%%% The bibliography starts: \bibliographystyle{unsrt} \bibliography{../biblio/ccgo,../biblio/amr} \end{document}
{ "alphanum_fraction": 0.7820203892, "avg_line_length": 44.8648648649, "ext": "tex", "hexsha": "f795a62ad1dbd5958643a0ea7969899b17393483", "lang": "TeX", "max_forks_count": 23, "max_forks_repo_forks_event_max_datetime": "2022-03-14T19:08:36.000Z", "max_forks_repo_forks_event_min_datetime": "2018-02-21T00:10:58.000Z", "max_forks_repo_head_hexsha": "0a18a563b8c91c55fb51b56034fe5d3928db37dd", "max_forks_repo_licenses": [ "BSD-2-Clause" ], "max_forks_repo_name": "ECLAIRWaveS/ForestClaw", "max_forks_repo_path": "docs/parco13/abstract.tex", "max_issues_count": 75, "max_issues_repo_head_hexsha": "0a18a563b8c91c55fb51b56034fe5d3928db37dd", "max_issues_repo_issues_event_max_datetime": "2022-03-31T12:36:32.000Z", "max_issues_repo_issues_event_min_datetime": "2017-08-02T19:56:00.000Z", "max_issues_repo_licenses": [ "BSD-2-Clause" ], "max_issues_repo_name": "ECLAIRWaveS/ForestClaw", "max_issues_repo_path": "docs/parco13/abstract.tex", "max_line_length": 297, "max_stars_count": 34, "max_stars_repo_head_hexsha": "0a18a563b8c91c55fb51b56034fe5d3928db37dd", "max_stars_repo_licenses": [ "BSD-2-Clause" ], "max_stars_repo_name": "ECLAIRWaveS/ForestClaw", "max_stars_repo_path": "docs/parco13/abstract.tex", "max_stars_repo_stars_event_max_datetime": "2022-03-11T08:56:23.000Z", "max_stars_repo_stars_event_min_datetime": "2017-09-26T13:39:44.000Z", "num_tokens": 5528, "size": 21580 }
\documentclass{tufte-handout} %\geometry{showframe}% for debugging purposes -- displays the margins \usepackage{amsmath} % Set up the images/graphics package \usepackage{asymptote} % for asymptote graphics \usepackage{graphicx} \setkeys{Gin}{width=\linewidth,totalheight=\textheight,keepaspectratio} \graphicspath{{graphics/}} \title{Summing Approximations} \author[Warren MacEvoy]{Warren MacEvoy} % \date{24 January 2009} % if the \date{} command is left out, the current date will be used % The following package makes prettier tables. We're all about the bling! \usepackage{booktabs} \hypersetup{colorlinks} % Comment this line if you don't wish to have colored links \usepackage{amsmath} \usepackage{amssymb} \usepackage{amsthm} \usepackage{listings} \usepackage{siunitx} % align decimals in tables \usepackage{nth} % 1st, 2nd, ... \usepackage{microtype} % Improves character and word spacing \usepackage{asymptote} % for asymptote graphics \usepackage{ccicons} \usepackage{lipsum} % Inserts dummy text \usepackage{booktabs} % Better horizontal rules in tables \usepackage{graphicx} % Needed to insert images into the document % The units package provides nice, non-stacked fractions and better spacing % for units. \usepackage{units} % The fancyvrb package lets us customize the formatting of verbatim % environments. We use a slightly smaller font. \usepackage{fancyvrb} \fvset{fontsize=\normalsize} % Small sections of multiple columns \usepackage{multicol} % Provides paragraphs of dummy text \usepackage{lipsum} % These commands are used to pretty-print LaTeX commands \newcommand{\doccmd}[1]{\texttt{\textbackslash#1}}% command name -- adds backslash automatically \newcommand{\docopt}[1]{\ensuremath{\langle}\textrm{\textit{#1}}\ensuremath{\rangle}}% optional command argument \newcommand{\docarg}[1]{\textrm{\textit{#1}}}% (required) command argument \newenvironment{docspec}{\begin{quote}\noindent}{\end{quote}}% command specification environment \newcommand{\docenv}[1]{\textsf{#1}}% environment name \newcommand{\docpkg}[1]{\texttt{#1}}% package name \newcommand{\doccls}[1]{\texttt{#1}}% document class name \newcommand{\docclsopt}[1]{\texttt{#1}}% document class option name \theoremstyle{definition} \newtheorem{definition}{Definition} \theoremstyle{example} \newtheorem{example}{Example} \theoremstyle{theorem} \newtheorem{theorem}{Theorem} \begin{document} \maketitle% this prints the handout title, author, and date \begin{abstract} \noindent Understanding asymptotic behavior of a series or function is useful. \end{abstract} %\printclassoptions \section{Estimating Sums} \begin{figure} \begin{center} \includegraphics[width=1.00\linewidth]{graphics/intsum.pdf} \end{center} \caption{\label{fig:intmid}Comparing sum (black rectangles) with integrals of increasing $f$ (red), $f$ shifted -1/2 (green), and $f$ shifted -1 (blue).} \label{fig:intmid} \end{figure} Summing is a linear operation, so \begin{equation} \sum_{k=0}^{n-1} \left[ \alpha f(k) + \beta g(k) \right] = \alpha \sum_{k=0}^{n-1} f(k) + \beta \sum_{k=0}^{n-1} g(k) \,. \end{equation} Summing powers of $k$: \begin{equation} \sum_{k=0}^{n-1} 1 = n \,. \end{equation} \begin{equation} \sum_{k=0}^{n-1} k = \frac{1}{2} n(n-1) = \frac{1}{2} n^2 + O(n)\,. \end{equation} \begin{equation} \sum_{k=0}^{n-1} k^2 = \frac{1}{3}n(n-1/2)(n-1) = \frac{1}{3} n^3 + O(n^2) \,. \end{equation} \begin{equation} \sum_{k=0}^{n-1} k^3 = \frac{1}{4}n^2(n-1)^2 = \frac{1}{4} n^4 + O(n^3) \,. 
\end{equation} Generally, for $p>0$, \begin{equation} \sum_{k=0}^{n-1} k^p \approx \frac{1}{p+1} (n-1/2)^{p+1} = \frac{1}{p+1} n^{p+1} +O(n^p) \,. \end{equation} Estimating sums with integrals. If $f(x)$ is increasing, then \begin{equation} \int_{a-1}^{b} f(x) \, dx \leq \sum_{k=a}^{b} f(k) \approx \int_{a-1/2}^{b+1/2} f(x) dx \leq \int_{a}^{b+1} f(x) \, dx \end{equation} \end{document}
{ "alphanum_fraction": 0.7150192555, "avg_line_length": 31.6666666667, "ext": "tex", "hexsha": "128c059aad19059976adc5a0284be3e3b71894b3", "lang": "TeX", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2018-08-22T17:16:09.000Z", "max_forks_repo_forks_event_min_datetime": "2018-08-22T17:16:09.000Z", "max_forks_repo_head_hexsha": "232224ee94d1cef56bf60ec964953539cf0ace26", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "wmacevoy/algorithms-fall-2018", "max_forks_repo_path": "notes/sums/notes.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "232224ee94d1cef56bf60ec964953539cf0ace26", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "wmacevoy/algorithms-fall-2018", "max_issues_repo_path": "notes/sums/notes.tex", "max_line_length": 153, "max_stars_count": 2, "max_stars_repo_head_hexsha": "232224ee94d1cef56bf60ec964953539cf0ace26", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "wmacevoy/algorithms-fall-2018", "max_stars_repo_path": "notes/sums/notes.tex", "max_stars_repo_stars_event_max_datetime": "2018-08-22T17:14:33.000Z", "max_stars_repo_stars_event_min_datetime": "2018-08-22T17:14:25.000Z", "num_tokens": 1346, "size": 3895 }
\section*{Key skills and experiences}

\begin{itemize}
	\setlength\itemsep{0.1em}
	\item Strong communication skills
	\item Fluent in English, Italian and Croatian
	\item Self-managing and able to work independently
	\item Experience with open-source software development
	\item Fast learner
\end{itemize}
{ "alphanum_fraction": 0.7730496454, "avg_line_length": 28.2, "ext": "tex", "hexsha": "24233f2db1cf0efa4699f64416dbde35e1b0c8b6", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "2f6c5159f1afa42f99265ab8c47fc048600fecca", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "LLoyderino/Curriculum-Vitae", "max_forks_repo_path": "sections/skills.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "2f6c5159f1afa42f99265ab8c47fc048600fecca", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "LLoyderino/Curriculum-Vitae", "max_issues_repo_path": "sections/skills.tex", "max_line_length": 56, "max_stars_count": null, "max_stars_repo_head_hexsha": "2f6c5159f1afa42f99265ab8c47fc048600fecca", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "LLoyderino/Curriculum-Vitae", "max_stars_repo_path": "sections/skills.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 74, "size": 282 }
In this section, the experimental setups and the results of each step are discussed, and the different ideas and algorithms described in section \ref{sec:exp} are evaluated. First, machine-learning-based experiments are presented. Since feature engineering for machine learning requires multiple experiments to find the best-working predictors, there are many trials with a variety of algorithms and ideas. Then, deep learning models are compared to the machine learning models. Finally, the FakeNews model is evaluated based on the best stance detection models.

The headline of a news article is a summary of its body content and, most of the time, it carries valuable information. Therefore, we focus on detecting the stance of a news headline towards a claim (\ac{H2C}), as well as the stance of a news article towards a claim (\ac{A2C}). Since a headline contains less text than the article body, most of the experiments are first applied to \ac{H2C}. Then, the better-performing approaches are applied to \ac{A2C}.

\section{Dataset}
\label{sec:dataset}
\input{dataset}

\section{Tokenization}
The \textit{Hazm}, \textit{Stanza}, \textit{\ac{NLTK}}, and \textit{WordPiece} tokenizers are evaluated against each other manually, and every difference in their outputs is examined\footnote{All cases in which the 4 algorithms tokenize the corpus differently are gathered \href{https://docs.google.com/document/d/1SlRBnoyLntLJ5yalWXZ1EqJ0wRj4DyiEMJdewkEkrTM/edit?usp=sharing}{here}}. Only three cases are presented as examples to compare the tokenizers' performance in figure \ref{fig:tekenres}. As we need to remove particular words and patterns from the corpus, it is important to find a tokenizer that distinguishes all words correctly. Besides, it should not lose information while tokenizing and should be as fast as possible.

In figure \ref{fig:tekenres}, unsuitable tokens are highlighted in blue. No algorithm is flawless. WordPiece is subword-based, so it produces some unknown (\ac{UNK}) tokens and some words are broken incorrectly. For example, in figure \ref{fig:tekenres}, part (b), the blue-highlighted word is wrongly broken into two pieces. Treating attached pronouns as separate tokens is the strength of the \textit{Stanza} tokenizer. However, \textit{Stanza} sometimes breaks an original word under the wrong assumption that it carries an attached pronoun. Figure \ref{fig:tekenres}, part (c) shows a wrongly separated pronoun, while in part (b) the pronouns are separated correctly. It can be seen in figure \ref{fig:tekenres} that \textit{Hazm}'s performance is highly similar to \textit{\ac{NLTK}}'s. The only difference is that, in contrast to \textit{\ac{NLTK}}, Hazm separates numbers and punctuation in the corpus (figure \ref{fig:tekenres}, part (a)).

\begin{figure}%
    \centering
    \subfloat[\centering]{{\includegraphics[width=16cm]{statistics/tokenizer/1.png} }}%
    \qquad
    \subfloat[\centering ]{{\includegraphics[width=16cm]{statistics/tokenizer/2.png} }}%
    \qquad
     \subfloat[\centering]{{\includegraphics[width=16cm]{statistics/tokenizer/3.png} }}%
    \caption{Comparison of the performance of the \textit{Hazm}, \textit{Stanza (Stanford)}, \textit{NLTK} and \textit{WordPiece (\ac{BERT})} tokenizers.}%
    \label{fig:tekenres}%
\end{figure}

Besides accuracy, the tokenization time of each tokenizer is compared in figure \ref{fig:tokentime}. While \textit{Hazm} is the fastest word tokenizer among the evaluated algorithms, the \textit{Stanford} tokenizer takes significantly longer. According to all pieces of evidence, the \textit{Hazm} tokenizer is the best choice for this task.
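As a minimal, illustrative sketch (not the exact evaluation script of this project), the tokenizers can be timed on the same texts as follows; the \texttt{documents} list is a placeholder for the dataset headlines and articles, and the function names are assumptions.

\begin{verbatim}
import time
from hazm import word_tokenize as hazm_tokenize            # Hazm word tokenizer
from nltk.tokenize import word_tokenize as nltk_tokenize   # needs nltk.download('punkt')

documents = ["..."]  # placeholder: headlines/articles from the dataset

def time_tokenizer(tokenize, docs):
    # Tokenize every document once and measure the elapsed wall-clock time.
    start = time.perf_counter()
    tokens = [tokenize(doc) for doc in docs]
    return tokens, time.perf_counter() - start

hazm_tokens, hazm_seconds = time_tokenizer(hazm_tokenize, documents)
nltk_tokens, nltk_seconds = time_tokenizer(nltk_tokenize, documents)
\end{verbatim}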
\begin{figure}%
    \centering
    {\includegraphics[width=12.5cm]{statistics/tokenizer/duration.png} }
    \caption{Comparison of the tokenization duration of each algorithm on the dataset of \cite{stance_persian}.}%
    \label{fig:tokentime}%
\end{figure}

\section{Stop-Words}
After the tokens are preprocessed, stop-words are removed from them. Initially, the same stop-words list used by \cite{stance_persian} was used in this project. However, after reviewing the preprocessed corpus, it was hard to infer the stance from the remaining text, so we chose the stop-words carefully in order not to lose refuting or supporting expressions. Kharazi\footnote{\label{fn:kharazi}github.com/kharazi/persian-stopwords} has classified Persian stop-words into verbal, nonverbal, and short. Since verbs carry valuable information in news, the nonverbal stop-word class is a better choice for removing low-value words in this task. Besides, we added words to and removed words from the Nonverbal list to make it suitable for the news context. The performance of these four sets of stop-words, in addition to not removing stop-words at all (to have a fair comparison), is illustrated in figure \ref{fig:stopwords}.

The list of \cite{stance_persian} contains 1255 words covering a wide range of parts of speech. In contrast, the NonVerbal\footnote{Gathered by \href{github.com/kharazi/persian-stopwords}{Kharazi}} stop-word set includes 158 words and does not contain any verbs. In the new version of the stop-words, 18 words are removed from the Nonverbal set; this set is called \textit{shortened}. Then 82 new words are added, and the resulting \textit{Extended} version contains 233 words. To evaluate the performance of each stop-word set, all desired features, with \ac{TFIDF} as the word representation, are fed into an \ac{SVM} (\cite{svc}) model after removing the corresponding stop-words. It can be inferred from figure \ref{fig:stopwords} that the stop-words list used by \cite{stance_persian} discards valuable data; it is even better not to remove stop-words from the corpus at all. The Nonverbal stop-words achieve higher accuracy on stance detection. However, neither removing words from the Nonverbal list (the \textit{shortened} version) nor adding new words improved the results. Altogether, the Nonverbal list of stop-words performs best in this task.
All remaining tokens are then concatenated with a space character and considered the preprocessed, clean corpus.

\begin{figure}%
    \centering
    {\includegraphics[width=12.5cm]{statistics/AccuracyScore.png} }
    \caption{Comparison of the accuracy of the \ac{SVM} model with different configurations of stop-words.}%
    \label{fig:stopwords}%
\end{figure}

\section{Word Representation}
As a baseline, three algorithms, Bag-of-Words (\cite{bow}), \ac{TFIDF} (\cite{tfidf}), and Word-to-Vector (\cite{word2vec}), are evaluated against each other. More details are given in section \ref{lr:wordrep}. In this project, the FastText\footnote{fasttext.cc} \ac{W2V} model with vector length 300, trained on the Persian Wikipedia, is used. For both \ac{TFIDF} and \ac{BoW}, the n-gram range is set from 1 to 2.
The performances of \ac{BoW} (\cite{bow}), \ac{TFIDF}, and \ac{W2V} (\cite{word2vec}) are compared using the \ac{SVM} \ac{ML} algorithm on the stance detection task. According to figure \ref{fig:wordrep}, \ac{TFIDF} represents the words best in comparison to \ac{BoW} and \ac{W2V}.
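As an illustrative sketch of how these two count-based baselines can be built with scikit-learn (the n-gram range of 1 to 2 is taken from the text above; everything else, including the variable names, is an assumption):

\begin{verbatim}
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer

corpus = ["..."]  # placeholder: the preprocessed claim/headline texts

# Bag-of-Words with unigrams and bigrams
bow_vectorizer = CountVectorizer(ngram_range=(1, 2))
X_bow = bow_vectorizer.fit_transform(corpus)

# TF-IDF with the same n-gram range (the best-performing representation here)
tfidf_vectorizer = TfidfVectorizer(ngram_range=(1, 2))
X_tfidf = tfidf_vectorizer.fit_transform(corpus)
\end{verbatim}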
\begin{figure}%
    \centering
    {\includegraphics[width=12.5cm]{statistics/WordRep.png} }
    \caption{Comparison of the \ac{SVM} model with the \ac{BoW}, \ac{TFIDF} and \ac{W2V} word representation algorithms.}%
    \label{fig:wordrep}%
\end{figure}

\section{Predictors}
\label{sec:predictors}
Different combinations of the calculated predictors are evaluated to find out which set of predictors performs best. The candidate predictors are Similarity, RootDistance, ImportantWords, IsQuestion, HasTwoParts and Polarity. In this section, the results of both \ac{SVM} (\cite{svc}) and RandomForest (\cite{randomforest}) are considered, in order to have a more reliable analysis. The similarity score is calculated using the \textit{difflib}\footnote{docs.python.org/3/library/difflib.html} Python library.

First, both models are trained on the \ac{TFIDF} representation of the corpus without any other predictor. Then, each predictor is added to the \ac{TFIDF} vector to evaluate its effectiveness individually, and finally all features together are fed to the models. According to table \ref{tlb:predictors}, Similarity and ImportantWords have the highest positive effect on both accuracy (\ref{eq:acc}) and f1-score (\ref{eq:f1}), respectively. The Similarity score improves accuracy by 15 to 17 percent, and ImportantWords improves \ac{SVM} accuracy by almost 4 percent. Although predictors such as IsQuestion, HasTwoParts, and Polarity do not have a significant individual effect on the results, using them all together boosts the total accuracy and f1-score.

According to table \ref{tlb:predictors}, using RootDistance, IsQuestion, or HasTwoParts individually decreases accuracy, so models are also trained with two other variations. Although IsQuestion, HasTwoParts, and Polarity do not have a positive effect individually, using them together with Similarity and ImportantWords boosts accuracy. On the other hand, removing RootDistance from the All-predictors mode either has a negligible effect or improves accuracy.
According to the previous comparisons and the evaluation inferred from table \ref{tlb:predictors}, the best predictors for the stance detection task are the combination of the Similarity, ImportantWords, IsQuestion, HasTwoParts, and Polarity predictors, in addition to \ac{TFIDF} as the corpus representation.

\begin{table}
\centering
\caption{Comparison of \ac{Acc} score and F1-score with different combinations of predictors for both \ac{SVM} and Random Forest classifiers.}
\setlength{\extrarowheight}{5pt}%
\begin{tabular*}{350pt}{@{\extracolsep{\fill}}| l | l | l | l | l |}
\hline
\multicolumn{1}{|c|}{} & \multicolumn{2}{l|}{SVM} & \multicolumn{2}{l|}{RandomForest} \\ \cline{2-5}
\multicolumn{1}{|c|}{\multirow{-2}{*}{\begin{tabular}[c]{@{}c@{}}Predictors\\ Model\end{tabular}}} & \ac{Acc} & F1. & \ac{Acc} & F1.
\\ \cline{1-1} \hline \hline
\multicolumn{1}{|c|}{\ac{TFIDF} only} & 51.83 & 51.90 & 52.79 & 54.00 \\ \cline{1-1} \hline
+ Similarity & 66.85 & 66.71 & 68.78 & 67.88 \\ \cline{1-1} \hline
+ Root Distance & 51.25 & 51.50 & 49.71 & 50.52 \\ \cline{1-1} \hline
+ Important Words & 56.64 & 56.94 & 52.98 & 52.49 \\ \cline{1-1} \hline
+ Is Question & 51.63 & 51.79 & 51.63 & 52.70 \\ \cline{1-1} \hline
+ Has Two Parts & 51.83 & 51.90 & 50.28 & 50.87 \\ \cline{1-1} \hline
+ Polarity & 52.21 & 52.50 & 52.40 & 53.25 \\ \cline{1-1} \hline
+ All & 69.74 & 69.75 & 69.36 & 68.75 \\ \cline{1-1} \hline
+ All - Root Distance & 69.74 & 69.69 & \textbf{70.71} & \textbf{70.28} \\ \hline
+ Similarity + ImportantWords & 69.74 & 69.69 & 67.82 & 67.02 \\ \hline
\end{tabular*}
\label{tlb:predictors}
\end{table}

\section{Machine Learning}
\label{sec:ml}
In this section, the parameters of the Gaussian Naive Bayes, \ac{SVM}, Linear \ac{SVC}, Random Forest, and Logistic Regression models are tuned on the stance detection task with respect to the predictors chosen in the previous section (\ref{sec:predictors}). In the next step, the performance of the models is compared.

\subsection{SVM}
In this project, the \ac{SVC} model implementation from the \textit{scikit-learn}\footnote{scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html} python library is used. The \textit{class\_weight} parameter is set to \textit{balanced} to compensate for the imbalanced data. The three tuned \ac{SVM} classifier parameters are the kernel, the regularization parameter (C), and the degree of the polynomial kernel. The evaluated kernels are Radial Basis Function (\ac{RBF}), Polynomial, and Sigmoid. According to figure \ref{fig:svm}, the Sigmoid kernel performs poorly on this task. Each polynomial kernel behaves differently depending on the value of the regularization parameter. Polynomials with degrees 2 and 3 perform better than degrees 1 and 4; a linear polynomial may be too simple, while the \ac{SVM} does not classify stance well with a polynomial of degree 4. \ac{RBF} and the polynomial of degree 3 behave similarly as the regularization parameter changes. The best configuration for the \ac{SVM} classifier is using \ac{RBF} as the kernel with the regularization parameter equal to 2.5, as confirmed by figure \ref{fig:svm}. The learning procedure and the details of each model training are available in this project's GitHub repository\footnote{Different SVM configuration training details \href{https://github.com/mahsaghn/stance\_detection/tree/main/selected\_outputs/machinelearning/svm}{[here]}.}.

\begin{figure}%
\centering
{\includegraphics[width=12.5cm]{statistics/svm.png} }
\caption{Tuning the \ac{SVM} model parameters with the \ac{TFIDF} representation algorithm.}%
\label{fig:svm}%
\end{figure}

\subsection{Linear SVC}
For the Linear Support Vector Machine Classifier, the loss function and the regularization parameter are tuned with the penalty set to \textit{l2}. Figure \ref{fig:linearsvm} illustrates the comparison between LinearSVC models with the Hinge and Squared Hinge loss functions and the tuned \ac{SVM} algorithm. The Hinge loss function scores better than Squared Hinge. When Squared Hinge is used as the loss function, the accuracy score decreases as the regularization parameter increases, whereas the regularization parameter does not have a significant effect on the accuracy score when the loss is Hinge. In conclusion, according to figure \ref{fig:linearsvm}, the best configuration is the Hinge loss with the regularization parameter equal to 1.0.
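The parameter searches described in this and the previous subsection can be sketched with \textit{scikit-learn} as follows; this is a simplified illustration with our own variable names (\texttt{features}, \texttt{labels}) and candidate values, not the exact training script of the project.
\begin{verbatim}
# Illustrative sketch (not the project code): grid searches
# over the SVC and LinearSVC parameters described above.
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC, LinearSVC

svc_grid = {
    "kernel": ["rbf", "poly", "sigmoid"],
    "C": [0.5, 1.0, 1.5, 2.0, 2.5, 3.0],
    "degree": [1, 2, 3, 4],  # used by the "poly" kernel only
}
linear_grid = {
    "loss": ["hinge", "squared_hinge"],
    "C": [0.5, 1.0, 1.5, 2.0, 2.5, 3.0],
}
for estimator, grid in (
        (SVC(class_weight="balanced"), svc_grid),
        (LinearSVC(class_weight="balanced"), linear_grid)):
    search = GridSearchCV(estimator, grid,
                          scoring="accuracy", cv=5)
    # features: TF-IDF vectors concatenated with predictors
    search.fit(features, labels)
    print(type(estimator).__name__, search.best_params_)
\end{verbatim}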
\begin{figure}%
\centering
{\includegraphics[width=12.5cm]{statistics/linearsvm.png} }
\caption{Tuning the LinearSVC model parameters with the \ac{TFIDF} representation algorithm.}%
\label{fig:linearsvm}%
\end{figure}

\subsection{Random Forest}
In this project, the Random Forest implementation from the \textit{scikit-learn}\footnote{scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html} python library is used. Three parameters are tuned for the desired task: \textit{max\_features} (the maximum number of features each tree is allowed to use), \textit{estimator} (the number of decision trees), and \textit{criterion} (the algorithm used to measure the quality of splits in the nodes). Figure \ref{fig:randomforest} illustrates the effect of the number of trees in the forest on accuracy among the different configurations. The average accuracy score of the models increases by adding more trees to the forest. Besides, the \textit{gini} algorithm performs better than \textit{entropy} for measuring the quality of splits. Also, three different upper bounds are considered for the number of features when looking for a split: no boundary, \textit{sqrt} of the total number of features, and \textit{log2} of the total number of features. According to figure \ref{fig:randomforest}, applying no boundary leads to better accuracy on average; furthermore, as the boundary gets tighter, the average performance decreases. The best configuration for the Random Forest \ac{ML} model in this task is using the \textit{gini} algorithm to evaluate splitting quality, applying no boundary on the number of features, and having 125 decision trees in the forest.

\begin{figure}%
\centering
{\includegraphics[width=12.5cm]{statistics/randomforest.png} }
\caption{Different configurations of the Random Forest \ac{ML} model on the stance detection task. The type of line represents the type of boundary applied in each model. Solid, dashed and dotted lines stand for no boundary, sqrt of the total number of features and log2 of the total number of features, respectively.}%
\label{fig:randomforest}%
\end{figure}

\subsection{Logistic Regression}
We also adopted the Logistic Regression algorithm from the \textit{scikit-learn}\footnote{scikit-learn.org/stable/modules/generated/sklearn.linear\_model.LogisticRegression.html} python library. Many experiments are designed to evaluate the behavior of Logistic Regression models. The \textit{elasticnet} (Equation \ref{eq:logisel}) penalty algorithm is used in the penalization procedure for the \textit{saga} solver, and the \textit{l2} penalty algorithm is used for the \textit{sag}, \textit{lbfgs}, and \textit{newton-cg} solvers. \textit{Elasticnet} has a $\rho$ parameter which determines the ratio of the $l1$ to the $l2$ penalty in the \textit{\ac{SAGA}} solver. Figure \ref{fig:logistic1} illustrates the effect of $\rho$ values from $0$ to $0.9$ on the accuracy of stance detection. Besides, models with a regularization parameter from $0.5$ to $4.5$ are evaluated over the determined $\rho$ range. It can be inferred from figure \ref{fig:logistic1} that the $\rho$ parameter does not have a significant effect on the accuracy of the model, while a regularization parameter between $1$ and $2.5$ clearly results in higher accuracy than values outside this range. The same can be inferred from figure \ref{fig:logistic2}, which illustrates the effect of the regularization parameter on stance classification accuracy: models with different values of $\rho$ behave similarly, and the best performances happen when the regularization parameter is between $1$ and $2.5$.
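As an illustration, the \textit{elasticnet} Logistic Regression described above can be set up as follows; the chosen values and variable names are our own assumptions rather than the project's exact configuration.
\begin{verbatim}
# Illustrative sketch (not the project code): elasticnet
# Logistic Regression; "rho" corresponds to l1_ratio and
# C is the regularization parameter discussed above.
from sklearn.linear_model import LogisticRegression

clf = LogisticRegression(
    penalty="elasticnet", solver="saga",
    l1_ratio=0.5,   # rho: portion of l1 vs. l2 penalty
    C=1.5,          # regularization parameter
    max_iter=5000,
    class_weight="balanced")
clf.fit(features, labels)  # features: TF-IDF + predictors
\end{verbatim}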
\begin{figure}%
\centering
{\includegraphics[width=12.5cm]{statistics/logistic_elastic1.png} }
\caption{Effect of the $\rho$ parameter of the \textit{elasticnet} penalty on the stance detection task.}%
\label{fig:logistic1}%
\end{figure}

\begin{figure}%
\centering
{\includegraphics[width=12.5cm]{statistics/logistic_elastic2.png} }
\caption{Effect of the regularization parameter with the \textit{elasticnet} penalty on the stance detection task.}%
\label{fig:logistic2}%
\end{figure}

Another variant of the Logistic Regression setup is to use the $l2$ penalty with the desired solver algorithms. Figure \ref{fig:logistic3} compares the best \textit{\ac{SAGA}} solver with \textit{\ac{SAG}}, \textit{lbfgs}, and \textit{newton-cg}. The Logistic Regression with the \textit{lbfgs} solver and a regularization parameter equal to 1.5 records the highest accuracy.

\begin{figure}%
\centering
{\includegraphics[width=12.5cm]{statistics/logistic1.png} }
\caption{Comparison of Logistic Regression \ac{ML} models.}%
\label{fig:logistic3}%
\end{figure}

\subsection{Comparison}
In the previous sections, the parameters of the \ac{SVM}, LinearSVC, Random Forest, and Logistic Regression models were tuned. In this section, the models with the desired parameters are run 5 times each to obtain more reliable results and a fairer comparison. The average accuracy and the highest accuracy recorded in the tuning phase are compared in figure \ref{fig:all}. The highest achievable accuracy with \ac{ML} models for classifying the stance of a claim towards the headline of a news article is 74.01\%.

\begin{figure}%
\centering
{\includegraphics[width=12.5cm]{statistics/machinlearning.png} }
\caption{Comparison of the tuned \ac{ML} models on the stance detection task.}%
\label{fig:all}%
\end{figure}

\section{Dataset Balancing}
As mentioned in section \nameref{sec:dataset}, Figure \ref{fig:datacom}, the number of samples in the dataset classes is imbalanced, so oversampling should be performed on all classes except the majority class. In \cite{stance_persian}, the minority class forms only 7.4\% of the data (Figure \ref{fig:datacom}). In this project, the oversampling methods are evaluated against each other using the oversampling package of the \textit{imblearn}\footnote{imbalanced-learn.org/stable/references/over\_sampling.html} python library.

\begin{figure}%
\centering
\subfloat[\centering Article to Claim]{{\includegraphics[width=6cm]{statistics/stance/a2c_b1.png} }}%
\qquad
\subfloat[\centering Headline to Claim]{{\includegraphics[width=6cm]{statistics/stance/h2c_b1.png} }}%
\caption{Distribution of \ac{A2C} and \ac{H2C} label samples in the \cite{stance_persian} dataset after extending it with \cite{parsfever}.}%
\label{fig:datab1}%
\end{figure}

Firstly, the \ac{ADASYN}, SMOTE, SVMSMOTE, BorderlineSMOTE, and RandomOverSampler oversampling methods are applied to the \cite{stance_persian} dataset. In the \ac{ADASYN} algorithm, the number of nearest neighbors used to generate a new sample is set to 9\footnote{imbalanced-learn.org/stable/references/generated/imblearn.over\_sampling.ADASYN.html}. Each method is evaluated with the five selected \ac{ML} models. The red series in figure \ref{fig:balanc} stands for the accuracy of the models on the original \cite{stance_persian} dataset. LinearSVC, LogisticRegression, and GaussianNaiveBayes do not benefit from any oversampling method, whereas the \ac{ADASYN} oversampling method increases the accuracy of the other two models (\ac{SVM} and Random Forest) by $5$ percent on average. In the second step, the dataset is extended by the ParsFEVER (\cite{parsfever}) dataset.
Although the number of samples increases, the accuracy clearly decreases for all models except GaussianNB. A possible reason is that the data sources of the two datasets are entirely different and the headlines in ParsFEVER are much longer than those in the \cite{stance_persian} dataset.

\begin{figure}%
\centering
{\includegraphics[width=14.5cm]{statistics/balancing.png} }
\caption{Comparison of the oversampling methods with the evaluated \ac{ML} models.}%
\label{fig:balanc}%
\end{figure}

\section{Deep Learning}
\label{sec:dl}
In the end-to-end system, a pre-trained \ac{BERT}-based model is used at the top of the network, followed by two Dense layers and finally a Dense layer with 4 neurons to classify the stance. The inputs of our deep learning (\ac{DL}) models are the \textit{input ids}, \textit{token type ids}, and \textit{attention mask}. The \ac{BERT} (\cite{bert}), \ac{ParsBERT} (\cite{parsbert}), and \ac{ALBERT} (\cite{albert}) models are substituted for the pre-trained language model block in figure \ref{fig:dlschm}. Each epoch lasts about 26 seconds in the training procedure. Figure \ref{fig:deep} illustrates the model training procedure. In comparison to the \ac{ML} models, the \ac{DL} models boost the accuracy of \ac{H2C} stance detection by about 10 percent. The \ac{BERT}-based model is trained for 20 epochs. The validation loss starts to increase after epoch 11 and the validation accuracy does not change considerably afterwards. The best validation accuracy converges to $80.92\%$ on the \ac{H2C} dataset. The \ac{ParsBERT}-based model is also trained for 20 epochs, and its best validation accuracy converges to $81.11\%$ on the \ac{H2C} dataset. Besides, its loss score converges to a lower value than that of the \ac{BERT}-based model. In contrast, the best accuracy recorded by the \ac{ALBERT}-based language model over 20 epochs is at most $70.52\%$; figure \ref{fig:deep}, parts (e) and (f), illustrates its training procedure. Among these three alternatives of the \ac{BERT} algorithm, the \ac{ParsBERT}-based model records the best accuracy score, with $85.48\%$ accuracy on stance prediction.

\begin{figure}%
\centering
\subfloat[\centering]{{\includegraphics[width=6cm]{statistics/deep/bert_loss.png} }}%
\qquad
\subfloat[\centering]{{\includegraphics[width=6cm]{statistics/deep/bert_acc.png} }}%
\qquad
\subfloat[\centering]{{\includegraphics[width=6cm]{statistics/deep/parsbert_loss.png} }}%
\qquad
\subfloat[\centering]{{\includegraphics[width=6cm]{statistics/deep/parsbert_acc.png} }}%
\qquad
\subfloat[\centering]{{\includegraphics[width=6cm]{statistics/deep/albert_loss.png} }}%
\qquad
\subfloat[\centering]{{\includegraphics[width=6cm]{statistics/deep/albert_acc.png} }}%
\caption{Deep learning procedure on the \ac{H2C} stance detection task. Left figures illustrate the loss score and right figures illustrate the accuracy score of train and test data during the training procedure. (a, b) Pre-trained language model based on Google's \ac{BERT} (\cite{bert}) on a Persian corpus. (c, d) Pre-trained monolingual language model based on \ac{ParsBERT} (\cite{parsbert}) on a Persian corpus.
(e, f) Pre-trained language model based on \ac{ALBERT} (\cite{albert}) on a Persian corpus.}%
\label{fig:deep}%
\end{figure}

\begin{table*}[t]
\centering
\small
\caption{Comparison of \ac{H2C} stance detection models.}
\def\arraystretch{1.3}%
\setlength{\extrarowheight}{5pt}%
\begin{tabular}{|c|c|c|c|c|}
\hline{Model} & {Precision} & {Recall} & {F1} & {\ac{Acc}}\\ \hline \hline
{\ac{SVM}+\ac{ADASYN}} & {69.63} & {69.15} & {69.38} & {70.32}\\ \hline
{RandomForest+\ac{ADASYN}} & {71.24} & {69.14} & {70.17} & {73.02}\\ \hline
{\ac{BERT}} & {81.65} & {80.69} & {81.16} & {80.92}\\ \hline
{\ac{ParsBERT}} & {84.67} & {79.42} & {81.96} & {81.11}\\ \hline
{\ac{ALBERT}} & {75.75} & {64.09} & {69.43} & {70.52}\\ \hline
{\ac{ParsBERT}+\ac{ADASYN}} & {84.96} & {85.64} & {85.29} & {\textbf{85.48}}\\ \hline
\end{tabular}
\label{tbl:allstance}
\end{table*}

\section{Article to Claim}
The best stance detection models of both the \ac{ML} and \ac{DL} approaches are evaluated on the \ac{A2C} task. A maximum length of 400 characters is considered for the article content. Table \ref{tbl:a2c} shows the performance of these models on both the \ac{H2C} and \ac{A2C} tasks. Using the \ac{ADASYN} oversampler and \ac{ParsBERT} as the pre-trained language model leads to our best results on both the \ac{H2C} and \ac{A2C} tasks. We achieved an accuracy score of 80.62\% on the \ac{A2C} task. Our experiments on \ac{A2C} achieve a lower score than on the \ac{H2C} task, because inferring the stance from longer text is a harder task for the model.

\begin{table*}[t]
\centering
\small
\caption{Comparison between \ac{A2C} \ac{ML} and \ac{DL} models.}
\def\arraystretch{1.3}%
\setlength{\extrarowheight}{5pt}%
\begin{tabular}{|c|c|c|c|c|}
\hline
{} & \multicolumn{2}{c|}{\ac{H2C}} & \multicolumn{2}{c|}{\ac{A2C}}\\ \hline
{Model} & {F1} & {\ac{Acc}} & {F1} & {\ac{Acc}}\\ \hline \hline
{\ac{SVM}+\ac{ADASYN}} & {69.38} & {70.32} & {65.18} & {64.49}\\ \hline
{\ac{ParsBERT}} & {81.96} & {81.11} & {78.00} & {78.32}\\ \hline
{\ac{ParsBERT}+\ac{ADASYN}} & {85.29} & \textbf{{85.48}} & {80.29} & \textbf{{80.62}}\\ \hline
\end{tabular}
\label{tbl:a2c}
\end{table*}

\section{Fake News}
The best \ac{BERT}-based models for \ac{H2C} and \ac{A2C} are considered for this part. The predictions of these models based on 4 news articles are concatenated with the features described in section \ref{sec:fakenews}. The overall vector is then fed to a three-layer Multilayer-Perceptron model as the classifier. The \ac{ADASYN} oversampling method is also used to deal with the imbalanced classes. Figure \ref{fig:fakenews} illustrates the training procedure before and after oversampling the dataset. After oversampling, the trained model accuracy converges to 99\%, while before oversampling the model stops at $90.41\%$. According to figure \ref{fig:fakenews}, after adding the oversampled samples it is harder for the model to decrease the loss score on the train set in the first iterations, and the loss convergence takes longer. However, after balancing the dataset (Figure \ref{fig:fakenews}, part (a)), the loss converges to a value 0.2 lower than on the original dataset (Figure \ref{fig:fakenews}, part (c)).
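For illustration, the oversampling and classification step can be sketched with the \textit{imblearn} and \textit{scikit-learn} libraries as follows; the hidden-layer sizes and variable names are our own assumptions, not the exact configuration used in this project.
\begin{verbatim}
# Illustrative sketch (not the project code): ADASYN
# oversampling of the minority classes before training
# the fake news MLP classifier described above.
from imblearn.over_sampling import ADASYN
from sklearn.neural_network import MLPClassifier

# X: stance predictions concatenated with extra features
# y: fake/real labels
X_res, y_res = ADASYN(n_neighbors=9).fit_resample(X, y)
clf = MLPClassifier(hidden_layer_sizes=(128, 64, 32),
                    max_iter=500)
clf.fit(X_res, y_res)
\end{verbatim}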
\begin{figure}%
\centering
\subfloat[\centering]{{\includegraphics[width=6cm]{statistics/fakenews/dataset/loss.png} }}%
\qquad
\subfloat[\centering]{{\includegraphics[width=6cm]{statistics/fakenews/dataset/acc.png} }}
\qquad
\subfloat[\centering]{{\includegraphics[width=6cm]{statistics/fakenews/oversampled/loss.png} }}%
\qquad
\subfloat[\centering]{{\includegraphics[width=6cm]{statistics/fakenews/oversampled/acc.png}}}
\caption{Left figures illustrate the loss score and right figures illustrate the accuracy score of train and test data at each iteration of the training procedure. (a, b) Training procedure of the fake news detection model trained on the \cite{stance_persian} dataset. (c, d) Training procedure of the fake news detection model trained on the \cite{stance_persian} dataset oversampled by the \ac{ADASYN} (\cite{adasyn}) algorithm.}%
\label{fig:fakenews}%
\end{figure}
\documentclass[11pt,a5paper]{article}
\usepackage[utf8]{inputenc}
\usepackage[top=1cm]{geometry}
\usepackage{hyperref}

\title{Checkers - credits}
\author{Filip Jagiełłowicz\\ \small binary\[email protected]}
\date{March\\2018}

\begin{document}
\maketitle

\section{About project}
The game has been created as a university project.

\section{License}
The project is available for anyone to use under the MIT License (see the LICENSE text file in the main game folder).

\section{Credits}
The graphics in the game (board tiles and checker pieces) have been created by Lanea Zimmerman, released under the CC-BY 3.0 and OGA-BY 3.0 licenses, and downloaded from \href{https://opengameart.org/content/boardgame-tiles}{Open Game Art}.\\
The fonts used in the game are:
\begin{itemize}
\item Anonymous Pro, designed by Mark Simonson, available under the OFL license.\\Link: \href{https://fonts.google.com/specimen/Anonymous+Pro}{Google Fonts}
\item Misc Fixed
\end{itemize}
The game code has been written in Python 3.6.4.\\
Pygame 1.9.4 has been used for displaying graphics.
\end{document}
\documentclass[conference]{IEEEtran}
\usepackage{cite}
\usepackage{amsmath,amssymb,amsfonts}
\usepackage{algorithmic}
\usepackage{graphicx}
\usepackage{textcomp}
\usepackage{xcolor}
\usepackage{subcaption}
\usepackage{tikz}
\usepackage{balance}
\usetikzlibrary{arrows,automata,positioning,shapes}
\usetikzlibrary{decorations.pathreplacing,angles,quotes,bending}
\newtheorem{example}{Example}[section]
\begin{document}

\title{Superlight -- A Permissionless, Light-client Only Blockchain with Self-Contained Proofs \\and BLS Signatures}

\author{\IEEEauthorblockN{Roman Blum, Thomas Bocek}
\IEEEauthorblockA{\textit{Distributed Systems \& Ledgers Lab} \\
\textit{University of Applied Sciences Rapperswil}\\
Rapperswil, Switzerland\\
\{roman.blum, thomas.bocek\}@hsr.ch}
}

\maketitle

\begin{abstract}
Blockchain protocols are based on a distributed, public database where stored data is guaranteed to be immutable. The requirement that all nodes have to maintain their own identical, local copy of the database ensures security, while consensus mechanisms help decide which data gets added to the database and keep powerful adversaries from derailing the system. However, since the database that forms the foundation of a blockchain is a continuously growing list of blocks, scalability is an inherent problem of this technology. Some public blockchains need a few hundred gigabytes up to terabytes of storage. In this work, we present the Superlight concept with self-contained proofs (SCPs), which is designed to improve the scalability of blockchain protocols while preserving security and decentralization. Instead of all nodes having a local copy of the whole blockchain to verify a transaction, nodes can derive the validity of a transaction using only the block headers of the chain. To keep the block headers compact, BLS signatures are used to combine signatures. We provide a formal definition of SCPs and show the steps required for a client to create a proof that is accepted by other nodes. The advantage of such a light-client-only blockchain protocol is the lower storage requirement, while the drawbacks are an increased computational complexity due to BLS signatures, limited use-cases due to the lack of a global state, and the requirement for an interactive protocol between sender, receiver, and miner to create a transaction.
\end{abstract}

\section{Introduction}
A fundamental component of cryptocurrencies such as Bitcoin~\cite{Nakamoto08} or Ethereum~\cite{Wood14} is the underlying blockchain. A blockchain is a block-structured database held and updated independently by each node, and all nodes maintain their own copy of the blockchain. Using computational power, cryptography and consensus protocols, miners create blocks containing transactions and the other nodes agree on them. As a result, transactions can be publicly, immutably and securely stored on the blockchain, which gives the participants of the network shared control over the evolution of data.

In most blockchain protocols that exist today, including Bitcoin and Ethereum, the security strongly relies on miners having a local copy of the blockchain. However, blockchain protocols are known to have scalability issues because of the maintenance of this local copy. The concept of self-contained proofs (SCPs) inverts this paradigm. Miners do not rely on the full blockchain but instead only require the block headers and the transaction in question to verify it.
We show that with the utilization of SCPs in blockchain protocols, every participant of the network could potentially become a light client, resulting in higher scalability. Such a scalability gain comes with a computational cost due to BLS signatures, a limitation to use-cases that do not require a global state, and the requirement to create a transaction interactively by the sender, receiver, and miner. We will discuss its advantages and disadvantages.

\section{Motivation}
The increasing popularity of cryptocurrencies and smart contract platforms has raised awareness in academia and industry~\cite{BitcoinNG16, Zilliqa18, OmniLedger18}. However, the transaction throughput of these systems lags behind their centralized counterparts, and before they can become a viable alternative, blockchains must be able to scale and process transactions at speeds well above their current capabilities, especially when weighted against security and decentralization.

The necessity of a future-proof scalability solution is an inevitable challenge for Bitcoin, Ethereum and every other blockchain-based consensus protocol. There are numerous groups using different approaches to find a solution, most notably off-chain state channels~\cite{Poon18Lightning, RaidenNetwork}, sharding~\cite{Zilliqa18, Luu16}, and plasma~\cite{Poon18Plasma}. The Superlight concept with the utilization of self-contained proofs offers another scaling solution for use-cases which do not need a global state. Many use-cases, such as transferring assets (coins/tokens), do not need a global state. Also, self-contained proofs work well in a sharded environment. Sharding is a pattern~\cite{ShardingMS} in distributed software systems and commonly utilized in distributed databases. In these database systems, a shard is a horizontal partition, where each shard is stored on a separate database server instance holding its own distinct subset of data. In blockchain, a shard is a distinct subset of users, e.g., distinguished by their addresses. In a sharded blockchain scenario with SCPs, where miners only store block headers, miners could be randomly assigned to different shards and would only need the transactions of this particular shard for verification, resulting in no cross-shard communication, lower storage requirements and faster bootstrap times.

\section{Background and Related Work}
Self-contained proofs as part of a sharding concept were first introduced in~\cite{Blum18}, which builds the foundation of this work. The following example simplifies the idea of SCPs. Consider a blockchain as shown in Figure~\ref{fig:SCPExample}. Assume that a user was the receiver of 100 coins in transaction $F3$ and the sender of 50 coins in transaction $F6$. The user wants to spend the remaining 50 coins, disregarding previous transaction fees. A self-contained proof must contain two Merkle proofs, one for each transaction $F3$ and $F6$, i.e., $SCP = \{Proof_1, Proof_2\}$, where $Proof_1 = \{F5, F6, B2\}$ represents the proof where the user was the sender of coins and $Proof_2 = \{F3, F4, A1\}$ represents the proof where the user was the receiver of coins.
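To make the example more concrete, checking one of the Merkle proofs contained in such an SCP could be sketched as follows; this is a simplified Python illustration with our own function names (sibling ordering is omitted for brevity), not code from the Superlight prototype.
\begin{verbatim}
# Illustrative sketch (not the Superlight code):
# verifying one Merkle proof of an SCP against
# the Merkle root stored in a block header.
import hashlib

def h(a: bytes, b: bytes) -> bytes:
    return hashlib.sha256(a + b).digest()

def verify_proof(tx_hash, siblings, root):
    # siblings: intermediate hashes of the proof
    node = tx_hash
    for m in siblings:
        node = h(node, m)
    return node == root

# An SCP is valid only if every contained Merkle
# proof checks out against the Merkle root of its
# block header.
\end{verbatim}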
\begin{figure}[hbt]
\centering
\begin{tikzpicture}[scale=0.45,node distance=20mm,
	every node/.style={transform shape},
	level/.style={sibling distance=40mm/#1} ]
\tikzstyle{vertex}=[draw,circle,minimum size=36pt,inner sep=0pt]
\tikzstyle{vertexsmall}=[draw,circle,minimum size=24pt,inner sep=0pt]
\tikzset{node style/.style={state,minimum width=12mm,minimum height=12mm,rectangle}}
\node[node style] (b1){};
\node[node style, right=of b1] (b2){};
\node[node style, right=of b2] (b3){};
\node[node style, right=of b3] (b4){};
\node[node style, right=of b4] (b5){};
\node[node style, right=of b5] (b6){};
\node [vertex,below=1cm of b2] (r1){$M1$}
  child { node [vertex] {$A1$}
    child { node [vertex] {$F1$} }
    child { node [vertex] {$F2$} } }
  child { node [vertex] {$A2$}
    child { node [vertex,very thick] {$F3$} }
    child { node [vertex] {$F4$} } };
\node [vertex,below=1cm of b5] (r2){$M2$}
  child { node [vertex] {$B1$}
    child { node [vertex] {$F5$} }
    child { node [vertex,very thick] {$F6$} } }
  child { node [vertex] {$B2$}
    child { node [vertex] {$F7$} }
    child { node [vertex] {$F8$} } };
\draw[>=latex,auto=left,every loop]
  (b2) edge node {} (b1)
  (b3) edge node {} (b2)
  (b4) edge node {} (b3)
  (b5) edge node {} (b4)
  (b6) edge node {} (b5);
\draw[thin,shorten >=4pt,shorten <=4pt,>=stealth,dotted]
  (r1) edge node {} (b2)
  (r2) edge node {} (b5);
\end{tikzpicture}
\caption{A user has to include two Merkle proofs for $F3$ and $F6$ in the next transaction.\label{fig:SCPExample}}
\end{figure}

Furthermore, SCPs follow a similar idea to non-interactive proofs of proof-of-work (NIPoPoWs~\cite{Kiayias17}), where certain blockchain properties can be verified requiring resources only logarithmic in the length of the blockchain. NIPoPoWs were introduced by researchers of Cardano~\cite{Cardano}, a decentralized, public and fully open source blockchain project that supports smart contracts. It is important to point out that NIPoPoWs and SCPs differ in their goals: NIPoPoWs are short stand-alone proofs to verify that an event (e.g. a payment) happened on a proof-of-work (PoW) based blockchain without connecting to the blockchain network and without downloading all block headers~\cite{NIPoPoWs}. They aim to increase the efficiency of mobile wallets and to improve communication and interoperability between blockchains and sidechains. With SCPs, nodes are required to download all block headers before being able to send a transaction.

\section{System Setting and Assumptions}
Before presenting the design of self-contained proofs, first the system settings and underlying assumptions are presented.

\textbf{Entities.} The entities in our assumption are of two kinds, that is,
\begin{itemize}
\item \textit{miners} who maintain the longest blockchain, validate transactions with self-contained proofs, append new blocks on top of the longest chain, and broadcast them as soon as they are discovered, and
\item \textit{nodes} who use the network, calculate self-contained proofs, send and receive transactions, and validate blocks.
\end{itemize}

\textbf{Type of Blockchain.} We assume that our system is based on an account-based blockchain where funds can be transferred from one account to another. An account can be controlled by the owner of the private key and is identified by an address. We do not base our assumptions on transactions involving smart-contract calls or other types of state alteration except the balance of an account.
Although our prototype is implemented in the Bazo blockchain, which is a proof-of-stake (PoS) blockchain, we do not rely on a specific consensus protocol such as PoW or PoS.

\textbf{Transactions} fulfill the purpose of sending units of blockchain value (e.g. coins or tokens) from one address to another. A transaction, commonly referred to as $Tx$, contains at least the addresses of both the sender and the receiver, a signature that proves the origin of the transaction, \textit{and} a signature that proves the receiver has received this transaction. In this paper, we distinguish between \textit{sending} a transaction, which refers to the process of subtracting a particular amount of coins \textit{from} the sender, and \textit{receiving} a transaction, which refers to the process of adding a particular amount of coins \textit{to} the receiver. If a new address is identified in a transaction, the block header stores this new address. Creating new addresses requires a fee, as this increases the size of the block header. For comparison, the current number of addresses/accounts in Ethereum is around 54 million~\cite{Acc}. Although the Bazo blockchain supports smart contracts, we consider only asset transfers, which do not need a global state.

\textbf{Bloom filters.} Each block header contains a Bloom filter. A Bloom filter is a space-efficient data structure that provides a fast way to check the existence of an element in a set and returns true or false as a result, subject to the false-positive probability of the Bloom filter~\cite{BloomFilter}. However, as the number of elements increases, the probability of returning false-positives increases, i.e., a Bloom filter can claim that an object is a member of a set when it is not. Bloom filters never give false-negatives. In our assumption, a Bloom filter of a block header can be queried with an address $a$ and returns true if the block header contains any transaction where $a$ is either the sender or the receiver. Since all addresses are in the block headers and known beforehand, a perfect Bloom filter can be constructed by increasing the length until no collision occurs with addresses not involved in a transaction.

\textbf{Merkle Proofs} and Merkle trees are a fundamental component of blockchains, allowing a secure and efficient way to verify large data structures~\cite{MerkleTree}. Every block header contains a Merkle root obtained from a Merkle tree. A Merkle tree creates a single value (the Merkle root) that proves the integrity of all transactions by hashing corresponding nodes together and climbing up the tree until the root hash is obtained. As long as the root hash is publicly known and trusted, it is possible for anyone to use a Merkle proof to verify the position and integrity of a transaction within a block header, since it is computationally infeasible to guess a transaction hash that results in a particular root. Typically, each leaf of a Merkle tree represents a single transaction. The number of leaves equals the number of transactions $n$ in a block header, and the height of the Merkle tree equals $\log_2(n)$. A self-contained proof consists of zero or more Merkle proofs provided by the sender of the transaction.

\textbf{BLS Signatures~\cite{BLS}} can efficiently aggregate signatures. This signature scheme uses curve pairing to combine signatures from multiple senders for multiple messages into one single signature. The aggregation can be done by the miner and can be verified by anyone.
However, BLS signature verification is more computationally intensive than verifying regular signatures.

\section{Design}
\subsection{Self-Contained Proofs}
Simply put, a self-contained proof consists of one or more Merkle proofs, where each Merkle proof proves the existence of a transaction in a particular block header. We can formally define a self-contained proof and its calculation as follows.

A Merkle proof for block header $b$ contains a transaction hash $t$, where $t$ is the hash of the transaction whose existence in $b$ we want to prove, and a set of intermediate hashes $M$ required to build a Merkle tree, where $m \in M$ and $M = \{m_1, ..., m_n\}$. We can iteratively hash these values together to calculate the Merkle root, i.e.,
\begin{align*}
h_1 &= hash(t, m_1),\\
h_2 &= hash(h_1, m_2),\\
&\;\;\vdots \notag \\
h_n &= hash(h_{n-1}, m_n),
\end{align*}
where $h_n$ is the computed Merkle root. As a last step, let $root_b = MerkleRoot(b)$ be the Merkle root of block header $b$. The validator compares $h_n$ with the Merkle root $root_b$ to determine whether the Merkle proof is valid, i.e.,
\begin{itemize}
\item if $h_n = root_b$, the proof is valid and the algorithm proceeds to verify the next proof, or
\item if $h_n \neq root_b$, the proof is invalid and the algorithm stops.
\end{itemize}

\subsection{Block Header}
In order to verify the correctness of an SCP, the following information needs to be present in the block header:
\begin{itemize}
\item New addresses found in transactions for the current block header. If a node has all block headers, it knows all addresses in the blockchain.
\item A perfect Bloom filter of the sender and receiver addresses of the transactions involved in that block header.
\item BLS signatures from all senders and receivers of the transactions involved in that block header, to verify that a sender or receiver was indeed involved.
\end{itemize}

Every new address found in transactions for the current block header is stored in the block header. It is important for a miner to know all past addresses for the creation of a perfect Bloom filter, as it needs to check that no false-positives occur. Since the block header contains a Bloom filter, two situations need to be considered: a) the sender or receiver address is not part of the Bloom filter, and b) the Bloom filter suggests that a sender or receiver is part of the Bloom filter without actually being part of it. While a) is non-critical, as the transaction can be included in the next block header by another miner, which can collect the Tx fee, b) is critical. If a perfect Bloom filter is created, this cannot happen; however, a rogue miner could set all bits to true and thereby create a wrong Bloom filter. If a rogue miner sets all the bits in the Bloom filter to true, indicating that every sender address and receiver address needs to show an SCP for this block header, a single miner can effectively deactivate all accounts. In order to prevent the creation of a wrong Bloom filter, BLS signatures are used. Thus, each sender and receiver needs to create such a signature, which will be aggregated by the miner. If the Bloom filter indicates the presence of an address but the BLS signature is invalid, the whole block header is invalid. Such an invalid block header can be checked and rejected by any participant, as the list of all addresses is known to all participants.

\subsection{Blocks with Aggregated Transactions}
SCPs work well when there is only one transaction per user and per block header.
However, an adversarial user could create a fraudulent proof if there is more than one transaction sent by the same address in a single block header. Example~\ref{ex:FraudMerkleProof} demonstrates how this potential vulnerability could be exploited.

\begin{example}
\label{ex:FraudMerkleProof}
Consider a Merkle tree as shown in Figure~\ref{fig:FraudMerkleProof}. Assume that transactions $T_1$ and $T_4$ were sent by the same user, i.e., the user has spent coins in two different transactions. Querying the Bloom filter for this block header returns \textit{true}; however, it does not return the number of transactions that are contained within the Merkle tree. The adversarial user could create a valid Merkle proof with the values $\{T_1, T_2, H_2\}$, without any mention of $T_4$.
\begin{figure}[hbt]
\centering
\begin{tikzpicture}[scale=0.5,node distance=20mm,
	every node/.style={transform shape},
	level/.style={sibling distance=40mm/#1} ]
\tikzstyle{vertex}=[draw,circle,minimum size=36pt,inner sep=0pt]
\tikzset{node style/.style={state,minimum width=12mm,minimum height=12mm,rectangle}}
\node[node style] (b1){};
\node[node style, right=of b1] (b2){};
\node[node style, right=of b2] (b3){};
\node [vertex,below=1cm of b2] (r1){}
  child { node [vertex] {$H_1$}
    child { node [vertex,very thick] {$T_1$} }
    child { node [vertex] {$T_2$} } }
  child { node [vertex] {$H_2$}
    child { node [vertex] {$T_3$} }
    child { node [vertex,very thick] {$T_4$} } };
\draw[>=latex,auto=left,every loop]
  (b2) edge node {} (b1)
  (b3) edge node {} (b2);
\draw[thin,shorten >=4pt,shorten <=4pt,>=stealth,dotted]
  (r1) edge node {} (b2);
\end{tikzpicture}
\caption{A Bloom filter returns true for any number $\geq 1$ of matching transactions contained in a set. This vulnerability can be exploited with fraudulent Merkle proofs.\label{fig:FraudMerkleProof}}
\end{figure}
\end{example}

Per-block transaction aggregation mitigates the problem shown in example~\ref{ex:FraudMerkleProof}. Let $b$ be a new (empty) block header, $t$ be a transaction of the transaction pool $T$, and $M$ be the set of transactions being added to $b$, with $M \subseteq T$. A miner creates one transaction bucket, short $TxBucket$, for each unique address, resulting in $n$ buckets, where $n \leq len(M)$ and $\Sigma^n_{i = 1}\ len(TxBucket_i) = len(M)$. For the next step, the transaction bucket data structure is introduced. A $TxBucket$ consists of the following properties:
\begin{itemize}
\item \textbf{Address}: The address of a unique sender or receiver within a block header.
\item \textbf{Relative Balance}: The sum of all transaction amounts, i.e., $RelativeBalance = \Sigma^{len(TxBucket)}_{i = 1}\ amount(Tx)$. Note that this value can be positive or negative.
\item \textbf{Merkle Root}: The Merkle root is obtained from the Merkle tree constructed from all transactions where the address of the sender equals the bucket's address.
\end{itemize}
As a result, querying the Bloom filter for this block header returns \textit{true} if a $TxBucket$'s address equals the queried address. Fraudulent proofs are mitigated, because transactions of the same sender (address) are aggregated into a $TxBucket$. A user has to provide all the transactions within a bucket in order for others to recompute the relative balance and the Merkle root of the bucket.
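To illustrate the bucket structure, a per-address aggregation could be sketched as follows; this is our own illustration with assumed transaction attributes (sender, receiver, amount), not the Superlight implementation.
\begin{verbatim}
# Illustrative sketch (not the Superlight code):
# aggregating a block's transactions into
# per-address buckets with a relative balance.
from dataclasses import dataclass, field

@dataclass
class TxBucket:
    address: str
    relative_balance: int = 0
    txs: list = field(default_factory=list)

def build_buckets(transactions):
    buckets = {}
    for tx in transactions:
        pairs = ((tx.sender, -tx.amount),
                 (tx.receiver, +tx.amount))
        for addr, delta in pairs:
            bucket = buckets.setdefault(
                addr, TxBucket(addr))
            bucket.relative_balance += delta
            bucket.txs.append(tx)
    # A Merkle root is then built from the
    # transactions of each bucket and placed
    # in the block header.
    return buckets
\end{verbatim}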
\begin{figure}[hbt]
\centering
\begin{tikzpicture}[scale=0.5,
	level 1/.style={sibling distance=40mm},
	level 2/.style={sibling distance=20mm} ]
\tikzstyle{vertex}=[draw,circle,minimum size=18pt,inner sep=1pt]
\tikzstyle{rect}=[draw,rectangle,minimum size=12pt,inner sep=1pt]
\tikzset{node style/.style={state,minimum width=18pt,minimum height=18pt,rectangle}}
\node[node style] (b1){};
\node[node style, right=of b1] (b2){};
\node[node style, right=of b2] (b3){};
\node [vertex,below=5mm of b2] (r1){}
  child { node [vertex] {}
    child { node [vertex] {$A$} }
    child { node [vertex] (b) {$B$} } }
  child { node [vertex] {}
    child { node [vertex] {$C$} }
    child { node [vertex] {$D$} } };
\node [rect, below=5mm of b] (r2) {}
  child { node [rect] {}
    child { node [rect] {$A$} }
    child { node [rect] {$C$} } }
  child { node [rect] {}
    child { node [rect] {$C$} }
    child { node [rect] {$D$} } };
\draw[>=latex,auto=left,every loop]
  (b2) edge node {} (b1)
  (b3) edge node {} (b2);
\draw[thin,shorten >=4pt,shorten <=4pt,>=stealth,dotted]
  (r1) edge node {} (b2)
  (r2) edge node {} (b);
\end{tikzpicture}
\caption{A block's Merkle root is built from a Merkle tree, where each leaf represents a unique sender or receiver address, with each leaf containing another Merkle tree, where each leaf represents a transaction.\label{fig:MerkleTreeAggTx}}
\end{figure}

\subsection{Client-Side Proof Calculation}
A user is only able to create a self-contained proof if it can provide one Merkle proof for each transaction it was involved in. This requires an interactive protocol between the sender, the receiver, and the miner. It is important that sender and receiver know about all transactions in which they are involved, as these transactions are part of the SCP and without them, access to the assets is not possible. A user needs to keep track of all $Tx$ where the sender or the receiver of the transaction equals the user's address. The client software of the blockchain must be adapted to these requirements. Since the receiver needs to sign the message as well, \textit{always-on} client software is required.

\textbf{Always-on} client software maintains a constant connection to the network and receives all block headers sent through the network. Upon receiving a block header, the algorithm proceeds as follows:
\begin{enumerate}
\item Check if the block header has already been processed. If yes, stop the algorithm, otherwise continue.
\item Check the validity of the block header: Get a list of all involved addresses by using the Bloom filter and the list of known addresses. For each match, check the BLS signature to verify that both sender and receiver have agreed on the $Tx$. If the verification is successful, broadcast the block header to the network; if not, stop the algorithm.
\item Save the hash of the block header to confirm that the block header has been processed.
\end{enumerate}

Creating a transaction with a valid SCP works as follows:
\begin{enumerate}
\item Create a new transaction and set the required properties, e.g. sender and receiver (including their signatures), amount, fee, etc.
\item Sender and receiver need to create a BLS signature based on a unique but deterministic message. Before such a signature is provided, both sender and receiver need to be sure that they have a valid Merkle proof for the upcoming block header. Thus, block header creation needs two phases: a 1st phase, gathering transactions, and a 2nd phase with a fixed set of transactions, gathering BLS signatures from senders and receivers.
\item Set the array of Merkle proofs in the transaction.
\item Publish the transaction to the network.
\end{enumerate}

\subsection{Proof Verification by Miners}
\label{Design:ProofVerification}
When miners try to compute a block header, they pick the transactions from the transaction pool that they want to be added to the next block header. They may include any transactions they want to form a tree of transactions, which is later hashed into the Merkle root and referenced in the block's header. For a block header to be accepted by the network, it needs to contain only valid transactions. It is crucial that miners follow certain rules~\cite{ProtocolRules} in order to maintain consistency across the network.

To create a perfect Bloom filter, the miner uses the transactions from the transaction pool and sets the length of the Bloom filter to a certain size. Once the Bloom filter is filled with the sending and receiving addresses, the miner checks all the other addresses for a match. If any of the other addresses match, the Bloom filter needs to be enlarged and the process starts over. Once a perfect Bloom filter is constructed, the BLS signatures need to be aggregated by the miner. These BLS signatures need to be provided by the senders and receivers of the transactions that are part of the block header. This prevents a rogue miner from deactivating accounts by setting all bits in the Bloom filter to true.

A transaction must provide a valid self-contained proof. A formal definition of the proof verification algorithm is provided below. Let $b$ be the variable that holds the current block header, where the height $h$ equals $b$'s height. Furthermore, let $i$ be the index in the set of Merkle proofs $M$, where $m \in M$ and $1 \leq i \leq len(M)$. Let $T$ be the set of transactions proved by the Merkle proofs $M$, with $len(M) = len(T)$. Let $c$ be the accumulated, computed balance of coins during verification. Lastly, let $a$ be the sender's address of the transaction $x$ containing the self-contained proof.
\begin{enumerate}
\item Get the block header at height $h$ (initially the most recent block header, i.e., $h = height(b)$), check its validity, set it to $b$, and check whether the Bloom filter returns true for $a$. If no, set $h \leftarrow h - 1$ and repeat this step. If yes, continue.
\item Get the Merkle proof $m_i$ at index $i$ and check if $height(m_i) = h$. If no, stop the algorithm and deem the SCP invalid, because $M$ does not contain a proof for this block header. If yes, continue.
\item Calculate the Merkle root $r_i$ using $m_i$ and $t_i$. If $r_i \neq MerkleRoot(b)$, stop the algorithm because the Merkle proof is invalid. If $r_i = MerkleRoot(b)$ and the receiver in $t_i$ equals $a$, set $c \leftarrow c + amount(t_i)$. If $r_i = MerkleRoot(b)$ and the sender in $t_i$ equals $a$, set $c \leftarrow c - amount(t_i)$. Continue.
\item Set $h \leftarrow h - 1$ and $i \leftarrow i + 1$. If $h \geq 0$, go to step 1.
\item Check if the desired amount of $x$ is less than or equal to the computed balance, i.e., check if $amount(x) \leq c$. If yes, the self-contained proof is valid, otherwise it is invalid.
\end{enumerate}
The algorithm determines for every block header, starting from the most recent back to the genesis block header, whether the Bloom filter returns true for the sender of the transaction. In case the Bloom filter returns true, the algorithm looks up the Merkle proof for this block header and compares the calculated Merkle root with the block's Merkle root.
By repeating these steps, the algorithm derives the computed balance of the sender and verifies whether it is greater than or equal to the amount spent in the transaction.

\section{Conclusion}
In this paper, we presented the Superlight concept, a light-client only blockchain. In this blockchain, the information about senders and receivers is stored in a perfect Bloom filter in the block header, together with BLS signatures and all known addresses. The light client stores private keys and self-contained proofs. With this approach, the size of a public blockchain can be significantly reduced.

In a scenario where a blockchain protocol is solely based on efficient, lightweight clients, where each client is in possession of all block headers and its own transactions, clients not only have to keep their secret key private, but also their Merkle proofs. If a client loses its Merkle proofs, it will no longer be able to create a valid self-contained proof, and it will lose access to its funds. A positive side effect in this scenario is the per-miner storage decrease, because clients are responsible for their own transactions and block bodies do not have to be stored. A negative side effect is that a receiver needs to be online in order to create the BLS signature to receive funds. Furthermore, the sender, receiver, and miner need to exchange signatures in an interactive protocol. Another negative side effect is that only use-cases that do not need a global state are supported. In our prototype we used the transfer of assets, which does not need a global state.

At their core, self-contained proofs are an interesting approach to transaction verification, allowing a blockchain with light and full clients to be transformed into a light-client-only blockchain. They offer promising scalability and more efficient mobile wallets in future blockchains.

\section{Future Work}
\textbf{Security Considerations.} The concept of self-contained proofs has been implemented as a proof-of-concept in Bazo~\cite{Bazo}, an open-source research blockchain to test and evaluate mechanisms such as proof-of-stake, storage pruning, sharding, etc. However, a rigorous security analysis to identify potential threats is crucial before using SCPs in production.

\textbf{Proof Size.} The size of a self-contained proof in a transaction could potentially become very large when a user has to provide many small transactions in order to spend a large amount of coins in a single transaction. One way to solve this problem is to introduce checkpoints or aggregation to the blockchain.

\textbf{Sharding.} We are currently exploring ways to implement self-contained proofs in a sharded blockchain environment. We may consider Merkle Patricia trees \cite{MerklePatriciaTree} when smart contracts and other additional state come into play.

\textbf{Local and Global State.} While the Superlight concept supports only local state, an efficient way has to be found to store global state in the block header. Also, the language used to write smart contracts needs to differentiate between local and global storage.

\textbf{Interactive Protocol.} Creating a transaction requires an interactive protocol between the sender, the receiver, and the miner. The miner suggests a set of transactions that will be included in a block header and proposes Merkle proofs to all their senders and receivers. Designing this protocol is challenging, especially in the case of churn, as churn would invalidate the Merkle proofs proposed by the miner.

\balance
\begin{thebibliography}{00}
\bibitem{Nakamoto08} S.
Nakamoto, Bitcoin: A peer-to-peer electronic cash system, http://bitcoin.org/bitcoin.pdf, 2008. \bibitem{Wood14} G. Wood, Ethereum: A secure decentralised generalised transaction ledger, Ethereum project yellow paper, 2014, pp.1--32. \bibitem{Cardano} Cardano, https://www.cardano.org, visited on 09-12-2018. \bibitem{BitcoinNG16} I. Eyal, A. Gencer, E. Gun Sirer, R. Van Renesse, Bitcoin-NG: A Scalable Blockchain Protocol, 13th {USENIX} Symposium on Networked Systems Design and Implementation ({NSDI} 16), 2016, pp. 45--59. \bibitem{OmniLedger18} E. Kokoris-Kogias, P. Jovanovic, L. Gasser, N. Gailly, E. Syta, B. Ford, OmniLedger: A Secure, Scale-Out, Decentralized Ledger via Sharding, 2018, pp. 19--34. \bibitem{Zilliqa18} Zilliqa Team, The Zilliqa Technical Whitepaper, https://docs.zilliqa.com/whitepaper.pdf, 2018. \bibitem{Kiayias17} A. Kiayias, A. Miller, D. Zindros, Non-Interactive Proofs of Proof-of-Work, Cryptology ePrint Archive, Report 2017/963, 2017. \bibitem{NIPoPoWs} Non-Interactive Proofs of Proof-of-Work, https://nipopows.com, visited on 09-12-2018. \bibitem{Blum18} R. Blum, Scalability for the Bazo Blockchain with Sharding, https://dsl.hsr.ch, pp. 12--13, 2018. \bibitem{Poon18Lightning} J. Poon, T. Dryja, The Bitcoin Lightning Network: Scalable Off-Chain Instant Payments, https://lightning.network/lightning-network-paper.pdf, 2018. \bibitem{RaidenNetwork} Raiden Network - Fast, cheap, scalable token transfers for Ethereum, https://raiden.network, visited on 09-12-2018 \bibitem{Poon18Plasma} J. Poon, V. Buterin, Plasma: Scalable Autonomous Smart Contracts, http://plasma.io/plasma.pdf, 2018. \bibitem{Luu16} L. Luu, V. Narayanan, C. Zheng, K. Baweja, S. Gilbert, P. Saxena, A Secure Sharding Protocol For Open Blockchains, Proceedings of the 2016 ACM SIGSAC Conference on Computer and Communications Security, CCS '16, pp. 17--30, 2016. \bibitem{ShardingMS} Microsoft, Sharding Pattern, https://docs.microsoft.com/en-us/azure/architecture/patterns/sharding, visited on 10-12-2018. \bibitem{ProtocolRules} Protocol Rules, https://en.bitcoin.it/wiki/Protocol\_rules, visited on 10-12-2018. \bibitem{Bazo} Bazo Blockchain, https://github.com/bazo-blockchain, visited on 10-12-2018. \bibitem{BloomFilter} B. H. Bloom. Space/time trade-offs in hash coding with allowable errors, Communications of the ACM, 13(7):422–426, 1970. \bibitem{MerkleTree} R. Merkle, Method of providing digital signatures, The Board Of Trustees Of The Leland Stanford Junior University, US patent 4309569, 1982. \bibitem{MerklePatriciaTree} Merkle Patricia Tree, https://github.com/ethereum/wiki/wiki/Patricia-Tree, visited on 10-12-2018. \bibitem{BLS} D. Boneh, C. Gentry, H. Shacham, and B. Lynn: Aggregate and Verifiably Encrypted Signatures from Bilinear Maps, Eurocrypt 2003, LNCS 2656, pp. 416-432, 2003. \bibitem{Acc} Evolution of the total number of Ethereum accounts, https://www.etherchain.org/charts/totalAccounts, visited on 04-01-2019. \end{thebibliography} \end{document}
% JuliaCon proceedings template
\documentclass{juliacon}
\setcounter{page}{1}
\usepackage{graphicx}
\hypersetup{colorlinks=true}

\begin{document}

\input{header}

\maketitle

\begin{abstract}
Flux \cite{innes:2018} is a machine learning framework written in the numerical computing language Julia \cite{bezanson2017julia}. The framework makes writing layers as simple as writing mathematical formulae, and its advanced AD, Zygote \cite{DBLP:journals/corr/abs-1810-07951}, applies automatic differentiation (AD) to calculate derivatives and train the model. It makes heavy use of Julia's language and compiler features to carry out code analysis and make optimisations. For example, Julia's GPU compilation support \cite{besard:2017} can be used to JIT-compile custom GPU kernels for model layers \cite{CuArrays.jl}. Flux also supports a number of hardware options, from CPUs and GPUs to TPUs via XLA.jl, which compiles Julia code to XLA: an advanced compiler for linear algebra that is capable of greatly optimizing speed and memory usage in large deep learning models. ONNX.jl is an Open Neural Network Exchange backend for the Flux.jl deep learning framework. ONNX.jl supports directly importing high quality ONNX standard models into Flux, thus saving time and reducing the need for additional computation resources. This paper aims at introducing ONNX.jl and explaining how it fits into the bigger picture: how the Julia language, specifically Flux.jl and ONNX.jl, can be used as a starting point for high quality transfer learning of large deep learning models.
\end{abstract}

\section{Introduction}
The Julia language was introduced to solve the two language problem: in simple words, languages that are simple to write (high-level) tend to be slow, while those which are more difficult to use (low-level) are much faster. This is because most high-level languages were not designed to process large amounts of data efficiently, so engineers, researchers and developers have a hard time writing high-performance software in them. At the moment, the common practice is to write the core of the software in a low-level language (C/C++/Fortran) and wrap it in a high-level language (Python). This results in optimized performance and ease of use. The Julia language aims to offer the best of both worlds: it provides a high-level syntax but manages to perform as fast as C (sometimes even faster).

Flux.jl is a library for implementing machine learning models, written completely in the Julia programming language. At the heart of Flux.jl lies Zygote.jl: a source-to-source automatic differentiation (AD) library that makes complete use of the Julia compiler to generate the backward pass during the training phase of a neural network, with full support for control flow, recursion, closures and data structures. Implementing a model in Flux.jl is as simple as writing regular Julia code: one writes the formulae of the model, and Zygote.jl computes the derivatives seamlessly. Flux.jl also provides support for other hardware options using external packages such as CuArrays.jl and CLArrays.jl. CuArrays is written completely in Julia, making the implementation of GPU kernels very simple. Making a model run on a GPU can be done in a hassle-free manner: it is as simple as calling a few functions to transfer data to the GPU. Flux.jl also has support for running models on Google's Tensor Processing Unit (TPU). TPUs enable very fast linear algebra computation.
Running Flux models on TPUs is possible through XLA.jl that compiles Julia code to XLA. The FluxML ecosystem provides a number of supporting packages that provide additional functionalities , some of them being (apart from the aforementioned Flux.jl, Zygote.jl and XLA.jl); \begin{itemize} \item ONNX.jl : Open Neural Network eXchange backend for Flux.jl \item Metalhead.jl \cite{Metalhead.jl}: Simple plug and play pretrained Flux.jl computer vision models. \item Torch.jl \cite{Torch.jl}: This package aims at exposing Torch.tensor types in Julia. \item IRTools.jl \cite{IRTools.jl} : Provides an IR format that is easy to manipulate. \item FluxJS.jl \cite{FluxJS.jl} : Runs Flux models in the browser, via tensorflow.js \item model-zoo \cite{model-zoo}: Collection of implementation of various Flux deep learning models. \end{itemize} \section{Open Neural Network eXchange (ONNX)} Open Neural Network Exchange (ONNX) \cite{bai2019} is an open ecosystem that empowers AI developers to choose the right tools as their project evolves. ONNX provides an open source format for AI models, both deep learning and traditional machine learning. ONNX defines the computation graph for a deep learning model along with various operators used in the model. It provides a set of specifications to convert a model to a basic ONNX format, and another set of specifications to get the model back from this ONNX form. At a high level, ONNX is designed to allow framework inter-operability. There are many excellent machine learning libraries in various languages : PyTorch \cite{bai2019} , TensorFlow \cite{abadi2016tensorflow} , MXNet \cite{chen2015mxnet} , and Caffe \cite{jia2014caffe} are just a few that have become very popular in recent years, but there are many others as well. Machine learning models can be converted to a serialized ONNX format which can then be run on a number devices. ONNX Runtime is an inference engine written in C++ framework used to deploy ONNX format models into production. It works on diverse hardware and support both deep learning as well as traditional machine learning models. \subsection{Where does ONNX come in?} ONNX is a format for representing deep learning models, which can be further run on numerous devices without worrying much about the implementation. This helps researchers, developers and engineer to focus on the problem in hand without worrying much about the peripherals, such as the framework to use, the ability to run a model trained using this particular framework on specialized hardware. ONNX is is usable anywhere from small mobile devices to large server farms, across chipsets and vendors, and with extensive runtimes and tools support. ONNX reduces the friction of moving trained AI models among your favorite tools and frameworks and platforms. A simple example of how ONNX is ideal for ML is the case when large deep learning models need to be deployed. Consider the simple case of deploying a Deep Learning model to an iOS application. This particular model can be implemented in any framework : TensorFlow, PyTorch, MXNet just to name a few. However, iOS applications expect to use CoreML inside the application. Up until now, developers have been porting large models to different frameworks, which is a waste of time and energy, better spent somewhere else. This is also retraining the entire model from scratch, which isn't efficient. This makes the entire process cumbersome and impractical. 
ONNX exists to solve this very problem : By connecting the common dots from different frameworks, ONNX makes it possible to express a model of type A to type B, thus saving time and the need to train the model again. \section{ONNX backend in Julia} ONNX.jl is an ONNX backend written in Julia for the Flux.jl machine learning framework. It can be used for high-quality inference of pretrained ONNX format machine learning models. At the heart of it, ONNX.jl solves a compiler problem by dealing with intermediate code representations to generate readable graphs. While doing this, ONNX operators are mapped to corresponding Flux layers, thus tracing out the model's computation graph at the end. This graph can then be travelled to generate the Julia code for the model. The first step towards reading any ONNX model in Julia is to have access to all the data structures in the model, that essentially hold all the required information to load the model. This information includes everything that is used to completely define the model: Hyper-parameters and parameters in the model. So in the case of a simple Convolutional Neural Network, this may contain information such as the number of layers, number of units in each layer, strides, padding, kernel size, number of filters as hyper-parameters and the trained values associated with each layer as parameters. Since ONNX models as protobuf serialized, we need a way to read this serialized data into specific data structures. ProtoBuf.jl is a Julia implementation of protocol buffers that solves this very issue (covered in section 3.1). It is used to read the ONNX model into the generated Julia data structures. Once we have the entire model present as a complex Julia structure, we need to read through this structure and map ONNX operators to corresponding Flux layers/operations (covered in section 3.2). At the same time, model weights or parameters are separately stored and saved externally as BSON serialized file. Once the model has been loaded, we end up with two files: `model.jl`: The Julia code for the machine learning model and `weights.bson`: The weights associated with the layers defined in the `model.jl` file (section 3.3). In the further sections we'll walk through the internals of these individual processes \subsection{ProtoBuf.jl} ProtoBuf.jl \cite{Protobuf.jl} is a Julia implementation of Protocol Buffers. It provides a way to read and write data to and from Julia types from I/O streams. What ProtoBuf.jl gives from onnx.proto3 is the Julian definition of various data structures that in themselves have all the required attributes to load any ONNX serialized model. As an example, as simple message defined in onnx.proto3 as: \begin{lstlisting}[language=julia] message NodeProto { repeated string input = 1; // namespace Value repeated string output = 2; // namespace Value // An optional identifier for this node in a graph. // This field MAY be absent in ths version of the // IR. string name = 3; // namespace Node // The symbolic identifier of the Operator to // execute. string op_type = 4; // namespace Operator string domain = 7; // namespace Domain // Additional named attributes. 
repeated AttributeProto attribute = 5; string doc_string = 6; } \end{lstlisting}{} \textit{(Code snippet taken from onnx/onnx.proto3)} Results in the corresponding Julia definition of the model as : \begin{lstlisting}[language=julia] mutable struct NodeProto <: ProtoType input::Vector{AbstractString} output::Vector{AbstractString} name::AbstractString op_type::AbstractString domain::AbstractString attribute::Vector{AttributeProto} doc_string::AbstractString NodeProto(; kwargs...) = (o=new(); fillunset(o); isempty(kwargs) || ProtoBuf._protobuild(o, kwargs); o) end \end{lstlisting} Since ONNX tries to inherit properties from diverse frameworks, ONNX serialized models can be large and complicated. While there are a number of complex generated data structures, three of those are essential towards understanding how data is stored internally: \begin{itemize} \item ModelProto: A very high-level struct that holds all the information. ONNX models are read directly into this structure. \item GraphProto: This structure captures the entire computation graph of the model. \item NodeProto and TensorProto: Information regarding individual nodes in the graph (inputs, outputs and finer attributes) and weights associated with the nodes. \end{itemize} \subsection{ModelProto} ModelProto structure is the structure that holds all the information needed to load a model. Internally, it holds data such as the version information, model version, docstring, producer details and most importantly: the computation graph. \newpage \begin{lstlisting}[language=julia] mutable struct ModelProto <: ProtoType ir_version::Int64 opset_import::Vector{OperatorSetIdProto} producer_name::AbstractString producer_version::AbstractString domain::AbstractString model_version::Int64 doc_string::AbstractString graph::GraphProto metadata_props::Vector{StringStringEntryProto} end #mutable struct ModelProto \end{lstlisting} An ONNX model, once read using ProtoBuf.jl is loaded into this ModelProto object before extracting the graph details. Naturally, at the heart of this is the \textit{graph::GraphProto} attribute that stores the computation graph of the model. \subsection{GraphProto} The GraphProto structure stores information about particular nodes in the graph. This includes the node metadata, name, input, output and the pre-trained parameters in the \textit{initializer} attribute. \begin{lstlisting}[language=julia] mutable struct GraphProto <: ProtoType node::Vector{NodeProto} name::AbstractString initializer::Vector{TensorProto} doc_string::AbstractString input::Vector{ValueInfoProto} output::Vector{ValueInfoProto} value_info::Vector{ValueInfoProto} end #mutable struct GraphProto \end{lstlisting} \subsection{TensorProto} This is the main structure that holds the raw model parameters. For example, in comvolutional layers, the weights associated with the kernel as available as dense vectors in the \textit{raw\_data} attribute. During graph traversal, these weights are extracted and reshaped according the shape that is available as a node attribute. \begin{lstlisting}[language=julia] mutable struct TensorProto <: ProtoType dims::Vector{Int64} data_type::Int32 segment::TensorProto_Segment float_data::Vector{Float32} int32_data::Vector{Int32} string_data::Vector{Array{UInt8,1}} int64_data::Vector{Int64} name::AbstractString doc_string::AbstractString raw_data::Array{UInt8,1} double_data::Vector{Float64} uint64_data::Vector{UInt64} end #mutable struct TensorProto \end{lstlisting} This is most of the information needed to build the model. 
In the next section, we discuss how we use DataFlow.jl to travel this graph and extract model parameters and other relevant information. \subsection{Graph operations via DataFlow.jl} Once we have the entire model data present as a ModelProto object, the next step is to travel the computation graph and capture all the operation being done in the graph while mapping those simultaneously to the corresponding Flux operators. DataFlow.jl \cite{DataFlow.jl} is a code intermediate representation format, representing Julia code as an expression graph. It provides functions for graph re-structuring , even on cyclic graphs. Graphs can then also be used to generate Julia expression. It can be efficiently used to traverse our \textit{ModelProto.graph} object. However, during this traversal we want to map ONNX operators to Flux layers and functions. In DataFlow.jl, this becomes equivalent to creating a new vertex for the required operator and calling in with appropriate Flux functions , which are inferred from the ONNX operator itself. As an example, let's consider the simple case of the \textit{BatchNorm} operator in ONNX. Relu is a commonly used activation function in neural networks that can be expressed as : \[ relu(x) = max(x, 0) \] It basically just turns negative neurons off (sets them to 0) and bypasses positive neurons. The Relu operator in ONNX performs the same operation on an entire vector elementwise. It takes in a single parameter: The input vector and returns a single value: the result of applying relu on the input. Using DataFlow.jl, this operator is mapped as : \newline \newline \begin{lstlisting}[language=julia] ops[:Relu] = function (params, x) vcall(broadcast, :relu, x) end \end{lstlisting} The definition of \textit{relu} here is defined in Flux: Once the model is written to an external model.jl file, we can include the file directly after importing Flux and all definitions should be ready for use. Other complex layers such as Convolution have more complicated implementations, but the essence remains the same, to collect all inputs and call them with the corresponding Flux function. During this process, model weights also computed and stored in a dictionary, mapping layer name to the parameters. At the end of the graph traversal, we have both the required values: the DataFlow graph containing Flux layers and the model weights corresponding to each of these layers. The DataFlow graph can converted to Julia code using \textit{DataFlow.syntax} that also assigns variable names as and when needed. This Julia code is then written to an external \textit{model.jl} file. For saving the weights, we use the BSON.jl package. BSON stands for Binary JSON, a binary encoded serialization of JSON objects. BSON.jl \cite{BSON.jl} can be used to store and load such structures, our dictionary containing the model weights being one of them. \subsection{Interface and Design:} At a top level, ONNX.jl provides a minimal interface for the user; it is just a tool for loading ONNX format models. Once the model and weight file has been successfully generated, ONNX.jl provides no further functionality. Users can then treat the resultant model as any other Flux model. \begin{lstlisting}[language=Julia] using ONNX, Flux ONNX.load_model("path_to_onnx_file") weights = ONNX.load_weights("weights.bson") model = include("model.jl") \end{lstlisting}{} \textit{ONNX.load\_model} here generates the required model and weights file. Internally, it carries out all the above mentioned graph operations. 
\textit{model} above can be treated as any other Flux model. The significant advantage the ONNX.jl provides is that is treats a compiler problem as a graph problem. It generates the Flux code for the model, which makes it very easy and intuitive to use the same model for further applications, such as fine-tuning or even replacing existing layers for some other use case. This is ideal in the case of applications such as neural style transfer, where it is very common to use a pre-trained network and modify it a bit as a starting point. The generated code can also be helpful for finer debugging of the model. Overall, the entire process from the ONNX serialized file to generation of model and weight file can be summarized as: \includegraphics[width=8.8cm]{onnx-3.png} \begin{center} \textit{Fig. 1: Flow diagram of ONNX.jl} \end{center} Additionally, ONNX.jl also provides helper functions for inspecting the model before loading it. \textit{ONNX.layers} reads an ONNX file and returns a list of all the layers in the model. With the growing interest around more complicated and deep models, it is possible that an ONNX model might have layers that Flux itself doesn't support. For handling these, ONNX.jl leaves a \textit{hook} for the users to implement additional functionality. A \textit{hook} is a function that doesn't have an existing implementation: one would have to write an implementation for it themselves. However any operator that also has a corresponding implementation in Flux is completely recognized by ONNX.jl at the moment. \section{Usage Scenarios} The ONNX format and ONNX.jl can be used for transfer learning in Flux, where we store knowledge while training a model and use this knowledge for some other task. The idea is that rather than random initialization of parameters for training a neural network, it's better to take an already trained model, since it leads to faster convergence. In transfer learning, we take a pretrained model and train it on another dataset, which might also have a different class distribution. Fine tuning is an approach to transfer learning where we train on a subset of training data with a smaller learning rate. Transfer learning learning has shown tremendous results in image classification, object detection, simulations, sentiment and NLP based classification in recent past. This is also pretty common when talking about tasks such as neural style transfer where we want to change the style of an image in accordance with the style of another image. Generative Adversarial Networks (GANs) have shown to deliver high quality results when trained on top of a pre-trained model. StyleGAN \cite{DBLP:journals/corr/abs-1812-04948} , for example, can use a pre-trained model to train a custom model to deliver high quality super-resolution results. \section{Related Work} In recent times several projects have come up that solve similar issue. One of the most notable project is TensorFlow's mlir (Multi-Level Intermediate Representation) \cite{lattner2020mlir} . mlir is an evolution of LLVM \cite{LLVM:CGO04} that defines a common Intermediate Representation (IR) format, which can be used to represent any DataFlow graph. This common format unifies machine learning models in TensorFlow or other frameworks. Other noteworthy approaches in this direction are PFA and NNVM. PFA \cite{10.1145/2939672.2939731} or Portable Format for Analytics is a common language that aims as easing the transition from development to production. 
It can be expressed within the common JSON format and has functionalities such as control structures, loops and user-defined functions. NNVM\cite{nnvm} is an end-to-end compiler for AI frameworks. It aims at solving the challenges posed by using different diverse machine learning frameworks. It consist if two major components: NNVM (Neural Network Virtual Machine) and TVM (Tensor Virtual Machine) \cite{article} . NNVM defines a common computational graph intermediate representation format and TVM implements the operators used in these computation graphs while optimizing them for the backend hardware. \section{Future Work} As ONNX.jl becomes the beginning point for various researchers interested in using Julia for their research, it is important to note that it also has certain shortcomings. The most significant is that a model can't be completely loaded unless there's an equivalent implementation of the operator in Flux.jl. An example of this is Grouped Convolutions. These variants of Convolutional layers were used in AlexNet \cite{NIPS2012_4824} and showed amazing results. However, since Flux doesn't support these at the moment, the users will need to have an implementation ready if they choose to import an ONNX model with this particular layer into Flux using ONNX.jl. On the plus side, a lot of the most commonly used layers are available in Flux and can be readily used. Another thing to note is that to run ONNX.jl generated code in some other hardware, one might need to do a little restructuring. The model should work directly on the CPU. Another challenge moving forward is that we need to constantly update ONNX.jl to support Flux's latest API changes. This also applies for the other way round: As ONNX operators are updated, we would have to update these corresponding implementations in ONNX.jl to support the newer models using these updated specifications. Over the long run, we'd need to constantly keep an eye out for such changes and adapt ONNX.jl to those. Moreover, there are subtle differences in the way Flux implements operators as compared to other frameworks. As an example, consider the simple \textit{AveragePool} layer. The way Flux implements this is by padding the input tensor appropriately and then performing the pooling operation. However, Keras-tensorflow does this by pooling and then padding. This leads to subtle changes along the edges of the tensor in the output. Such differences occur due to the way most frameworks deal with such layers, and the only way to avoid this is to check for such discrepancies. In recent past, DataFlow.jl has been superseded by another intermediate representation format tool: IRTools.jl. It provides the ability to work with both lowered and typed Julia code and can be used together with metaprogramming tools such as Cassette.jl. There has also been some talk about splitting ONNX.jl into two packages: The first one would do the code generation and DataFlow related functions while the other would be solely responsible for implementation of the ONNX operators. This would be greater control and ease while implementing layers or debugging a loaded model. This should also make implementation pretty straight-forward wherever missing. For the moment, all this continues to be done by a single package. \section{Conclusion} Developing ONNX.jl has been tremendous learning experience. 
From studying about Intermediate Representation formats for deep learning models with millions of parameters to loading them in just a couple of lines of code, ONNX.jl has made it very easy and straight-forward to use a high quality trained model as a starting point for many projects. Once such example I'd like to point out is DenseNet-121 model. This is deep convolutional network that has multiple Convolutional, Dense and Pooling blocks. Naturally, implementing this in any framework is going to be a challenging task. However, thanks to ONNX, we can now use an earlier implementation to import this model into any other framework. Importing this model (train in Caffe2) via ONNX.jl into Flux can be done in ~3 lines of code. I was also able to load several large computer vision models loaded from ONNX format at the time of actively developing the package. Most of these have been added to FluxML/Metalhead.jl for a direct plug-and-play use. These included: \begin{itemize} \item SqueezeNet \cite{i2016squeezenet} \item DenseNet 121 \cite{huang2016densely} \item ResNet 50 \cite{he2015deep} \item GoogleNet \cite{szegedy2014going} \end{itemize} ONNX.jl serves as a entry point for people looking to use Flux for their research, but want quick results. It combines the power of the Julia language, the elegance of Flux and the availability of a vast number of pre-trained models. This enables researchers to spend time focusing on the real issues, rather than model portability. \newpage \input{bib.tex} \end{document} % Inspired by the International Journal of Computer Applications template
{ "alphanum_fraction": 0.795470223, "avg_line_length": 62.276849642, "ext": "tex", "hexsha": "f4ec983c23716628d8cc93ff728723a3fceb191d", "lang": "TeX", "max_forks_count": 26, "max_forks_repo_forks_event_max_datetime": "2022-02-15T16:27:42.000Z", "max_forks_repo_forks_event_min_datetime": "2018-02-09T17:09:50.000Z", "max_forks_repo_head_hexsha": "83c7eb0804cea6f2fa277643ff8f5ade7bb616d5", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "gwbyeon/ONNX.jl", "max_forks_repo_path": "archives/paper/paper.tex", "max_issues_count": 52, "max_issues_repo_head_hexsha": "83c7eb0804cea6f2fa277643ff8f5ade7bb616d5", "max_issues_repo_issues_event_max_datetime": "2022-03-13T18:43:47.000Z", "max_issues_repo_issues_event_min_datetime": "2018-03-15T23:26:58.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "gwbyeon/ONNX.jl", "max_issues_repo_path": "archives/paper/paper.tex", "max_line_length": 425, "max_stars_count": 105, "max_stars_repo_head_hexsha": "83c7eb0804cea6f2fa277643ff8f5ade7bb616d5", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "gwbyeon/ONNX.jl", "max_stars_repo_path": "archives/paper/paper.tex", "max_stars_repo_stars_event_max_datetime": "2022-03-06T07:02:56.000Z", "max_stars_repo_stars_event_min_datetime": "2018-02-11T14:16:14.000Z", "num_tokens": 5832, "size": 26094 }
\newpage \section{Particle characterization and particle tracking using interference properties} \label{sec:chapter2} \subsection{Introduction} Properties of coherent light to produce interference is widely use in metrology for a long time with, for example, the famous Fabry-Pérot \cite{fabry_theorie_1899, perot_application_1899} and Michelson interferometers \cite{michelson_relative_1887}. The latter was initially used to measure earth's rotation and is still used today, in particular, for the recent measurement of gravitational waves \cite{ligo_scientific_collaboration_and_virgo_collaboration_gw151226_2016}. Since the beginning of the century, interest on tracking and characterizing colloidal particles risen thanks to the democratization of micro fluidics and lab-on-a-chip technologies. In the following I will provide some insights on the three most used : \begin{itemize} \item Reflection Interference Contrast Microscopy (\gls{RICM}) \item Lorenz-Mie fit \item Rayleigh-Sommerfeld back-propagation \end{itemize} The first one, \gls{RICM}, uses the principle of optical difference path as a Michelson interferometer. The other two, uses the interference between the light scattered by the colloid and the incident light. Generally, both of the sources are colinear, thus, speak of in-line holography. \subsection{Reflection Interference Contrast Microscopy} \begin{figure}[h] \centering \includegraphics[scale=1]{02_body/chapter2/images/RICM.png} \caption{Figure from \cite{davies_elastohydrodynamic_2018} representing \gls{RICM} with two wavelengths. (a) Left: interference patterns created with a wavelength $\lambda_1 = 532$ nm (scale bar $ 5~\mathrm{\mu m}$). Right: radial intensity profile (black dots) extracted from the image, azimuthally averaged (magenta line) and fitted with Eq.\ref{Eq.RICM} to measure the height of the particle (here $h$). (b) Same as (a) with a wavelength $\lambda_2 = 635$ nm. (c) Time series of the height of a particle $h$ (green: $ \lambda_1$, magenta: $\lambda_2$) and the particle velocity measured along the flow in blue. } \label{fig.RICM} \end{figure} Reflection Interference Contrast Microscopy was first introduced in cell biology by Curtis to study embryonic chick heart fibroblast \cite{curtis_mechanism_1964} in 1964. \gls{RICM} gained in popularity 40 years after both in biology and physics \cite{filler_reflection_2000, siver_use_2000, weber_2_2003, limozin_quantitative_2009, nadal_probing_2002, raedler_measurement_1992}. It was also used recently in soft matter physics to study elastohydrodynamic lift at a soft wall \cite{davies_elastohydrodynamic_2018}. When we illuminate a colloid with a plane wave from the bottom, a part of the light is reflected at the surface of the glass substrate and at the colloid's surface. The difference of optical path between two reflection create interference patterns. Let's take an interest at the mathematical description of this phenomenon. In the far field, we can describe two different one-dimensional electric field vectors of the same pulsation $\omega$ \cite{f_bohren_absorption_1998} as: \begin{equation} \vec{E}_1(\vec{r}, t) = \vec{E}_{0_1} \cos(\vec{k}_1 \cdot \vec{r} - \omega t + \epsilon_1) ~, \end{equation} and \begin{equation} \vec{E}_2(\vec{r}, t) = \vec{E}_{0_2} \cos (\vec{k}_2 \cdot \vec{r} - \omega t + \epsilon_2) ~. 
\end{equation} \nomenclature{$\vec{E}$}{Electrical field} \nomenclature{$k$}{Wave number} \nomenclature{$\omega$}{Pulsation} Where the $k$ is the wave number $k=2\pi n_{\mathrm{m}}/\lambda$, $\lambda$ denoting the illumination wavelength, $n_\mathrm{m}$ the optical index of the medium, $\epsilon_{1,2}$ the initial phase of each wave and $\vec{r}$ the position from the source. Here, the origin ($\vec{r} = \vec{0}$) could be taken at the position of the first reflection (on the glass slide) thus at the particle, $\vec{r}$ would be given by the particle's height such that $|r| = z$ the particle-subtract distance. Experimentally, we measure the intensity of the interference patterns, those can be computed from the time averaged squared sum of the electric field $\vec{E} = \vec{E}_1 + \vec{E}_2$. The measured intensity is thus given by: \begin{equation} \begin{aligned} I & = \langle \vec{E}^2 \rangle = \langle \vec{E}_1^2 + \vec{E}_2^2 + 2\vec{E}_1 \cdot \vec{E}_2 \rangle = \langle \vec{E}_1^2 \rangle + \langle \vec{E}_2^2 \rangle + 2 \langle \vec{E}_1 \cdot \vec{E}_2 \rangle \\ \end{aligned} \end{equation} where $ \langle \vec{E}_1^2 \rangle $ and $\langle \vec{E}_2^2 \rangle$ are respectively given by $I_1$ and $I_2$, the incident light intensities. Using the trigonometric formula $2 \cos (a)\cos (b) = \cos (a+b) + \cos (a-b) $ we have: \begin{equation} \langle \vec{E}_1 \cdot \vec{E}_2 \rangle = \langle \frac{1}{2} \vec{E}_{0_1} \vec{E}_{0_2} \left[ \cos \left( \vec{k}_1 \cdot \vec{r} - \vec{k}_1 \cdot \vec{r} + \phi \right) + \cos \left( 2\omega t + \phi' \right) \right] \rangle~. \end{equation} As we average over the time, the second $\cos$ will vanish since in general $\langle \cos(at + b) \rangle_ t = 0$ thus: \begin{equation} \langle \vec{E}_1 \cdot \vec{E}_2 \rangle = \frac{1}{2} \langle \vec{E}_{0_1} \vec{E}_{0_2} \rangle \cos \left( \vec{k}_1 \cdot \vec{r} - \vec{k}_2 \cdot \vec{r} + \phi \right) \end{equation} with $\phi$ the phase difference between the two fields, which is generally equal to $\pi$ due to the reflection properties on a higher index. Indeed, a colloid has generally a greater optical index than the dilution medium. Finally, the total intensity can be read as: \begin{equation} I = I_1 + I_2 + 2 \sqrt{I_1 I_2} \cos \left( \vec{k}_1 \cdot \vec{r} - \vec{k}_2 \cdot \vec{r} + \phi \right) \end{equation} By taking $k_1 = - k_2$ due to the reflection properties, we have: \begin{equation} I = I_1 + I_2 + 2 \sqrt{I_1 I_2} \cos \left( \frac{4 \pi n_{\mathrm{m}}}{\lambda} z + \phi \right) \end{equation} If we now suppose that we have a spherical particle at a height $z$ we can develop the radial interference intensity $I(x)$ as \cite{ raedler_measurement_1992}: \begin{equation} I(x) = A_0 + A_1 \mathrm{e}^{-b_1 x^2} + A_2^{-b_2 x^2} \cos \left[ \frac{4\pi n_m}{\lambda}\left( g(x) + z \right) + \phi \right] \label{Eq.RICM} \end{equation} Where $A_i$ and $b_i$ are fit parameters and $g(x)$ denotes the contour of the sphere. Finally, this method is great because the equation is computationally light and permits to have a quick tracking of particles. However, as we can see on Eq.\ref{Eq.RICM}, due to the periodicity of the cosinus, the interference pattern will be the same for all heights $z$ separated by a distance $\lambda / 2n_\mathrm{m} \approx 200 $ nm (for $\lambda = 532$ nm and $n_{\mathrm{m}} = 1.33$). It is possible to extend this limitation by using 2 different wavelength to $\approx 1.2 ~ \mathrm{\mu m}$ as used in \cite{davies_elastohydrodynamic_2018}. 
Despite the precision of this method which can reach the $10$ nm spatial resolution; the measurement ambiguity is not compatible with the study of micro-particle Brownian motion, hence, \gls{RICM} is not usable for our context. As a matter of fact, we experimentally reach height span of a few microns. \subsection{Lorenz-Mie Fit} \label{chap:LM_fit} When a colloid is illuminated with a plane wave, a part of the light is scattered. In consequence, the superimposition of the incident field $\vec{E}_0$ and scattered field $\vec{E}_s$ interferes. The interference patterns thus obtained are called holograms. If the particle size is at the same order of magnitude or greater than the illumination wavelength, it is not possible to use Rayleigh approximations \cite{strutt_lviii_1871}. Indeed, we would need to use what we call the Lorenz-Mie theory which describes the scattering of dielectric spheres; this theory was found by Lorenz and independently by Mie in 1880 and 1908 \cite{lorenz_lysbevaegelsen_1890, mie_beitrage_1908}. It is in the early 2000 that the Lorenz-Mie theory was first used in order to track and characterize particles \cite{ovryn_imaging_2000, lee_characterizing_2007}. Since then, a lot of studies has been realized with this method \cite{katz_applications_2010}. In the following I will describe the Lorenz-Mie Fit method. In in this part, the height of the particle $z$ is the distance between the particle's center and the focal plane of the objective lens. Let the incident field be a plane wave uniformly polarized along the axis $ \hat{e}$, with an amplitude $E_0$ and propagating along the $\hat{z}$ direction : \begin{equation} \vec{E}_0(\vec{r},z) = E_0(\vec{r}) \mathrm{e}^{ikz}\hat{e} \end{equation} Let's consider a particle of radius $a$ at a position $\vec{r}_p $, the scattered field can be written using the Lorenz-Mie theory \cite{f_bohren_absorption_1998} as: \begin{equation} \vec{E}_s(\vec{r}, z) = \vec{f}_s(k(\vec{r} - \vec{r}_p))E_0(\vec{r}) \exp \left(-ikz\right) \end{equation} With $\vec{f}_s$ the Lorenz-Mie scattering function \cite{f_bohren_absorption_1998}. The intensity $I$ that we measure at $\vec{r}$ is given by the superimposition of incident and scattered waves. Since the measurements are done at the focal plane, $I$ is given by: \begin{equation} \begin{aligned} I(\vec{r}) & = |\vec{E}_s(\vec{r}, 0) + \vec{E}_0(\vec{r}, 0)|^2 \\ & = E_0^2(\vec{r}) + 2 E_0^2\operatorname{Re} \left(\vec{f}_s(k(\vec{r}- \vec{r}_p)) \hat{e}\right) + | \vec{f}_s(k(\vec{r}- \vec{r}_p)) |^2 \end{aligned} \end{equation} The most of the experimental defects on the images are due to spacial illumination variation caused by dust particle and such. It can be corrected by normalizing the image by the background. In another word, we normalize $I(\vec{r})$ by the intensity of the incident field $I_0 = E_0(\vec{r})^2$ which is the experimental background. It can be measured by different methods, one is to have an empty field of view and the other one, which is more convenient is to take the median of a stack of images. Naturally, for having the latter to work, the movie should be long enough to have the particle diffuse enough, if not a ghost of the particle will appear on the background. This process also permits getting rid of the immobile particle that could generate any additional noise. An example of hologram before and after the normalization is shown in Fig.\ref{fig.Lorenz_mie_demo} a-c). 
We write the normalized intensity $I/I_0$: \begin{equation} \frac{I(\vec{r})}{I_0(\vec{r})} = 1 + 2 \operatorname{Re} \left( \vec{f}_s(k(\vec{r}- \vec{r}_p)) \hat{e} \right) + | \vec{f}_s(k(\vec{r}- \vec{r}_p)) |^2 \label{Eq.normalized_Mie} \end{equation} Now that we have the analytical form of the holograms' intensity, it is possible to fit an experimental one to Eq.\ref{Eq.normalized_Mie} as shown in Fig.\ref{fig.Lorenz_mie_demo} d-e). For the sake of completeness, I will detail the Lorenz-Mie scattering function, $\vec{f}_s(k\vec{r})$ which is given by the series: \begin{equation} \vec{f}_s(k \vec{r}) = \sum _{n=1} ^{n_c} \frac { i^n (2n +1) } { n(n+1) } \left( i a_n \vec{N}^{(3)}_{eln}(k\vec{r}) - b_n \vec{M}^{(3)}_{oln}(k\vec{r}) \right) \label{Eq.Lorenz-Mie-function} \end{equation} where $\vec{N}^{(3)}_{eln}(k\vec{r})$ and $\vec{M}^{(3)}_{oln}(k\vec{r})$ are the vector spherical harmonics. $a_n$ and $b_n$ are some coefficients that depend on the particle and illumination properties. For a spherical and isotropic particle of radius $a$ and refractive index $n_\mathrm{p}$, which is illuminated by a linearly polarized plane wave, the $a_n$ and $b_n$ coefficients are expressed in terms of spherical Bessel $j_n$ and Hankel $h_n$ functions as \cite{f_bohren_absorption_1998}: \begin{equation} a_n = \frac { \zeta^2 j_n (\zeta k a)k a j_n' (k a) - j_n(ka)[\zeta kaj_n(\zeta ka)]' } { \zeta^2 j_n (\zeta k a)k a h_n^{(1)'} (k a) - h_n^{(1)}(ka)\zeta kaj_n'(\zeta ka) } ~, \label{Eq:an} \end{equation} and \begin{equation} b_n = \frac { j_n(\zeta k a) kaj_n'(ka) - j_n (ka) \zeta kaj_n'(mka) } { j_n(\zeta k a) kah_n^{(1)'}(ka) - h_n^{(1)} (ka) \zeta kaj_n '(mka) } ~, \label{Eq:bn} \end{equation} where $\zeta = n_\mathrm{p} / n_m $ and the prime notation denotes differentiation with respect to the argument. As we can see, the holograms will depends on Eq.\ref{Eq.Lorenz-Mie-function} and will vary with a lot of parameters ($\lambda$, $n_m$, $n_\mathrm{p}$, $a$ and $\vec{r}_\mathrm{p}$) which can all be fitted. In general, the illumination wavelength $\lambda$ and medium index $n_\mathrm{m}$ are known and do not need to me fitted. From only one hologram it is could thus be possible to measure precisely the position of the particle $\vec{r}_\mathrm{p}$ and in the same time characterize the radius and optical index of the colloid. As a side note, it is even possible to characterize a particle without a priori knowledge of its characteristics using Bayesian approach \cite{gregory_bayesian_2005, dimiduk_bayesian_2016}. Computing Eq.\ref{Eq.Lorenz-Mie-function} numerically brings another interesting question, as it is analytically written as a sum over $n$; one could ask after which number of terms $n_c$ the series will converge. It has actually been found that the series converge after a number of terms \cite{lentz_generating_1976} \begin{equation} n_c = k a + 4.05 (k a)^{1/3} + 2 ~. \end{equation} Consequently, larger particles' holograms will need more terms to converge and, hence, are longer to fit. As an example, the largest particles used during my thesis have a radius $a = 2.5 ~ \mathrm{\mu m}$ leading to a number of terms $ n_c = 55$ in water and $\lambda = 532$ nm, for the smallest ones, where $a = 0.5 ~ \mathrm{\mu m}$ we find $n_c = 18$ which makes a huge difference in practice. 
If a reader wants to evaluate an hologram given by the Lorenz-Mie theory for a peculiar particle and position, it can be done in a few lines with the \mintinline{python}{holopy} module using the following Python snippet which was used to make Fig.\ref{fig.holo_fix_n} and \ref{fig.holo_fix_z}: \begin{minted} [ frame=lines, framesep=2mm, baselinestretch=1.2, fontsize=\footnotesize, linenos ] {python} import holopy as hp from holopy.scattering import calc_holo, Sphere sphere = Sphere(n=1.59, r=1.5, center=(4/0.1, 4/0.1, 10)) # n is the optical index of the particle, r it's radius in microns # center is its center position in microns. medium_index = 1.33 illum_wavelen = 0.532 illum_polarization = (1, 0) detector = hp.detector_grid(shape=100, spacing=0.1) # shape is the size in pixel of the camera and the spacing is the pixel's size in microns. holo = calc_holo( detector, sphere, medium_index, illum_wavelen, illum_polarization, theory="auto" ) #the hologram can be directly be plotted using: hp.show(holo) \end{minted} \begin{figure}[H] \centering \includegraphics[scale=1]{02_body/chapter2/images/lorenz_mie_fit_demo/plot_lorenz_mie.pdf} \caption{a) Raw hologram of a $2.5 ~ \mathrm{\mu m}$ polystyrene particle measured experimentally with the setup detailed in the chapter \ref{chap:exp-setup}. b) Background obtained by taking the median value of the time series of images of the diffusing particle. c) Normalized hologram given by dividing a) by b). d) Result of the fit of c) using Eq.{\ref{Eq.normalized_Mie}} the particle is found to be at a height $z = 14.77 ~ \mathrm{\mu m}$. e) Comparison of the normalized radial intensity, obtained experimentally form c) and theoretically from d).} \label{fig.Lorenz_mie_demo} \end{figure} \subsubsection{Hologram dependance on the particule characteristics} As we can see with the Eq.\ref{Eq.Lorenz-Mie-function}, the in-line holograms vary with the that position, radius and optical index of the particle. For in-line holograms, since the incident and scattered field, the $x$ and $y$ position of the particle will simply be given by the center of the hologram. Thus, it is possible to track only movement of the particle only in 2 dimensions by using algorithm such as the hough transform to find the center. As a side note, in that case, it would be optimal to place the particle just above the focal plane to have an airy disk like hologram, as shown in Fig.\ref{fig.holo_fix_n} for $a = 2.5 ~ \mathrm{\mu m}$ and $z = 5 ~\mathrm{\mu m}$. \begin{figure}[H] \centering \includegraphics{02_body/chapter2/images/holo_size_exemple/holos_only_z.pdf} \caption{Radial intensity profile stack as a function of the distance between the particle center and the focal place of the objective lens, generated for a particle of radius $a = 1.5 ~\mathrm{\mu m}$ and optical index $n = 1.59$.} \label{fig:holo_onlyz} \end{figure} In order to gain some insights on how the holograms vary with the different parameters, one can to compute holograms for particles of different size and height. We will start by looking at the a particle of a radius $a = 1.5 ~ \mathrm{\mu m} $ and $n = 1.59 $ as shown in the Fig.\ref{fig:holo_onlyz}. In this case, one can observe that as the distance between the particle and the focal plane $z$ increases, the hologram's rings gets larger. Unlike a Michelson interferometer or \gls{RICM} we do not observe the rings scrolling. 
Additionally, this thickening of the rings can also be observed on the Fig.\ref{fig:holo_z_fit}, where hologram's intensity profile are plot as a function of the height $z$ both theoretically and experimentally in the for a polystyrene colloidal particle of radius $a = 1.5 ~ \mathrm{\mu m} $, and, for different couples of parameters on the Fig.\ref{fig.holo_fix_n}. Also, we can note that if $z$ is not large enough compared to the radius of the particle, the center of an hologram can be so bright that if the camera does not have a large enough dynamic range, the rings could not be seen. Thus, for having an optimal condition for the fits, one should take care to defocus the enough the objective lens to have $z >> a$. \begin{figure}[H] \centering \includegraphics{02_body/chapter2/images/holo_size_exemple/smallparticles.pdf} \caption{Radial intensity profile for of the particle radius $a << \lambda$ , generated for a particles of optical index $n = 1.59$ with a distance between the particle center and the focal place of the objective lens $z = 15 ~\mathrm{\mu m}$ and $\lambda = 532 ~ \mathrm{nm}$.} \label{fig:small_part_holo} \end{figure} \begin{figure} \centering \includegraphics{02_body/chapter2/images/holo_size_exemple/holos_only_r.pdf} \caption{Radial intensity profile stack as a function of the particle radius, generated for a particle of optical index $n = 1.59$ with a distance between the particle center and the focal place of the objective lens $z = 15 ~\mathrm{\mu m}$.} \label{fig:holo_onlyr} \end{figure} We can now take a look at the variation with respect to the radius of the particle as shown on the Fig.\ref{fig:holo_onlyr} for a particle of optical index $n = 1.59$ and at a distance $z = 15 ~\mathrm{\mu m }$. One can observe that for small particles compared to the wavelength $a << \lambda$ we do not observe the rings this is due to the fact that for the small particles, the scattering can be approximated using the Reyleigh theory which tells us that the scattering is isotropic. Thus, the variation of intensity around $I_0$ will be smaller for small particle. Also, in this small particle regime, the particle size will not affect the general shape of the hologram but just the intensity of the hologram as it can be seen on the Fig.\ref{fig:small_part_holo}, for particle of radius $a = 0.02 ~\mathrm{\mu m}$ to $a = 0.07 ~\mathrm{\mu m}$ for a wavelength $\lambda = 0.532 ~\mathrm{\mu m}$. Additionally, since the noise to signal ratio will be lower than for bigger particles, it will be less precise to characterize small colloids compared to the wavelength. As the particle gets bigger, the scattering become anisotropic and it mostly towards the incident plane wave direction. This effects leads to an increase of the amplitude of the rings $I/I_0$, as one can see on the Fig.\ref{fig:holo_onlyr}. Thus, the noise to signal ratio is high enough to easily discern the hologram on top of the noise as one can see on the experimental picture fig.\ref{fig.Lorenz_mie_demo}-a). One who wants to use this method should thus take care to use large enough particle for the holograms' intensity to be greater than the camera noise level. 
\begin{figure}[H] \centering \includegraphics{02_body/chapter2/images/holo_size_exemple/holos_only_n.pdf} \caption{Radial intensity profile stack as a function of the particle optical index, generated for a particle of radius $a = 1.5 ~ \mathrm{\mu m}$ with a distance between the particle center and the focal place of the objective lens $z = 15 ~\mathrm{\mu m}$.} \label{fig:holo_onlyn} \end{figure} Finally, one can also check how the holograms are varying with the optical index a particle. In this case it is not the particle's optical index $n_\mathrm{p}$ which will matter the most but the ratio $\zeta = n_\mathrm{p} / n_\mathrm{m} $ which can be found in the $a_n$ and $b_n$ formulas, Eq.\ref{Eq:an} and \ref{Eq:bn}. Indeed, for the scatter to happen, the optical index $n_\mathrm{p}$ of the colloid needs to be different from the optical index of the surrounding medium $n_\mathrm{m}$. Additionally, the numerical solution of the Lorenz-Mie scattering is not stable for $n_\mathrm{p} \simeq n_\mathrm{m}$. In the Fig.\ref{fig:holo_onlyn}, we can observe holograms of a particle of radius $a = 1.5 ~ \mathrm{\mu m}$ at fix distance between the particle and the focal plane of the objective lens $z=15 ~ \mathrm{\mu m}$ with a varying colloid's optical index, in water $n_\mathrm{m} = 1.33$. On the Fig.\ref{fig:holo_onlyn} one can thus observe than for $n_\mathrm{p} \simeq n_\mathrm{m}$ we do not oberve any holograms. Additionally, one can observe that the noise to signal ratio gradually increases as $n_\mathrm{p}$ became different from $n_\mathrm{m}$. One who wants to use this method should thus take care to the particle material or the solvent to have $n_\mathrm{m}$ different enough to $n_\mathrm{p}$ for the holograms' intensity to be greater than the camera noise level. \subsubsection{Lorenz-Mie conclusion} The combination of the height, optical index and radius of colloid thus gives unique holograms. This uniqueness of the holograms permits extracting precisely the position, optical index and radius an a colloid. In order to see how holograms are for different couples for parameter on the Figs.\ref{fig:holo_fix_z} and \ref{fig:holo_fix_n}, one can see possible holograms for different size and height. Additionally, one can use the the Jupyter Notebook on my github repository in order to plot any hologram \href{https://github.com/eXpensia/Ma-these/blob/main/02_body/chapter2/images/holo_size_exemple/holosize_variation.ipynb}{\faGithub}. Finally, Lorenz-Mie is the most versatile in-line holographic method, indeed, it permits tracking and characterize unique particles even without a priori knowledge. Besides, it is possible to write the Lorenz-Mie function $\vec{f}_s$ for particular cases such as anisotropic \cite{fung_holographic_2013}, non-spherical particles \cite{wang_using_2014} or particle clusters \cite{fung_holographic_2013, perry_real-space_2013} to name a few; such possibilities open the door to a lot of experimental studies. Additionally, it can reach really high precision as the tenth of nanometer on the position and radius as well as $10^{-3}$ on the optical index \cite{lee_characterizing_2007}. Unfortunately, the Lorenz-Mie fitting suffer from a major drawback which is the time needed to fit one image. For example, a 200 by 200 pixels image, of a $2.5 ~ \mathrm{\mu m}$ particle's hologram, can take up to two minutes to be fitted using a pure and straightforward python algorithm. 
A lot of work as been done to have faster tracking, such as random-subset fitting \cite{dimiduk_random-subset_2014}, GPU (graphical processing unit) acceleration, machine-learning \cite{yevick_machine-learning_2014, hannel_machine-learning_2018} and deep neural networks \cite{altman_catch_2020}. \begin{figure}[H] \centering \includegraphics{02_body/chapter2/images/test_tableau2.pdf} \caption{On the left, experimentally measured holograms' radial intensity profile stack, generated from a polystyrene bead of nominal radius $a = 1.5 \pm 0.035 ~ \mathrm{\mu m} $ using the experimental setup explained in chapter \ref{chap:exp-setup}. The calibration of this particle radius and optical index is shown in Fig.\ref{fig:KDErn}. On the right, the corresponding theoretical stack using the result of each individual hologram's fit.} \label{fig:holo_z_fit} \end{figure} \begin{figure} \centering \includegraphics{02_body/chapter2/images/holo_size_exemple/holos_fix_z.pdf} \caption{On the left, experimentally measured holograms' radial intensity profile stack, generated from a polystyrene bead of nominal radius $a = 1.5 \pm 0.035 ~ \mathrm{\mu m} $ using the experimental setup explained in chapter \ref{chap:exp-setup}. The calibration of this particle radius and optical index is shown in Fig.\ref{fig:KDErn}. On the right, the corresponding theoretical stack using the result of each individual hologram's fit.} \label{fig:holo_fix_z} \end{figure} \begin{figure} \centering \includegraphics{02_body/chapter2/images/holo_size_exemple/holos_fix_n.pdf} \caption{On the left, experimentally measured holograms' radial intensity profile stack, generated from a polystyrene bead of nominal radius $a = 1.5 \pm 0.035 ~ \mathrm{\mu m} $ using the experimental setup explained in chapter \ref{chap:exp-setup}. The calibration of this particle radius and optical index is shown in Fig.\ref{fig:KDErn}. On the right, the corresponding theoretical stack using the result of each individual hologram's fit.} \label{fig:holo_fix_n} \end{figure} \clearpage \newpage \subsection{Rayleigh-Sommerfeld back-propagation} Rayleigh-Sommerfeld back-propagation \cite{wilson_3d_2012} works on the same principle as the Lorenz-Mie fitting but assumes that we have small scatterers., such as : \begin{equation} |\zeta - 1| << 1 \text{ and } ka|\zeta - 1| << 1 ~. \end{equation} In this case, at the focal plane, the intensity of the scattered field is smaller than the incident field, hence, the term $|\vec{E}_s|^2$ can be ignored. Thus, the normalized intensity, Eq.\ref{Eq.normalized_Mie} can be rewritten as: \begin{equation} \frac{I(\vec{r})}{I_0(\vec{r})}= 1 + 2\operatorname{Re}\left( \frac{E_s(\vec{r},0)}{E_0(\vec{r})} \right) ~. \end{equation} If one can retrieve completely the scattered field from an image, it is possible to reconstruct it above the focal plane by convolution using the Rayleigh-Sommerfeld propagator \cite{goodman_introduction_2005}: \begin{equation} h_{-z}(\vec{r}) = \frac{1}{2 \pi} \frac{\partial}{\partial z} \frac{\mathrm{e}^{ikR}}{R} ~, \label{Eq:propagator} \end{equation} where $ R^2 = r^2 + z^2 $ and the sign convention on the propagator indicates if the particle is below or above the focal plane. 
Using this propagator we have: \begin{equation} E_s(\vec{r}, z) = E_z(\vec{r}, 0) \otimes h_{-z}(\vec{r}) \end{equation} By using the convolution theorem \cite{cheong_strategies_2010, goodman_introduction_2005, sherman_application_1967,schnars_digital_1994} and supposing a uniform illumination, one can write the reconstructed scattered field at a height $z$ as: \begin{equation} E_s(\vec{r}, z) \approx \frac{\mathrm{e}^{ikz}}{4\pi ^2} \int ^\infty _{- \infty} B(\vec{q}) H(\vec{q}, -z) \mathrm{e}^{i \vec{q} \cdot \vec{r}} d^2 q \label{Eq.RS} ~, \end{equation} where $B(\vec{q})$ is the Fourier transform of $I/I_0$ and $H(\vec{q}, -z)$ is given by \begin{equation} H(\vec{q}, -z) = \mathrm{e}^{iz \sqrt{k^2 - q^2}} ~. \end{equation} Finally, using Eq.\ref{Eq.RS} one can reconstruct the scattered field and intensity since $I(\vec{r}) = |E_s(\vec{r})|^2$ as shown in Fig.\ref{fig.sommerfeld}. Moreover, by finding the position where the field is the brightest as shown in red the Fig.\ref{fig.sommerfeld}, we measure the position of the particle. Those equations are way less computational intensive than the Lorenz-Mie function Eq.\ref{Eq.Lorenz-Mie-function}. Thus tracking can be way faster, moreover, Fourier transforms can be largely accelerated using GPU. Additionally, as the propagator Eq.\ref{Eq:propagator} take only into account the intensity of the image, this method does not require any information on the particle and number of particles. As a matter of fact, to write Eq.\ref{Eq.RS} one just need to assume that we have spherical colloids. Thus, this method is great to reconstruct the 3D position of a lot of particles or clusters formations. However, the major drawback is that it is the less precise of the presented measurements and that we can't use it to characterize the particles generating the holograms. \subsubsection{Numerical Rayleigh-Sommerfeld back-propagation} The \mintinline{python}{holopy} Python module also provide a set of method that permits to user the Rayleigh-Sommerfed back-propagation. Given the \mintinline{python}{hologram} variable containing all the needed metadata about the hologram such as the pixel size, medium index $n_\mathrm{n}$ and wavelength $\lambda$ and the actual image, one can then use the \mintinline{python}{propagate} method to back-propagate an hologram over a set of height \mintinline{python}{zstack} using the following Python snippet. \begin{minted} [ frame=lines, framesep=2mm, baselinestretch=1.2, fontsize=\footnotesize, linenos ] {python} import holopy as hp import numpy as np zstack = np.linspace(0, 20, 11) rec_vol = hp.propagate(holo, zstack) \end{minted} Please note that using the \mintinline{python}{propagate} this each propagation will be done by performing a convolution of the reference hologram over the distance to be propagated. However, better reconstruction can be obtained iteratively propagating holograms over short distance. The latter method is called Cascaded Free Space Propagation, and is particularly useful when the reconstructions have fine features or when propagating over large distances \cite{kreis_frequency_2002}. It can be done by specifying the argument \mintinline{python}{cfsp} to the \mintinline{python}{propagate} method. For example, to propagate three steps over each distance, we can use \mintinline{python}{hp.propagate(holo, zstack, cfsp=3)}. 
\begin{figure}[!ht] \centering \includegraphics[scale=2]{02_body/chapter2/images/sommerfel_demo.jpg} \caption{Figure from \cite{cheong_strategies_2010} a) Volumetric reconstruction using Eq.\ref{Eq.RS} of the scattered intensity of single colloidal sphere, colored by intensity. b) Volumetric reconstructions of $22$ individual $1.58 ~ \mathrm{\mu m}$ diameter silica spheres organized in bcc lattice using holographic optical tweezers in distilled water. Colored regions indicate the isosurface of the brightest 1 percent of reconstructed voxels.} \label{fig.sommerfeld} \end{figure} \subsubsection{Conclusion} Finally, the method we choose is the Lorenz-Mie fitting method, since this it permits the characterization of single particles. Indeed, since we are interested to fine effects near the surface, we need to know perfectly the radius of the particle we have recorded. This feature also make our all process calibration free, as we don't need to assume any physical properties. In the following, the experimental setup is going to be detailed. \subsection{Experimental setup} \label{chap:exp-setup} In order to observe the holograms we use an homemade inverted microscope as shown on the Fig.\ref{fig:picture} and shematized in Fig.\ref{fig:schema}. A sample consists of a parallelepipedic chamber ($1.5 ~ \text{cm} ~ \times ~ 1.5 ~ \text{cm} ~ \times ~ 150 ~ \mathrm{\mu m} $), made from two glass covers, a parafilm spacer, and sealed with vacuum grease, containing a dilute suspension of spherical polystyrene beads. We used 3 different sizes, of nominal radii $0.56 ~ \mathrm{\mu m}, ~ 1.5 ~ \mathrm{\mu m} \text{ and } 2.5 ~ \mathrm{\mu m} $, at room temperature $T$, in distilled water (type 1, MilliQ device) of viscosity $\eta = 1 ~ \mathrm{mPa.s}$. The sample is illuminated by a collimated laser beam with a $521 ~ \mathrm{\mu m}$ wavelength. As depicted in the chapter \ref{chap:LM_fit}, the light scattered by one colloidal particle at a given time $t$ interferes with the incident beam. An oil-immersion objective lens (x60 magnification, $1.30$ numerical aperture) collects the resulting instantaneous interference pattern, and relays it to a camera (Basler acA1920-155um) with a $51.6$ nm/pixel resolution (see Fig.\ref{fig.Lorenz_mie_demo}a)). The exposure time of the camera is set to $\tau_{\mathrm{expo}} = 3$ ms to avoid motion-induced blurring of the image, as a general rule, the particle should not diffuse more than the pixel size during that time such that here $2D\tau_{\mathrm{expo}} < 51.6$ nm. \begin{figure}[!ht] \centering \includegraphics{02_body/chapter2/images/figures_setup/photo_setup.pdf} \caption{Photo of the custom build microscope used along my thesis. It is mainly composed of Thorlabs cage system. The camera used is a Basler acA1920-155um, we use a x60 magnification and $1.30$ numerical aperture oil-immersion objective lens. The light source is a colimated $521 ~ \mathrm{\mu m}$ wavelength laser.} \label{fig:picture} \end{figure} \begin{figure}[!ht] \centering \includegraphics[scale=0.9]{02_body/chapter2/images/figures_setup/schema_setup.pdf} \caption{Schematic of the experimental setup. A laser plane wave of intensity $I_0$ illuminates the chamber containing a dilute suspension of micro-spheres in water. 
The light scattered by a particle interferes with the incident beam in the focal plane of an objective lens, which magnifies the interference pattern and relays it to a camera.}
    \label{fig:schema}
\end{figure}

\subsection{Hologram fitting strategy}
\subsubsection{How to speed up the process?}

As presented in section \ref{chap:LM_fit} about the Lorenz-Mie fitting, the main drawback is the time needed to fit an image, from $30$ seconds for images of $100 \times 100$ pixels to a few minutes for $500\times 500$ pixels. We can directly see a bottleneck: if we want to track one trajectory made of $100~000$ images, we would need to wait a minimum of $\approx 70$ days for a series of images that needs only a few minutes to be recorded experimentally.
When I started my PhD, two groups, Grier's lab and Manoharan's lab, had already introduced Python packages, respectively Pylorenzmie and Holopy, to invert holograms. They had introduced ways to fit only a set of randomly chosen pixels, and demonstrated that taking only $1\%$ of the image pixels could lead to similar precision and considerably improve the fit's execution time \cite{dimiduk_random-subset_2014}. Unfortunately, even if this is faster, it only reaches a few images per second, which is still too slow for the amount of data we wanted to collect.
Ironically, this part of my project is certainly the one where I spent most of my time, and I actually learned a lot about code optimization and computer cluster usage. Around the middle of my thesis, Pylorenzmie got a new commit on their github repository announcing that they had succeeded in using GPU acceleration through CUDA. This was not an easy task since they needed to reconstruct the Bessel functions in a form suitable for the GPU; fortunately, it is possible to do so by using continued fractions \cite{lentz_generating_1976}. This major update permits fitting whole images at a remarkable speed of 20 fps. At this speed, we fit the three-dimensional position of the particle, its radius and its optical index. To have a faster and more reliable tracking, we fit the first $10~000$ images of a movie with all parameters free. We then determine the physical properties of the colloidal particle and fit the whole movie with only the position as a free parameter.

\subsubsection{Radius and optical index characterization}

Once the radius and optical index data are retrieved, the quantity we can look at is the distribution of measurements. Using $10 ~ 000$ measurements we can plot the histograms of the measured $a$ and $n_\mathrm{p}$. These simple histograms could suffice to measure the physical properties of the colloidal particle. However, we can go a bit further and look at the 2D histogram of $a$ and $n_\mathrm{p}$ as presented in Fig.\ref{fig:KDErn}, here smoothed using a Gaussian kernel density estimator. As we can see, it is not isotropic, and it seems that the measurements of $n_\mathrm{p}$ and $a$ are correlated.

\begin{figure}[!ht]
    \centering
    \includegraphics{02_body/chapter2/images/KDErn.pdf}
    \caption{2D Probability density function of the measurements of the optical index $n_\mathrm{p}$ and radius $a$. Black lines indicate iso-probability. Taking the top $10\%$ probability, we measure $n_\mathrm{p} = 1.585 \pm 0.002$ and $a=1.514 \pm 0.003 ~ \mathrm{\mu m}$. }
    \label{fig:KDErn}
\end{figure}
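To illustrate how such a smoothed 2D distribution can be obtained in practice, the following sketch builds a Gaussian kernel density estimate from the fitted values. It is only an example of the procedure: the arrays \mintinline{python}{a_meas} and \mintinline{python}{n_meas} are hypothetical containers for the $10~000$ fitted radii and optical indices, not part of the actual analysis code.

\begin{minted}
[
frame=lines,
framesep=2mm,
baselinestretch=1.2,
fontsize=\footnotesize,
linenos
]
{python}
import numpy as np
from scipy.stats import gaussian_kde

# a_meas, n_meas: hypothetical 1D arrays of fitted radii and indices.
kde = gaussian_kde(np.vstack([a_meas, n_meas]))

# Evaluate the smoothed density on a regular (a, n_p) grid.
a_grid = np.linspace(a_meas.min(), a_meas.max(), 200)
n_grid = np.linspace(n_meas.min(), n_meas.max(), 200)
A, N = np.meshgrid(a_grid, n_grid)
density = kde(np.vstack([A.ravel(), N.ravel()])).reshape(A.shape)
\end{minted}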
{ "alphanum_fraction": 0.7542066825, "avg_line_length": 76.4437627812, "ext": "tex", "hexsha": "6c4110f5e5cd63c086d0c291a570811963dc5038", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "bd0eb6dea929727ea081dae060a7d1aa32efafd1", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "eXpensia/Confined-Brownian-Motion", "max_forks_repo_path": "02_body/chapter2/.ipynb_checkpoints/chapter2-checkpoint.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "bd0eb6dea929727ea081dae060a7d1aa32efafd1", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "eXpensia/Confined-Brownian-Motion", "max_issues_repo_path": "02_body/chapter2/.ipynb_checkpoints/chapter2-checkpoint.tex", "max_line_length": 2061, "max_stars_count": null, "max_stars_repo_head_hexsha": "bd0eb6dea929727ea081dae060a7d1aa32efafd1", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "eXpensia/Confined-Brownian-Motion", "max_stars_repo_path": "02_body/chapter2/.ipynb_checkpoints/chapter2-checkpoint.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 10632, "size": 37381 }
\section{Other}
{ "alphanum_fraction": 0.6666666667, "avg_line_length": 4.5, "ext": "tex", "hexsha": "d6c95f05a350abbfe6008024363c5dab2e83bf7e", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "adamdboult/nodeHomePage", "max_forks_repo_path": "src/pug/theory/statistics/trees/02-00-Other.tex", "max_issues_count": 6, "max_issues_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93", "max_issues_repo_issues_event_max_datetime": "2022-01-01T22:16:09.000Z", "max_issues_repo_issues_event_min_datetime": "2021-03-03T12:36:56.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "adamdboult/nodeHomePage", "max_issues_repo_path": "src/pug/theory/statistics/encryptionClassical/06-00-other.tex", "max_line_length": 15, "max_stars_count": null, "max_stars_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "adamdboult/nodeHomePage", "max_stars_repo_path": "src/pug/theory/statistics/encryptionClassical/06-00-other.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 6, "size": 18 }
The service supports website admins in hosting and administering Drupal websites aimed at the general public, such as experiment or departmental central websites.
Some of the most popular sites based on this service are \href{https://home.cern/}{home.cern}, \href{https://atlas.cern}{atlas.cern}, \href{https://cms.cern}{cms.cern}, \href{https://careers.cern}{careers.cern} and \href{https://visit.cern}{visit.cern}.
They form CERN's main outreach channel and are critical for the Organization's reputation.

Users of the service range across a wide spectrum of different professional profiles, and it is quite common that the responsibility of site building at CERN falls on administrative personnel, or personnel with little technical background in web technologies.
This in turn shapes the kind of service we have to provide; it is, for example, impractical to rely on developer-centric workflows, like GitOps and CLI tools.
A small fraction of our user base, however, does have web development experience.

The consequence is that the Content Management service has a dual mission:
\begin{enumerate}
  \item to ensure the \emph{high availability} and performance of these communication channels
  \item to make site building and administration accessible to a wide-ranging user base, while remaining extensible for websites needing special features
\end{enumerate}

%This work describes an infrastructure project that focuses on furthering the \emph{first mission}, without sacrificing the second.
%Ideally, the changes should be almost transparent for non-technical site administrators, while enabling previously unavailable best-practices workflows for technical users.

\subsubsection*{Control vs. customization}
%All websites on the infrastructure are running the same Drupal distribution in a "multi-site" configuration, as will be explained in section \ref{sec-phys-infra}.
%The distribution consists of an upstream Drupal version with a few patches for the CERN environment, plus a curated set of "central" modules.
Curating the Drupal distribution, and critically, the application of \emph{security updates}, is the responsibility of the infrastructure team.
However, many websites need extra features, and Drupal was selected exactly because of its extensibility (see section \ref{drupal-at-cern}).
Website admins should be able to use community modules, thereby extending Drupal specifically for their website -- and assuming limited responsibility to keep custom code secure.

\subsection{Load characteristics}
\label{sec-load}

\href{https://home.cern/}{home.cern} is the most popular website at CERN, as shown in figure \ref{fig-drp-top10-cip}.
Out of \emph{1043} Drupal websites currently hosted, it alone serves 32\% of monthly unique visitors.
The top 10 websites together serve 79\% of all unique visitors, leaving only 1/5 of them headed for the other 1033 websites.
This is an intrinsic characteristic of the service load, which is heavily skewed towards a very small number of critical websites.

\begin{figure}[t]
  \centering
  \vspace{-3em}
  \begin{subfigure}[b]{.25\textwidth}
    \includegraphics[width=\textwidth]{figures/drupal-top10-uniqClientIP.png}
    \caption{\emph{Public outreach}: The top 10 most popular Drupal websites are shown. home.cern appears twice as \texttt{home.cern} and \texttt{home.web.cern.ch}.
{\color{amethyst} \texttt{timeline.web.cern.ch}} has machine traffic.}
    \label{fig-drp-top10-cip}
  \end{subfigure}
  \hfill
  \begin{subfigure}[b]{.7\textwidth}
    \centering
    \includegraphics[width=\textwidth]{figures/website-bandwidth}
    \caption{\emph{Maximum sustained throughput for high traffic websites}. The maximum throughput of the highest traffic websites was recorded over a period of 5 days. The websites with the highest traffic also have the highest throughput, showing that sustained bursts of traffic are uncommon.}
    \label{fig:website_bandwidth}
  \end{subfigure}
  \vspace{-1.8em}
  \caption{Load characteristics}
  \vspace{-2em}
\end{figure}

Unique visitors over 1 month are taken as a measure of a site's popularity, or how much impact it has on the Organization's reputation, but a more suitable measure for assessing an infrastructure is the rate of HTTP requests.
In section \ref{sec-experiment} we will describe an experiment on resource optimization in the Kubernetes infrastructure by assigning websites to different Quality of Service classes.
The 10 websites with the highest traffic are the target of 60\% of all requests, and they have a high overlap with the most popular sites (fig. \ref{fig:website_bandwidth}).
The most popular websites therefore need not only the highest availability guarantees but also the highest throughput.

What sustained rate of requests should a website be able to handle with stable response time?
To better understand how the load impacts a single website (and therefore estimate the required hosting resources), we performed the measurements of figure \ref{fig:website_bandwidth}.
These observations align with expectations and requirements: critical websites should be able to handle a throughput of 30 requests per second with stable response times.
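As a purely illustrative sketch of what such a sustained-rate check involves (this is not the tooling used for the measurements above; the URL, rate and duration are placeholder assumptions), a constant request rate with latency recording can be scripted as follows:

\begin{verbatim}
# Sketch only: issue requests at a fixed target rate and record latencies,
# to check that a site sustains e.g. 30 req/s with stable response times.
import time, statistics, urllib.request

URL, RATE, DURATION = "https://example.org/", 30, 10   # placeholders
latencies, start = [], time.time()
for i in range(RATE * DURATION):
    t0 = time.time()
    urllib.request.urlopen(URL).read()
    latencies.append(time.time() - t0)
    # wait until the next slot of the fixed-rate schedule
    time.sleep(max(0.0, start + (i + 1) / RATE - time.time()))
print(statistics.mean(latencies), max(latencies))
\end{verbatim}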
{ "alphanum_fraction": 0.7830798479, "avg_line_length": 71.0810810811, "ext": "tex", "hexsha": "7ae926cb17fcfd8a7dccf1168ffccd87d497f573", "lang": "TeX", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2021-04-22T18:54:09.000Z", "max_forks_repo_forks_event_min_datetime": "2021-04-22T18:54:09.000Z", "max_forks_repo_head_hexsha": "7404dbf112b13e037db0a3b216f52927b5b530d7", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "Oblynx/cern-drupal-k8s-papers", "max_forks_repo_path": "CHEP21/sections/service.tex", "max_issues_count": 1, "max_issues_repo_head_hexsha": "7404dbf112b13e037db0a3b216f52927b5b530d7", "max_issues_repo_issues_event_max_datetime": "2021-03-07T20:32:56.000Z", "max_issues_repo_issues_event_min_datetime": "2021-03-07T20:32:56.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "Oblynx/cern-drupal-k8s-papers", "max_issues_repo_path": "CHEP21/sections/service.tex", "max_line_length": 178, "max_stars_count": 1, "max_stars_repo_head_hexsha": "7404dbf112b13e037db0a3b216f52927b5b530d7", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "Oblynx/cern-drupal-k8s-papers", "max_stars_repo_path": "CHEP21/sections/service.tex", "max_stars_repo_stars_event_max_datetime": "2021-03-25T14:15:23.000Z", "max_stars_repo_stars_event_min_datetime": "2021-03-25T14:15:23.000Z", "num_tokens": 1147, "size": 5260 }
\chapter{Implementation}
\label{ch:implementation}
This chapter gives an inside view of the UBX project and the derived work. There are three sections: one for the core libraries, necessary to run nodes, one for the blocks from the official project and one for the blocks designed and implemented in the context of this thesis.

\section{Core UBX library}
This library is composed of the core shared library \texttt{libubx.so} and the Lua scripts and macros necessary to link the component functions together. Please note that since all the UBX functionalities are implemented in C or C++ functions, the native Lua interpreter is not capable of linking them; in fact, the LuaJIT interpreter has been used.
\graffito{In \autoref{ch:deploy} Orocos has been used to deploy the node since it supports Lua components; however, the Orocos Lua interpreter is currently the native one. For this reason the project itself has been forked and re-linked to the LuaJIT library to allow execution of a MicroBLX node on top of Orocos.}
LuaJIT implements the FFI library which allows calling external C functions and using C data structures from pure Lua code\autocite{bib:luaffi}.\\
The \texttt{Makefile} has been expanded beyond the compilation directives already present in the official UBX project: it also contains directives to install the core libraries, the scripts and the macros into the system, and it generates a proper configuration file automatically loaded by the \texttt{ubx\_launch} command.

\section{Official UBX Team blocks}
The \texttt{ubx-base} project contains blocks from the official UBX team; some of those blocks are not used or are re-implemented while others are fundamental for the simulation to work. Moreover it contains the native (basic) data types like \texttt{int}, \texttt{char}, \texttt{long}, \texttt{long long}, \texttt{short}, \texttt{double} and \texttt{float}.
It is worth highlighting the presence of two blocks:
\begin{itemize}
\item \texttt{ptrig}: an s-block that, once properly configured, triggers a list of c-blocks in a timed loop;
\item \texttt{webif}: this c-block needs neither to be triggered nor to be connected: it opens an HTML interface useful to configure, control and monitor the node it is added to.
\end{itemize}

\section{Study-case blocks}
This section describes the blocks designed for the purpose of controlling a quadcopter.

\subsection{Copter controller (\texttt{copter/ctrl\_copter})}
This block implements the control algorithm as explained in \autoref{ch:controlalgorithm} and \autocite{marconi}. It reads the state and the set-point of the vehicle and writes a control action in the form of a vectored-thrust control action (i.e. the scalar thrust and the vectored torque the propellers should generate).\\
It is implemented in C++ in order to exploit the capabilities of the mathematical library Armadillo, which greatly simplifies operations on vectors and matrices.

\subsection{Extended predict-only Kalman filter (\texttt{copter/kf\_predict\_copter})}
Models the behavior of the vehicle: it reads the estimate of the state and the control action and generates a new estimate of the state (which is usually written back to the same i-block the state was read from).\\
This block is written in C++ and uses Armadillo too.

\subsection{Update-only Kalman filter (\texttt{control/kf\_update})}
Fuses the sensor data with the state.
Using the notation from Wikipedia\autocite{bib:wiki:kalmann}, the sensor model matrix $ H $ is read from a file specified in the UBX node configuration file while the sensor data covariance matrix $ R $ is computed on-line from the covariance embedded in the sensor data.

\subsection{Fast shm memory access (\texttt{control/shm})}
This general-purpose i-block is able to dynamically allocate vectors of arbitrary length of arbitrary data-types in shared memory. It is used for fast data passing between c-blocks. Data can be accessed by blocks in the same node by properly linking the port connection.\\
Note that, if necessary, multiple shm blocks running on different nodes on the same machine can share the same memory pool; however, no synchronization mechanism has been implemented to avoid race conditions, and therefore it has been chosen to grant write permissions only to the block that allocated the pool, while the other blocks only have read access.

\subsection{AHRS+ sensor data acquisition (\texttt{control/sensor\_myAHRS})}
This i-block acts as a bridge between the node and the physical Hardkernel AHRS+ sensor. It is worth pointing out that the official library from the sensor manufacturer cannot be used with the \texttt{extern "C"} directive that is necessary to run C++ blocks in UBX, so a wrapper has been created.

\subsection{GUI (\texttt{copter/copter\_graphic\_dump})}
Even though the state of the controller is dumped to the terminal, a graphical output is present too. This is a c-block that, when triggered, reads the state and the set-point of the vehicle to generate and display a 2-D output. The block uses the OpenCV library for the rendering. The block obviously refuses to start outside of a graphical environment.\\
The output produced is a view of the vehicle from the top; the size and the center of the environment in which the copter is moving can be adjusted. The current copter state and set-point are rendered respectively as a red and a green circle with a small tooth to visualize the heading. More detailed information is shown in text format at the top of the screen.

\subsection{Set-point generation (\texttt{copter/prim\_trj\_gen})}
The set-point of the vehicle is generated by this block from 10 primitive trajectories. The trajectory code is read from a TCP socket, then the primitive trajectory is scaled on the basis of configurable parameters, both in time and space, and generated as a function of time. The trajectory primitives are, for now, translations along the 4 cardinal directions, moving up and down, and rotating by 90 degrees clockwise or anti-clockwise.\\
The code of the primitive is passed via a TCP connection by a GUI application described in \autoref{sec:fakeplanner}; the connection is handled using the ZMQ\cite{bib:ZMQ} library.

\section{Virtual remote controller}
\label{sec:fakeplanner}
This GUI application acts as a remote controller for the copter; however, it does not directly generate the set-point for the vehicle, which may be unfeasible because of timing constraints, but instead instructs the trajectory generation block about which pre-defined trajectory it should generate. This virtual remote controller is named "Fake Planner" because it can (should!) be replaced by a software planner (or supervisor) capable of decision making.
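As a purely illustrative sketch of this interaction (this is not the actual Fake Planner code; the endpoint, the socket pattern and the numeric primitive code are assumptions made for the example), sending a primitive-trajectory code over ZMQ could look as follows:

\begin{verbatim}
# Illustrative sketch: send a primitive-trajectory code to the
# set-point generation block over ZMQ. Endpoint, socket pattern and
# code values are assumptions, not the thesis implementation.
import zmq

context = zmq.Context()
socket = context.socket(zmq.PUSH)        # one-way stream of commands
socket.connect("tcp://127.0.0.1:5555")   # assumed address of prim_trj_gen

MOVE_UP = 5                              # hypothetical primitive code
socket.send_string(str(MOVE_UP))         # the block scales the primitive
                                         # and generates the trajectory
\end{verbatim}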
{ "alphanum_fraction": 0.7988960167, "avg_line_length": 131.431372549, "ext": "tex", "hexsha": "4be9cb32d8f53fb764c73da34c69597d4326c56b", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "0ff0d2100d6547afb67af40c0f355bec1a3c9150", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "fmr42/MasterThesis", "max_forks_repo_path": "Chapters/Chapter08.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "0ff0d2100d6547afb67af40c0f355bec1a3c9150", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "fmr42/MasterThesis", "max_issues_repo_path": "Chapters/Chapter08.tex", "max_line_length": 465, "max_stars_count": null, "max_stars_repo_head_hexsha": "0ff0d2100d6547afb67af40c0f355bec1a3c9150", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "fmr42/MasterThesis", "max_stars_repo_path": "Chapters/Chapter08.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1486, "size": 6703 }
\chapter{Features}\label{ch:features}

One of the greatest challenges of \emph{Vulkan} is to offer high hardware performance with the lowest possible consumption while reducing the overhead. Nowadays, mobile devices have the chance of running games with great graphical quality. One method that is employed is to reduce the use of the \gls{cpu}. This reduction is achieved by delegating fewer tasks to it, while the responsibility of the \gls{gpu} is increased: use of batch tasks on the \gls{gpu}, exclusive use of the \gls{cpu} for rendering and computation.

Reaching all devices is another of the main objectives. The \emph{Vulkan LunarG} implementation is available from \emph{Windows 7} to \emph{Windows 10}, as well as on \emph{Tizen}, \emph{GNU\/Linux} and \emph{Android}. The \gls{sdk} can be installed in both \emph{Windows} and \emph{GNU\/Linux} desktop environments. On \emph{MacOS}, support is being developed by third parties to achieve the execution of Vulkan applications. \emph{The Brenwill Workshop} is a development company specialized in graphics software. They have proposed to create a middleware between the \emph{OpenGL} and \emph{Metal} \gls{api} implementations in order to obtain support for \emph{Vulkan} on \emph{MacOS} and \emph{iOS}. The solution is called \emph{Molten}, and it is part of an already available graphics development framework.

\emph{Vulkan} implements several improvements at the technical level, which are a good reason for it to be the best successor of \emph{OpenGL}:

\begin{itemize}
    \item Native multicore \gls{cpu} scaling support.
    \item Use of an intermediate binary format called \emph{SPIR-V}: \emph{Vulkan} defines its own code format for shaders, so that the drivers do not require the development of a compiler to be able to render them. As the shaders are already compiled, a higher range of shaders can be used per scene. The driver is only in charge of optimizing and generating the final compiled code.
    \item Unified management of compute kernels and graphics shaders: both \gls{api}, which until now were separated, are merged.
    \item Object-oriented design; there is no global state.
    \item States are not tied to a context; they are cached in a buffer.
    \item Multithreading support.
    \item More control over memory and synchronization.
\end{itemize}

There are features that have been proposed but will only be implemented in the future, for instance, providing support for multiple \gls{gpu}s of different models; currently \gls{sli} is needed to be able to use several graphics cards at the same time.

In Figure \ref{fig:comparison_opengl_vulkan} the existing differences between the two \gls{api} can be appreciated more graphically. As may be noticed among the features of \emph{Vulkan}, closer access to the hardware from the applications has been attempted. Thanks to this, the need for a large amount of error and memory management is removed. Only an intermediate language is employed for shaders (\emph{SPIR-V}), and the \gls{api} is the same for developing both mobile and desktop applications.

Knowing all this information, it could be concluded that \emph{Vulkan} increases the workload that developers already have. However, things are not as they appear. \emph{Vulkan} can be applied at three different levels:

\begin{enumerate}
    \item Use all these features directly in order to have full control over the engine.
    \item Use and share libraries and layers for accelerating the development process.
    \item Use already existing and optimized engines built on top of the \emph{Vulkan} \gls{api}.
\end{enumerate}

The first option is the least frequent since developers have to start from scratch, but it could be a good option for producing good benchmarks. \emph{Khronos} expects the second option to be a rich area of innovation among the community and companies. As the libraries can be published as small bundles, they could be shared as Open Source and receive improvements and updates. The last option could be the most tempting, considering that the hard work has already been done by industry giants; developers only have to pay some royalties to the proprietors of the engine, for instance \emph{Unity Engine}.

In Figure \ref{fig:vulkan_ecosystem} it can be appreciated how the \emph{Vulkan} architecture is distributed. As explained, the \emph{Vulkan} implementation is placed at a lower level than the game engine. It is located over the hardware so that many of its features can be exploited directly. An interface is set up so that other tools and frameworks can be implemented together with \emph{Vulkan} in the future.

\begin{figure}[t]
    \begin{center}
        \includegraphics[scale=0.3]{comparison-opengl_and_vulkan}
        \caption{Comparison of the features of \emph{OpenGL} and \emph{Vulkan}}
        \label{fig:comparison_opengl_vulkan}
    \end{center}
\end{figure}

\begin{figure}[t]
    \begin{center}
        \includegraphics[scale=0.3]{ecosystem-vulkan}
        \caption{\emph{Vulkan} Ecosystem}
        \label{fig:vulkan_ecosystem}
    \end{center}
\end{figure}
{ "alphanum_fraction": 0.7822116339, "avg_line_length": 65.4155844156, "ext": "tex", "hexsha": "281da620af4d9e4c827186042d056b8e5b0e174e", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "c6e614f2a71778fb98381786d3eff0558c411708", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "jagoPG/ud-videogames", "max_forks_repo_path": "investigacion/chapters/features.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "c6e614f2a71778fb98381786d3eff0558c411708", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "jagoPG/ud-videogames", "max_issues_repo_path": "investigacion/chapters/features.tex", "max_line_length": 120, "max_stars_count": null, "max_stars_repo_head_hexsha": "c6e614f2a71778fb98381786d3eff0558c411708", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "jagoPG/ud-videogames", "max_stars_repo_path": "investigacion/chapters/features.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1251, "size": 5037 }
\subsection{Other meshes}
In this section, some other mesh examples with irregular geometric boundaries are considered. Fig.~\ref{qdt_ex:mesh_flower} and Fig.~\ref{qdt_ex:mesh_wolli_logo} show the meshes generated for a flower input and a logo input, respectively. Fig.~\ref{qdt_ex:mesh_flower_corners} and Fig.~\ref{qdt_ex:mesh_wolli_logo_sharp} highlight the sharp corner treatment of the algorithm.
\begin{figure}[h!]
    \begin{subfigure}[b]{1\linewidth}
        \centering
        \scalebox{0.3}{
            \includegraphics{quadtree/ex_images/qdt_ex_mesh_flower.eps}
        }
        \caption{Mesh for the flower}
    \end{subfigure}
    \\
    \begin{subfigure}[b]{1\linewidth}
        \centering
        \scalebox{0.3}{
            \includegraphics{quadtree/ex_images/qdt_ex_mesh_flower_sharp_corners.eps}
        }
        \caption{Mesh for the flower: sharp corner treatment}
        \label{qdt_ex:mesh_flower_corners}
    \end{subfigure}
    \caption{Mesh of the flower}
    \label{qdt_ex:mesh_flower}
\end{figure}
\begin{figure}[h!]
    \begin{subfigure}[b]{1\linewidth}
        \centering
        \scalebox{0.3}{
            \includegraphics{quadtree/ex_images/qdt_ex_mesh_wolli.eps}
        }
        \caption{Mesh for the Woolworths logo: different colors for different materials}
    \end{subfigure}
    \\
    \begin{subfigure}[b]{1\linewidth}
        \centering
        \scalebox{0.3}{
            \includegraphics{quadtree/ex_images/qdt_ex_mesh_wolli_sharp_corner.eps}
        }
        \caption{Mesh for the Woolworths logo: sharp corner treatment}
        \label{qdt_ex:mesh_wolli_logo_sharp}
    \end{subfigure}
    \caption{Mesh of the wolli logo}
    \label{qdt_ex:mesh_wolli_logo}
\end{figure}
{ "alphanum_fraction": 0.675464907, "avg_line_length": 36.2391304348, "ext": "tex", "hexsha": "bf74b25283a5bb041291143615b74cd9afda4523", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "c397ddc18e5ff5d6e9b8d6de2e53be4c9c7b7a2d", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "fa93hws/thesis", "max_forks_repo_path": "quadtree/ex_other_mesh.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "c397ddc18e5ff5d6e9b8d6de2e53be4c9c7b7a2d", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "fa93hws/thesis", "max_issues_repo_path": "quadtree/ex_other_mesh.tex", "max_line_length": 136, "max_stars_count": 1, "max_stars_repo_head_hexsha": "c397ddc18e5ff5d6e9b8d6de2e53be4c9c7b7a2d", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "fa93hws/thesis", "max_stars_repo_path": "quadtree/ex_other_mesh.tex", "max_stars_repo_stars_event_max_datetime": "2019-10-30T12:14:47.000Z", "max_stars_repo_stars_event_min_datetime": "2019-10-30T12:14:47.000Z", "num_tokens": 479, "size": 1667 }
%!TEX root = forallx-adl.tex
\thispagestyle{empty}
\onecolumn
\subsubsection*{Acknowledgements}
\addtocontents{toc}{\vspace{\normalbaselineskip}}
\addcontentsline{toc}{chapter}{Acknowledgements, etc.}
Antony Eagle would like to thank P.D.\ Magnus and Tim Button for their work from which this text derives, and those acknowledged below who helped them. Thanks also to Atheer Al-Khalfa, Caitlin Bettess, Andrew Carter, Keith Dear, Jack Garland, Bowen Jiang, Millie Lewis, Yaoying Li, Jon Opie, Matt Nestor, Jaime von Schwarzburg, and Mike Walmer for comments on successive versions of the Adelaide text.\medskip

Tim Button would like to thank P.D.\ Magnus for his extraordinary act of generosity, in making \forallx\ available to everyone. Thanks also to Alfredo Manfredini Böhm, Sam Brain, Felicity Davies, Emily Dyson, Phoebe Hill, Richard Jennings, Justin Niven, and Igor Stojanovic for noticing errata in earlier versions. \medskip

P.D.\ Magnus would like to thank the people who made this project possible. Notable among these are Cristyn Magnus, who read many early drafts; Aaron Schiller, who was an early adopter and provided considerable, helpful feedback; {and} Bin Kang, Craig Erb, Nathan Carter, Wes McMichael, Selva Samuel, Dave Krueger, Brandon Lee, and the students of Introduction to Logic, who detected various errors in previous versions of the book.

\subsubsection*{About the Authors}

Antony Eagle is Associate Professor of Philosophy at the University of Adelaide. His research interests include metaphysics, philosophy of probability, philosophy of physics, and philosophy of logic and language. \href{https://antonyeagle.org}{\nolinkurl{antonyeagle.org}} \medskip

Tim Button is a Lecturer in Philosophy at UCL. His first book, \emph{The Limits of Realism}, was published by Oxford University Press in 2013. \href{http://www.homepages.ucl.ac.uk/~uctytbu/index.html}{\nolinkurl{www.homepages.ucl.ac.uk/~uctytbu/index.html}} \medskip

P.D.\ Magnus is a professor at the University at Albany, State University of New York. His primary research is in the philosophy of science. \href{https://www.fecundity.com/job/}{\nolinkurl{www.fecundity.com/job/}}

\vfill

In the Introduction to his \emph{Symbolic Logic}, Charles Lutwidge Dodgson advised:
\begin{quote}
When you come to any passage you don't understand, \emph{read it again}: if you \emph{still} don't understand it, \emph{read it again}: if you fail, even after \emph{three} readings, very likely your brain is getting a little tired. In that case, put the book away, and take to other occupations, and next day, when you come to it fresh, you will very likely find that it is \emph{quite} easy.
\end{quote}
The same might be said for this volume, although readers are forgiven if they take a break for snacks after \emph{two} readings.
{ "alphanum_fraction": 0.780720339, "avg_line_length": 65.8604651163, "ext": "tex", "hexsha": "d7f3fa22323ed571ded428d0f795d27831d665a4", "lang": "TeX", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2020-11-03T05:39:59.000Z", "max_forks_repo_forks_event_min_datetime": "2020-11-03T05:39:59.000Z", "max_forks_repo_head_hexsha": "4252b950acd2ad00e26130ee79c971f13cafa5ac", "max_forks_repo_licenses": [ "CC-BY-4.0" ], "max_forks_repo_name": "antonyeagle/forallx-adl", "max_forks_repo_path": "forallx-adl-backmatter.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "4252b950acd2ad00e26130ee79c971f13cafa5ac", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "CC-BY-4.0" ], "max_issues_repo_name": "antonyeagle/forallx-adl", "max_issues_repo_path": "forallx-adl-backmatter.tex", "max_line_length": 434, "max_stars_count": null, "max_stars_repo_head_hexsha": "4252b950acd2ad00e26130ee79c971f13cafa5ac", "max_stars_repo_licenses": [ "CC-BY-4.0" ], "max_stars_repo_name": "antonyeagle/forallx-adl", "max_stars_repo_path": "forallx-adl-backmatter.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 718, "size": 2832 }
\documentclass[12pt]{article} \usepackage{amsmath} \usepackage{amsfonts} \usepackage{graphicx} \setlength{\parindent}{0em} \setlength{\parskip}{0.5em} \title{CPM Vehicle Parameter Identification and Model Predictive Control} \author{Janis Maczijewski} \date{\today} \begin{document} \maketitle \section{Vehicle Dynamics Model} This is an end-to-end, grey-box model for the vehicle dynamics. The model parameters are not measured directly, but optimized to best fit the vehicle behavior. \begin{align*} \boldsymbol{x} &= [p_x, p_y, \psi, v] \\ \boldsymbol{u} &= [f, \delta, V] \\ \end{align*} \begin{center} \begin{tabular}{ r | l } $p_x$ & IPS x-position \\ $p_y$ & IPS y-position \\ $\psi$ & IPS yaw angle \\ $v$ & Odometer Speed \\ $f$ & Dimensionless motor command \\ $\delta$ & Dimensionless steering command \\ $V$ & Battery voltage \\ \end{tabular} \end{center} \begin{align} \dot{p}_x &= p_1 \cdot v \cdot (1+p_2 \cdot (\delta + p_{9})^2) \cdot \cos(\psi + p_3 \cdot (\delta + p_{9}) + p_{10}) \\ \dot{p}_y &= p_1 \cdot v \cdot (1+p_2 \cdot (\delta + p_{9})^2) \cdot \sin(\psi + p_3 \cdot (\delta + p_{9}) + p_{10}) \\ \dot{\psi} &= p_4 \cdot v \cdot (\delta + p_{9}) \\ \dot{v} &= p_5 \cdot v + (p_6 + p_7 \cdot V) \cdot \text{sign}(f) \cdot |f|^{p_8} \end{align} This is a kinematic bicycle model with some added terms to account for various errors. \begin{itemize} \item $p_1$: Compensate calibration error between IPS speed and odometer speed. \item $(1+p_2 \cdot (\delta + p_{9})^2)$: Compensate for speed differences due to different reference points between the IPS and odometer. The formulation is simplified with a second-order Taylor approximation. \item $p_3$: Side slip angle (Schwimmwinkel) due to steering. \item $p_{10}$: IPS Yaw calibration error. \item $p_{4}$: Unit conversion for the steering state. \item $p_{5}$: Speed low pass (PT1). \item $p_{6}, p_{7}$: Motor strength depends on the battery voltage. \item $p_{8}$: Compensate non-linear steady-state speed. \item $p_{9}$: Steering misalignment correction. \end{itemize} \section{Model Discretization} The model is discretized with the explicit Euler method, as follows: \begin{align} \boldsymbol{x}_{k+1} = \boldsymbol{x}_k + \Delta t \cdot f(\boldsymbol{x}_k, \boldsymbol{u}_k, \boldsymbol{p}) \end{align} This discretization is chosen for its simplicity and computational efficiency. TODO justification inaccuracy: small timestep, and discretization is included in identification. \section{Parameter Identification} Optimal parameter estimation problem for the vehicle dynamics. The optimization tries to find a set of model parameters, that best explain/reproduce the experiment data. \begin{align} \underset{\boldsymbol{x}_k^j, \boldsymbol{p}}{\text{minimize}} && \sum_{j=1}^{n_{experiments}} \sum_{k=1}^{n_{timesteps}} E(\boldsymbol{x}_k^j - \hat{\boldsymbol{x}}_k^j) \\ \text{subject to} && \boldsymbol{x}_{k+1}^j = \boldsymbol{x}_k^j + \Delta t \cdot f(\boldsymbol{x}_k^j, \hat{\boldsymbol{u}}_k^j, \boldsymbol{p}) \\ && \quad k=1..(n_{timesteps}-1) \\ && \quad j=1..n_{experiments} \\ \end{align} \begin{center} \begin{tabular}{ r | l } $\hat{\boldsymbol{x}}_k^j$ & Measured States \\ $\hat{\boldsymbol{u}}_k^j$ & Measured Inputs \\ $f$ & Vehicle dynamics model \\ $\boldsymbol{p}$ & Model parameters \\ $\Delta t$ & Constant timestep $0.02s$ \\ $E$ & Error penalty function \\ \end{tabular} \end{center} \textbf{Error penalty $E$}: Weighted quadratic error with model specific extensions. 
The yaw error function has a period of $2\pi$, so that a full rotation does not count as an error. This is done using $\sin(\Delta\psi/2)^2$. \textbf{Delays}: This kind of optimization problem is not well suited for identifying the delay times (Totzeiten). The delays are solved in an outer loop. The delay is guessed/assumed and the measurement data is modified by appropriately shifting it in the time index $k$. This optimization problem is solved many times for combinations of delay times. The delays that create the lowest objective value are taken as the solution. \section{Model Predictive Control} The MPC uses the identified model to calculate control inputs in real time. The MPC must run in an environment with narrow computational constraints. It must run on a Raspberry Pi Zero W and in less than 10ms per time step. This results in a computational budget of roughly 100000 to 500000 double precision floating point operations per time step. \subsection{Optimization Problem and Optimizer} The MPC is formulated as a (mostly) unconstrained minimization problem, with a box constraint: \begin{align} \underset{z \in \mathbb{R}^n}{\text{minimize}} \quad & J(z) \\ \text{subject to} \quad & -1 \leq z_i \leq 1 \end{align} The optimization problem is solved using a simple and lightweight method, the gradient descent method with momentum. The method works by iterating the following two equations: \begin{align*} m^{(j)} &:= \beta m^{(j-1)} - \nabla J(z^{(j-1)}) \\ z^{(j)} &:= \text{clip}(z^{(j-1)} + \alpha m^{(j)}) \end{align*} where $z^{(j)}$ is the approximate solution, which is improved in each iteration, $m^{(j)}$ is the momentum term, $\alpha = 0.4$ and $\beta = 0.6$ are constants, $\text{clip}(\cdot)$ applies the following function element-wise $\min(1, \max(-1, x_i))$ and $\nabla J(z^{(j-1)})$ is the gradient of the objective. A constant number of iterations $j = 1 ... N$ are executed. This guarantees a constant computation time, but not the convergence to the solution. The approximate solution $z^{(N)}$ is however sufficient in practice. \subsection{Trajectory Tracking Problem} The goal of the MPC is to make the vehicle follow a given reference trajectory. The reference trajectory is given as a Cubic Hermite spline function $r(t) = [p_{x,ref}, p_{y,ref}]$. This function is evaluated on an appropriate time grid $t_k$ to give the discrete reference trajectory $[p_{x,ref,k}, p_{y,ref,k}]$ where $k = 1 ... H_p$. The MPC objective is defined as follows: \begin{align*} J &= \sum_{k=1}^{H_p} \left[ (p_{x,k} - p_{x,ref,k})^2 + (p_{y,k} - p_{y,ref,k})^2 \right] \\ &+ 0.5 \sum_{k=1}^{H_u} (f_k - f_{k-1})^2 \\ &+ 0.01 \sum_{m=1}^{H_u} (\delta_{m} - \delta_{m-1})^2 \end{align*} The predicted states are calculated explicitly and recursively. This is known as a single shooting method. \begin{align*} \begin{bmatrix} p_{x,k+1} \\ p_{y,k+1} \\ \psi_{k+1} \\ v_{k+1} \end{bmatrix} = \begin{bmatrix} p_{x,k} \\ p_{y,k} \\ \psi_{k} \\ v_{k} \end{bmatrix} + \Delta t_{MPC} \cdot f\left( \begin{bmatrix} p_{x,k} \\ p_{y,k} \\ \psi_{k} \\ v_{k} \end{bmatrix} , \begin{bmatrix} f_{\boldsymbol{m}_k} \\ \delta_{\boldsymbol{m}_k} \\ V_{\boldsymbol{m}_k} \end{bmatrix} , \boldsymbol{p}\right), \quad k = 0 ... (H_p-1) \end{align*} where $f$ is the identified model from section (ref). $\boldsymbol{m}$ is an index vector that allows reuse of input vectors. Current implementation $\boldsymbol{m} = [1, 1, 2, 2, 3, 3]$. $H_p = 6$, $H_u = 3$ The MPC uses a longer discretization time step $\Delta t_{MPC} = 0.05s$. 
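As a purely illustrative transcription of the optimizer iteration described above (gradient descent with momentum and box clipping), the following Python sketch shows the update rule; grad_J is a placeholder for the gradient of the MPC objective (e.g. generated with CasADi) and the iteration count is only an example value, not the one used on the vehicle.

\begin{verbatim}
# Sketch of the projected gradient descent with momentum used by the MPC.
# grad_J(z) is a placeholder returning the gradient of the objective J at z.
import numpy as np

def solve(z, grad_J, alpha=0.4, beta=0.6, N=20):
    m = np.zeros_like(z)
    for _ in range(N):                         # fixed number of iterations
        m = beta * m - grad_J(z)               # momentum update
        z = np.clip(z + alpha * m, -1.0, 1.0)  # step, then clip to the box
    return z
\end{verbatim}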
The inputs for the trajectory tracking problem are:
\begin{itemize}
    \item $[p_{x,0}, p_{y,0}, \psi_{0}, v_{0}]$: The measured state with delay compensation
    \item $[f_{0}, \delta_{0}]$: The previous command
    \item $[p_{x,ref,k}, p_{y,ref,k}]$: The discrete reference trajectory
    \item $V_m$: The measured battery voltage
    \item $\boldsymbol{p}$: The model parameters
\end{itemize}

The variables for the trajectory tracking problem are: $z = [f_1, ..., f_{H_u}, \delta_1, ..., \delta_{H_u}]$.

\subsection{Delay Compensation}

The process of measuring the state, running the MPC controller and applying the new command introduces a delay in the control loop. This delay is compensated by performing a short simulation after measuring the state and before running the MPC. Thus, the MPC will optimize the inputs for a future state, which matches the time at which the new commands take effect.

This simulation requires some command inputs. These are the MPC commands from the previous timesteps.

The simulation runs for 3 samples, or 60ms.

\subsection{CasADi and Code Generation}

The trajectory tracking problem and the optimizer are implemented symbolically with Matlab and CasADi. CasADi's code generator for C is used to obtain an efficient implementation for the Raspberry Pi.

\subsection{Finetuning}

The controller implementation collects statistical data about the trajectory tracking errors in real time.

TODO play with all the MPC hyperparameters to minimize the tracking error.

\end{document}
{ "alphanum_fraction": 0.7065681635, "avg_line_length": 36.7076271186, "ext": "tex", "hexsha": "00c63e12b8e2837758cbe6c5248141ec1a33b900", "lang": "TeX", "max_forks_count": 2, "max_forks_repo_forks_event_max_datetime": "2022-03-15T13:50:54.000Z", "max_forks_repo_forks_event_min_datetime": "2021-11-08T11:59:29.000Z", "max_forks_repo_head_hexsha": "e2e6f4ace4ebc01e8ddd87e2f4acf13e6ffdcc67", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "Durrrr95/cpm_lab", "max_forks_repo_path": "tools/vehicle_dynamics_identification_and_mpc/documentation/main.tex", "max_issues_count": 1, "max_issues_repo_head_hexsha": "e2e6f4ace4ebc01e8ddd87e2f4acf13e6ffdcc67", "max_issues_repo_issues_event_max_datetime": "2021-05-10T13:48:04.000Z", "max_issues_repo_issues_event_min_datetime": "2021-05-10T13:48:04.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "Durrrr95/cpm_lab", "max_issues_repo_path": "tools/vehicle_dynamics_identification_and_mpc/documentation/main.tex", "max_line_length": 429, "max_stars_count": 9, "max_stars_repo_head_hexsha": "e2e6f4ace4ebc01e8ddd87e2f4acf13e6ffdcc67", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "Durrrr95/cpm_lab", "max_stars_repo_path": "tools/vehicle_dynamics_identification_and_mpc/documentation/main.tex", "max_stars_repo_stars_event_max_datetime": "2022-01-13T14:14:13.000Z", "max_stars_repo_stars_event_min_datetime": "2020-06-24T11:22:15.000Z", "num_tokens": 2614, "size": 8663 }
\documentclass[/../base.tex]{subfiles} \begin{document} \section{Data} \label{data} The data were collected from Datastream on July 2nd 2015. The countries under consideration - France, Greece, Ireland, Italy, Portugal, Spain, and the Netherlands, with Germany included for reference - comprise three `core' and five `periphery' Eurozone states. This allows comparative analysis between sovereigns in different regions. Though the exclusion of smaller Euro-area countries such as Slovakia or Finland may induce sample selection bias, the eight nations listed above were the only single currency members for whom daily data are available over the entire sample period, for all variables. The variable of interest in all specifications is the end-of-day yield on a ten-year, zero-coupon bond, taken as a deviation from the German equivalent. The results of an Augmented Dickey-Fuller (ADF) test are reported in Table \ref{tab:adf}. The null of a unit root cannot be rejected at any reasonable confidence level for any country, with the exception of the Netherlands at 10\%. Differencing the series results in comfortable rejection of the null, and this forms the final dependent variable. \begin{table}[h] \centering \begin{threeparttable} \caption{Augmented Dickey Fuller Statistics} \begin{tabular}{l c*{3}{c}} \toprule \toprule Country & bsp & $\Delta$bsp\\ \midrule Spain & -0.8594 & -33.1702 \\ France & -1.2618 & -32.3315\\ Greece & -0.6546 & -33.1763 \\ Ireland & -0.8241 & -29.1334 \\ Italy & -0.8146 & -35.2573 \\ Portugal & -0.7628 & -30.0825\\ Netherlands & -1.3654 & -33.7947 \\ \bottomrule \bottomrule \end{tabular} \centering \begin{tablenotes} \small \item Notes: Critical values: -1.2816 (10\%), -1.6449 (5\%), -2.3263 (1\%) H$_0$: At least one unit root. \end{tablenotes} \label{tab:adf} \end{threeparttable} \end{table} A plot of the differenced bond spreads for each country, shown in Figure \ref{fig:dbsp}, clearly demonstrates volatility clustering. Initially, this is helpful for generating crisis dates, where the indicator function is triggered when spreads exceed a given multiple of the standard deviation. Once the crisis dates are generated, however, volatility persistence could have implications for analysing contagion. Firstly, as with all heteroskedasticity that is unaccounted for, linear estimators will be less efficient. Standard errors will be underestimated and the null rejected too frequently. Importantly for the analysis presented here, neglecting volatility is likely to lead to more frequent rejection of the null of no contagion. As a result, a heteroskedastic and autocorrelation (HAC) robust covariance estimator is used. \begin{figure} \centering \includegraphics[width = \textwidth]{../../Data/graphics/d_bsp_graph.pdf} \caption{Differenced bond spreads for sample countries.} \label{fig:dbsp} \end{figure} Many recent analyses of market reactions to sovereign default risk have used credit default swap (CDS) spreads as a dependent variable. It is important to note that, while ostensibly determined by a similar underlying process, the evolution of CDS premia and bond yields is not identical. As outlined in \cite{fontana2010analysis} and \cite{beirne2013pricing}, the former measure suffers from several complications relating to investor risk-appetite and market liquidity that make it less suitable for drawing policy related conclusions. Common factor variables are included to account for any Europe-wide shocks. 
This is especially important when examining systemic crises, where overall market risk appetite or attitude to uncertainty may change in response. Following \cite{metiu2012sovereign}, the lagged spread between the Euro Interbank Offered Rate (Euribor) and German Treasury bills is used as a general European risk premium, and the log-differenced VSTOXX index is interpreted as the change in market-expected volatility. \cite{giordano2013pure} use the VIX index as an alternative to the latter, though this is based on U.S. stock volatility and so is better seen as a measure of global risk conditions. As noted in Section \ref{est}, identification of the contagion coefficient under a GIVE setup requires the presence of country specific factors to use as instruments for crisis dummies in the other country equations. A number of candidates could be suggested, however an intransigent issue in estimating linkage models is the low-frequency of macro data. Daily observations of the dependent variable are required to evade the endogeneity problems discussed in Section \ref{dating_methodology}. Even weekly or monthly observations are almost certainly too infrequent to capture the true response to a shock. With variables such as output and employment available only in monthly or quarterly varieties, it is hard to justify their use, despite their intuitive appeal. Excluding these indicators, however, may introduce new problems. It may be the case that the country-specific variables available at a daily frequency are insufficient to identify the contagion coefficient, as discussed in Section \ref{dating_methodology}. In this case, again following \cite{metiu2012sovereign}, a Euro denominated daily domestic S\&P stock index for each country is used. There are obvious endogeneity concerns, as it is highly likely that equity prices are determined at least partially by the common factors. However, likely due to lack of available alternatives, these indices have become a standard tool in the literature. \end{document}
{ "alphanum_fraction": 0.7816663706, "avg_line_length": 98.7543859649, "ext": "tex", "hexsha": "cd1d5587c2cfd6e663caab8da0da5260b182cd28", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "586537a2a8ac8a0e9444ea1fe6aa17959e555f6a", "max_forks_repo_licenses": [ "CC0-1.0" ], "max_forks_repo_name": "mjgcos/dissertation", "max_forks_repo_path": "Document/tex/sections/data.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "586537a2a8ac8a0e9444ea1fe6aa17959e555f6a", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "CC0-1.0" ], "max_issues_repo_name": "mjgcos/dissertation", "max_issues_repo_path": "Document/tex/sections/data.tex", "max_line_length": 834, "max_stars_count": null, "max_stars_repo_head_hexsha": "586537a2a8ac8a0e9444ea1fe6aa17959e555f6a", "max_stars_repo_licenses": [ "CC0-1.0" ], "max_stars_repo_name": "mjgcos/dissertation", "max_stars_repo_path": "Document/tex/sections/data.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1336, "size": 5629 }
\chapter{Hoare Logic}
In this chapter we introduce \href{http://en.wikipedia.org/wiki/Hoare_logic}{\emph{Hoare logic}}. This is a formal system that is used to prove the correctness of imperative computer programs. Hoare logic was introduced in 1969 by \href{http://en.wikipedia.org/wiki/C._A._R._Hoare}{Sir Charles Antony Richard Hoare}, who is the inventor of the \href{http://en.wikipedia.org/wiki/Quicksort}{quicksort} algorithm.

\section{Preconditions and Postconditions}
Hoare logic is based on preconditions and postconditions. If \texttt{P} is a program fragment and if $F$ and $G$ are logical formul\ae, then we call $F$ a precondition and $G$ a postcondition for the program fragment \texttt{P} if the following holds: If \texttt{P} is executed in a state $s$ such that the formula $F$ holds in $s$, then the execution of \texttt{P} will change the state $s$ into a new state $s'$ such that $G$ holds in $s'$. This is written as \\[0.2cm]
\hspace*{1.3cm} $ \hoare{F}{P}{G} $. \\[0.2cm]
We will read this notation as ``\emph{executing $P$ changes $F$ into $G$}''. The formula \\[0.2cm]
\hspace*{1.3cm} $ \hoare{F}{P}{G} $ \\[0.2cm]
is called a \emph{Hoare triple}.

\vspace*{0.3cm}

\examplesEng
\begin{enumerate}
\item The assignment ``\texttt{x := 1;}'' satisfies the specification \\[0.2cm]
      \hspace*{1.3cm} $ \hoare{\mathtt{true}}{x := 1;}{x = 1}. $ \\[0.2cm]
      Here, the precondition is the trivial condition ``\texttt{true}'', since the postcondition ``$x = 1$'' will always be satisfied after this assignment.
\item The assignment ``\texttt{x := x + 1;}'' satisfies the specification \\[0.2cm]
      \hspace*{1.3cm} $ \hoare{x=1}{x := x + 1;}{x=2}. $ \\[0.2cm]
      If the precondition is ``$x = 1$'', then it is obvious that the postcondition has to be ``$x = 2$''.
\item Let us consider the assignment ``\texttt{x := x + 1;}'' again. However, this time the precondition is given as ``$\textsl{prime}(x)$'', which is only true if $x$ is a prime number. This time, the Hoare triple is given as \\[0.2cm]
      \hspace*{1.3cm} $ \hoare{\textsl{prime}(x)}{x := x + 1;}{\textsl{prime}(x-1)}$. \\[0.2cm]
      This might look strange at first. Many students think that this Hoare triple should rather be written as \\[0.2cm]
      \hspace*{1.3cm} $ \hoare{\textsl{prime}(x)}{x := x + 1;}{\textsl{prime}(x+1)} $. \\[0.2cm]
      However, this can easily be refuted by taking $x$ to have the value $2$. Then, the precondition $\textsl{prime}(x)$ is satisfied since $2$ is a prime number. After the assignment, $x$ has the value $3$ and \\[0.2cm]
      \hspace*{1.3cm} $x - 1 = 3 - 1 = 2$ \\[0.2cm]
      still is a prime number. However, we also have \\[0.2cm]
      \hspace*{1.3cm} $x + 1 = 3 + 1 = 4$ \\[0.2cm]
      and as $4 = 2 \cdot 2$ we see that $x + 1$ is not a prime number!
\end{enumerate}
Let us proceed to show how the different parts of a program can be specified using Hoare triples. We start with the analysis of assignments.

\subsection{Assignments}
Let us generalize the previous example. Let us therefore assume that we have an assignment of the form \\[0.2cm]
\hspace*{1.3cm} $ \texttt{x := h(x);} $ \\[0.2cm]
and we want to investigate how the postcondition $G$ of this assignment is related to the precondition $F$. To simplify matters, let us assume that the function $h$ is invertible, i.~e.~we assume that there is a function $h^{-1}$ such that we have \\[0.2cm]
\hspace*{1.3cm} $ h^{-1}\bigl(h(x)\bigr) = x \quad \mathtt{and} \quad h\bigl(h^{-1}(x)\bigr) = x $ \\[0.2cm]
for all $x$. Then, the function $h^{-1}$ is the inverse of the function $h$.
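For instance, for the assignment ``\texttt{x := 2 * x;}'' we may take \\[0.2cm]
\hspace*{1.3cm} $ h(x) = 2 \cdot x \quad \mathtt{and} \quad h^{-1}(x) = x / 2, $ \\[0.2cm]
since $h^{-1}\bigl(h(x)\bigr) = (2 \cdot x) / 2 = x$ for all $x$.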
In order to understand the problem of computing the postcondition for the assignment statement given above, let us first consider an example. The assignment \\[0.2cm] \hspace*{1.3cm} $ \texttt{x := x + 1;} $ \\[0.2cm] can be written as \\[0.2cm] \hspace*{1.3cm} $ \texttt{x := h(x);} $ \\[0.2cm] where the function $h$ is given as \\[0.2cm] \hspace*{1.3cm} $ h(x) = x + 1 $ \\[0.2cm] and the inverse function $h^{-1}$ is \\[0.2cm] \hspace*{1.3cm} $h^{-1}(x) = x - 1$. \\[0.2cm] Now we are able to compute the postcondition of the assignment ``\texttt{x := h(x);}'' from the precondition. We have \\[0.2cm] \hspace*{1.3cm} $\hoare{F}{x := h(x);}{F\sigma}$ \quad where \quad $\sigma = \bigl[x \mapsto h^{-1}(x)\bigr]$. \\[0.2cm] Here, $F\sigma$ denotes the application of the substitution $\sigma$ to the formula $F$. The expression $F\sigma$ is computed from the expression $F$ by replacing every occurrence of the variable $x$ by the term $h^{-1}(x)$. Therefore, the substitution $\sigma$ undoes the effect of the assignment and restores the variables in $F$ to the state before the assignment. In order to understand why this is the correct way to compute the postcondition, we consider the assignment ``\texttt{x := x + 1}'' again and choose the formula $x = 7$ as precondition. Since $h^{-1}(x) = x - 1$, the substitution $\sigma$ is given as $\sigma = [ x \mapsto x - 1 ]$. Therefore, $F\sigma$ has the form \\[0.2cm] \hspace*{1.3cm} $ (x = 7)[x \mapsto x - 1] \;\equiv\; (x - 1 = 7). $ \\[0.2cm] I have used the symbol ``$\equiv$'' here in order to express that these formul\ae\ are syntactically identical. Therefore, we have \\[0.2cm] \hspace*{1.3cm} $ \hoare{x = 7}{x := x + 1;}{x - 1 = 7}. $ \\[0.2cm] Since the formula $x - 1 = 7$ is equivalent to the formula $x = 8$ the Hoare triple above can be rewritten as \\[0.2cm] \hspace*{1.3cm} $ \hoare{x = 7}{x := x + 1;}{x = 8} $ \\[0.2cm] and this is obviously correct: If the value of $x$ is $7$ before the assignment \\[0.2cm] \hspace*{1.3cm} ``\texttt{x := x + 1;}'' \\[0.2cm] is executed, then after the assignment is executed, $x$ will have the value $8$. Let us try to understand why \\[0.2cm] \hspace*{1.3cm} $\hoare{F}{x := h(x);}{F\sigma}$ \quad where \quad $\sigma = \bigl[x \mapsto h^{-1}(x)\bigr] $ \\[0.2cm] is, indeed, correct: Before the assignment ``x \texttt{:=} h(x);'' is executed, the variable $x$ has some fixed value $x_0$. The precondition $F$ is valid for $x_0$. Therefore, the formula $F[x \mapsto x_0]$ is valid before the assignment is executed. However, the variable $x$ does not occur in the formula $F[x \mapsto x_0]$ because it has been replaced by the fixed value $x_0$. Therefore, the formula \\[0.2cm] \hspace*{1.3cm} $ F[x \mapsto x_0] $ \\[0.2cm] remains valid after the assignment ``\texttt{x = h(x);}'' is executed. After this assignment, the variable $x$ is set to $h(x_0)$. Therefore, we have \\[0.2cm] \hspace*{1.3cm} $x = h(x_0)$. \\[0.2cm] Let us solve this equation for $x_0$. We find \\[0.2cm] \hspace*{1.3cm} $h^{-1}(x) = x_0$. \\[0.2cm] Therefore, after the assignment the formula \\[0.2cm] \hspace*{1.3cm} $ F[x \mapsto x_0] \equiv F[x \mapsto h^{-1}(x)]$ \\[0.2cm] is valid and this is the formula that is written as $F\sigma$ above. We conclude this discussion with another example. The unary predicate \textsl{prime} checks whether its argument is a prime number. Therefore, $\textsl{prime}(x)$ is true if $x$ is a prime number. Then we have \\[0.2cm] \hspace*{1.3cm} $\hoare{\textsl{prime}(x)}{x := x + 1;}{\textsl{prime}(x-1)}$. 
\\[0.2cm] The correctness of this Hoare triple should be obvious: If $x$ is a prime and if $x$ is then incremented by $1$, then afterwards $x-1$ is prime. \paragraph{Different Forms of Assignments} Not all assignments can be written in the form ``\texttt{x := h(x);}'' where the function $h$ is invertible. Often, a constant $c$ is assigned to some variable $x$. If $x$ does not occur in the precondition $F$, then we have \\[0.2cm] \hspace*{1.3cm} $ \hoare{F}{x := c;}{F \wedge x = c}. $ \\[0.2cm] The formula $F$ can be used to restrict the values of other variables occurring in the program under consideration. \paragraph{General Form of the Assignment Rule} In the literature the rule for specifying an assignment is given as \\[0.2cm] \hspace*{1.3cm} $ \hoare{F[x \mapsto t]}{x := t;}{F}. $ \\[0.2cm] Here, $t$ is an arbitrary term that can contain the variable $x$. This rule can be read as follows: \\[0.2cm] \hspace*{1.3cm} \begin{minipage}[c]{0.8\linewidth} ``\emph{If the formula $F(t)$ is valid in some state and $t$ is assigned to $x$, then after this assignment we have $F(x)$.}'' \end{minipage} \\[0.2cm] This rule is obviously correct. However, it is not very useful because in order to apply this rule we first have to rewrite the precondition as $F(t)$. If $t$ is some complex term, this is often very difficult to do. \subsection{The Weakening Rule} If a program fragment $P$ satisfies the specification \\[0.2cm] \hspace*{1.3cm} $ \hoare{F}{P}{G} $ \\[0.2cm] and if, furthermore, the formula $G$ implies the validity of the formula $H$, that is if \\[0.2cm] \hspace*{1.3cm} $G \rightarrow H$ \\[0.2cm] holds, then the program fragment $P$ satisfies \\[0.2cm] \hspace*{1.3cm} $\hoare{F}{P}{H}$. \\[0.2cm] The reasoning is as follows: If after executing $P$ we know that $G$ is valid, then, since $G$ implies $H$, the formula $H$ has to be valid, too. Therefore, the following \emph{verification rule}, which is known as the \emph{weakening rule}, is valid: \\[0.4cm] $\bruch{\quad \hoare{F}{P}{G}, \qquad G \rightarrow H \quad}{\hoare{F}{P}{H}}$ \\[0.2cm] The formul\ae\ written over the fraction line are called the \emph{premisses} and the formula under the fraction line is called the \emph{conclusion}. The conclusion and the first premiss are Hoare triples, the second premiss is a formula of first order logic. The interpretation of this rule is that the conclusion is true if the premisses are true. \subsection{Compound Statements} If the program fragments $\texttt{P}$ and $\texttt{Q}$ have the specifications \\[0.2cm] \hspace*{1.3cm} $ \hoare{F_1}{P}{G_1}$ \quad and \quad $\hoare{F_2}{Q}{G_2}$ \\[0.2cm] and if, furthermore, the postcondition $G_1$ implies the precondition $F_2$, then the composition $\texttt{P;Q}$ of $\texttt{P}$ and $\texttt{Q}$ satisfies the specification \\[0.2cm] \hspace*{1.3cm} $\hoare{F_1}{P;Q}{G_2}$. \\[0.2cm] The reasoning is as follows: If, initially, $F_1$ is satisfied and we execute $P$ then we have $G_1$ afterwards. Therefore we also have $F_2$ and if we now execute $Q$ then afterwards we will have $G_2$. 
This chain of reasoning is captured by the following verification rule:
\\[0.4cm]
\hspace*{1.3cm}
$\bruch{\quad\hoare{F_1}{P}{G_1}, \qquad G_1 \rightarrow F_2, \qquad \hoare{F_2}{Q}{G_2}\quad}{\hoare{F_1}{P;Q}{G_2}} $
\\[0.2cm]
If the formul\ae\ $G_1$ and $F_2$ are identical, then this rule can be simplified as follows:
\\[0.4cm]
\hspace*{1.3cm}
$\bruch{\quad\hoare{F_1}{P}{G_1}, \qquad \hoare{G_1}{Q}{G_2}\quad}{ \hoare{F_1}{P;Q}{G_2}}$

\exampleEng
Let us analyse the program fragment shown in Figure \ref{fig:swap}.  We start our analysis by
using the precondition
\\[0.2cm]
\hspace*{1.3cm}
$ \texttt{x} = a \wedge \texttt{y} = b. $
\\[0.2cm]
Here, $a$ and $b$ are two variables that we use to store the initial values of \texttt{x} and
\texttt{y}.  The first assignment yields the Hoare triple
\\[0.2cm]
\hspace*{1.3cm}
$\hoare{\texttt{x} = a \wedge \texttt{y} = b}{x := x - y;}{(\texttt{x} = a \wedge \texttt{y} = b)\sigma}$
\\[0.2cm]
where $\sigma = [x \mapsto x + y]$.  The form of $\sigma$ follows from the fact that the function
$x \mapsto x + y$ is the inverse of the function $x \mapsto x - y$.  If we apply $\sigma$ to the
formula $x = a \wedge y = b$ we get
\begin{equation}
  \label{eq:swap1}
  \hoare{\texttt{x} = a \wedge \texttt{y} = b}{x := x - y;}{\texttt{x + y} = a \wedge \texttt{y} = b}.
\end{equation}
The second assignment yields the Hoare triple
\\[0.2cm]
\hspace*{1.3cm}
$ \hoare{\texttt{x + y} = a \wedge \texttt{y} = b}{y := y + x;}{(\texttt{x + y} = a \wedge \texttt{y} = b)\sigma}$
\\[0.2cm]
where $\sigma = [y \mapsto y - x]$.  The reason is that the function $y \mapsto y - x$ is the
inverse of the function $y \mapsto y + x$.  This time, we get
\[
  \hoare{\texttt{x + y} = a \wedge \texttt{y} = b}{y := y + x;}{ \texttt{x + y - x} = a \wedge \texttt{y - x} = b}.
\]
Simplifying the postcondition yields
\begin{equation}
  \label{eq:swap2}
  \hoare{\texttt{x + y} = a \wedge \texttt{y} = b}{y := y + x;}{ \texttt{y} = a \wedge \texttt{y - x} = b}.
\end{equation}
Let us consider the last assignment.  We have
\\[0.2cm]
\hspace*{1.3cm}
$\hoare{\texttt{y} = a \wedge \texttt{y - x} = b}{x := y - x;}{ (\texttt{y} = a \wedge \texttt{y - x} = b)\sigma}$
\\[0.2cm]
where $\sigma = [x \mapsto y - x]$, since the function $x \mapsto y - x$ is its own inverse.
This yields
\[
  \hoare{\texttt{y} = a \wedge \texttt{y - x} = b}{x := y - x;}{ \texttt{y} = a \wedge \texttt{y - (y - x)} = b}
\]
Simplifying the postcondition gives
\begin{equation}
  \label{eq:swap3}
  \hoare{\texttt{y} = a \wedge \texttt{y - x} = b}{x := y - x;}{ \texttt{y} = a \wedge \texttt{x} = b}.
\end{equation}
Combining the Hoare triples (\ref{eq:swap1}), (\ref{eq:swap2}) and (\ref{eq:swap3}) we get
\begin{equation}
  \label{eq:swap}
  \hoare{\texttt{x} = a \wedge \texttt{y} = b}{x:=x-y; y:=y+x; x:=y-x;}{ \texttt{y} = a \wedge \texttt{x} = b}.
\end{equation}
The Hoare triple (\ref{eq:swap}) shows that the program fragment shown in Figure \ref{fig:swap}
swaps the values of the variables $x$ and $y$:  If the value of $x$ is $a$ and $y$ has the value
$b$ before the program is executed, then afterwards $y$ has the value $a$ and $x$ has the value $b$.
The trick shown in Figure \ref{fig:swap} can be used to swap variables without using an auxiliary
variable.  This is useful because when this code is compiled into machine language, the resulting
code will only use two registers.
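
For a concrete check, assume that \texttt{x} has the value $3$ and \texttt{y} has the value $5$
before the program fragment is executed.  Then ``\texttt{x := x - y;}'' sets \texttt{x} to $-2$,
``\texttt{y := y + x;}'' sets \texttt{y} to $5 + (-2) = 3$, and ``\texttt{x := y - x;}'' sets
\texttt{x} to $3 - (-2) = 5$.  Hence the values of \texttt{x} and \texttt{y} have indeed been
swapped, as predicted by the Hoare triple (\ref{eq:swap}).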
\begin{figure}[!ht]
\centering
\begin{Verbatim}[ frame         = lines,
                  framesep      = 0.3cm,
                  labelposition = bottomline,
                  numbers       = left,
                  numbersep     = -0.2cm,
                  xleftmargin   = 0.8cm,
                  xrightmargin  = 0.8cm,
                ]
x := x - y;
y := y + x;
x := y - x;
\end{Verbatim}
\vspace*{-0.3cm}
\caption{A tricky way to swap variables.}
\label{fig:swap}
\end{figure}

\subsection{Conditional Statements}
In order to compute the effect of a conditional of the form
\\[0.2cm]
\hspace*{1.3cm} \texttt{if ($B$) \{ P \} else \{ Q \}}
\\[0.2cm]
let us assume that before the conditional statement is executed, the precondition $F$ is
satisfied.  We have to analyse the effect of the program fragments $P$ and $Q$.  The program
fragment $P$ is only executed when $B$ is true.  Therefore, the precondition for $P$ is
$F \wedge B$.  On the other hand, the precondition for the program fragment $Q$ is
$F \wedge \neg B$, since $Q$ is only executed if $B$ is false.  Hence, we have the following
verification rule:
\begin{equation}
  \label{eq:hoareIf}
  \bruch{\quad\hoare{F \wedge B}{P}{G}, \qquad \hoare{F \wedge \neg B}{Q}{G}\quad}{ \hoare{F}{if ($B$) P else Q}{G}}
\end{equation}
In this form, the rule is not always applicable.  The reason is that the analysis of the program
fragments \texttt{P} and \texttt{Q} yields Hoare triples of the form
\begin{equation}
  \label{eq:hoareI}
  \hoare{F \wedge B}{P}{G_1} \qquad \mathrm{and} \qquad \hoare{F \wedge \neg B}{Q}{G_2},
\end{equation}
and in general $G_1$ and $G_2$ will be different from each other.  In order to be able to apply
the rule for conditionals we have to find a formula $G$ that is a consequence of $G_1$ and also a
consequence of $G_2$, i.~e.~we want to have
\[
  G_1 \rightarrow G \qquad \mathrm{and} \qquad G_2 \rightarrow G.
\]
If we find $G$, then the weakening rule can be applied to conclude the validity of
\[
  \hoare{F \wedge B}{P}{G} \qquad \mathrm{and} \qquad \hoare{F \wedge \neg B}{Q}{G},
\]
and this gives us the premisses that are needed for the rule (\ref{eq:hoareIf}).
\pagebreak

\exampleEng
Let us analyze the following program fragment:
\\[0.2cm]
\hspace*{1.3cm} \texttt{if (x < y) \{ z := x; \} else \{ z := y; \}}
\\[0.2cm]
We start with the precondition
\\[0.2cm]
\hspace*{1.3cm}
$F = \bigl(\texttt{x} = a \wedge \texttt{y} = b\bigr)$
\\[0.2cm]
and want to show that the execution of the conditional establishes the postcondition
\\[0.2cm]
\hspace*{1.3cm}
$G = \bigl(z = \textsl{min}(a, b)\bigr)$.
\\[0.2cm]
The first assignment ``\texttt{z := x;}'' gives the Hoare triple
\\[0.2cm]
\hspace*{1.3cm}
$\hoare{\texttt{x} = a \wedge \texttt{y} = b \wedge \texttt{x} < \texttt{y}}{z := x;}{ \texttt{x} = a \wedge \texttt{y} = b \wedge \texttt{x} < \texttt{y} \wedge \texttt{z} = \texttt{x}} $.
\\[0.2cm]
In the same way, the second assignment ``\texttt{z := y;}'' yields
\\[0.2cm]
\hspace*{1.3cm}
$\hoare{\texttt{x} = a \wedge \texttt{y} = b \wedge \texttt{x} \geq \texttt{y}}{z := y;}{ \texttt{x} = a \wedge \texttt{y} = b \wedge \texttt{x} \geq \texttt{y} \wedge \texttt{z} = \texttt{y}}$.
\\[0.2cm]
Since we have
\\[0.2cm]
\hspace*{1.3cm}
$\texttt{x} = a \wedge \texttt{y} = b \wedge \texttt{x} < \texttt{y} \wedge \texttt{z} = \texttt{x} \rightarrow \texttt{z} = \min(a,b)$
\\[0.2cm]
and also
\\[0.2cm]
\hspace*{1.3cm}
$ \texttt{x} = a \wedge \texttt{y} = b \wedge \texttt{x} \geq \texttt{y} \wedge \texttt{z} = \texttt{y} \rightarrow \texttt{z} = \min(a,b) $.
\\[0.2cm] Using the weakening rule we conclude that \begin{eqnarray*} \hoare{\texttt{x} = a \wedge \texttt{y} = b \wedge \texttt{x} < \texttt{y}}{z := x;}{ \texttt{z} = \min(a,b)} & & \mathrm{and} \\ \hoare{\texttt{x} = a \wedge \texttt{y} = b \wedge \texttt{x} \geq \texttt{y}}{z := y;}{ \texttt{z} = \min(a,b)} \end{eqnarray*} holds. Now we can apply the rule for the conditional and conclude that \\[0.2cm] $ \hoare{\texttt{x} = a \wedge \texttt{y} = b}{if (x < y) \{ z := x; \} else \{ z := y; \}}{ \texttt{z} = \min(a,b)} $ \\[0.2cm] holds. Thus we have shown that the program fragment above computes the minimum of the numbers $a$ and $b$. \subsection{Loops} Finally, let us analyze the effect of a loop of the form \\[0.2cm] \hspace*{1.3cm} \texttt{while ($B$) \{ P \}} \\[0.2cm] The important point here is that the postcondition of the $n$-th execution of the body of the loop $P$ is the precondition of the $(n\!+\!1)$-th execution of $P$. Basically this means that the precondition and the postcondition of $P$ have to be more or less the same. Hence, this condition is called the \emph{loop invariant}. Therefore, the details of the verification rule for \texttt{while} loops are as follows: \\[0.2cm] \hspace*{1.3cm} $\bruch{\hoare{I \wedge B}{P}{I}}{\quad \hoare{I}{while ($B$) \{ P \}}{I \wedge \neg B}\quad}$ \\[0.2cm] The premiss of this rule expresses the fact that the invariant $I$ remains valid on execution of $P$. However, since $P$ is only executed as long as $B$ is \texttt{true}, the precondition for $P$ is actually the formula $I \wedge B$. The conclusion of the rule says that if the invariant $I$ is true before the loop is executed, then $I$ will be true after the loop has finished. This result is intuitive since every time $P$ is executed $I$ remains valid. Furthermore, the loop only terminates once $B$ gets \texttt{false}. Therefore, the postcondition of the loop can be strengthened by adding $\neg B$. \section{The Euclidean Algorithm} In this section we show how the verification rules of the last section can be used to prove the correctness of a non-trivial program. We will show that the algorithm shown in Figure \ref{fig:gcd.stlx} on page \pageref{fig:gcd.stlx} is correct. The procedure shown in this figure implements the \href{http://en.wikipedia.org/wiki/Euclidean_algorithm}{\emph{Euclidean algorithm}} to compute the greatest common divisor of two natural numbers. Our proof is based on the following property of the function \texttt{gcd}: \\[0.2cm] \hspace*{1.3cm} $\texttt{gcd}(x + y, y) = \texttt{gcd}(x,y) \quad \mbox{for all $x, y \in \mathbb{N}$}$. \begin{figure}[!ht] \centering \begin{Verbatim}[ frame = lines, framesep = 0.3cm, labelposition = bottomline, numbers = left, numbersep = -0.2cm, xleftmargin = 0.8cm, xrightmargin = 0.8cm, ] gcd := procedure(x, y) { while (x != y) { if (x < y) { y := y - x; } else { x := x - y; } } return x; }; \end{Verbatim} \vspace*{-0.3cm} \caption{The Euclidean Algorithm to compute the greatest common divisor.} \label{fig:gcd.stlx} \end{figure} \subsection{Correctness Proof of the Euclidean Algorithm} To start our correctness proof we formulate the invariant of the \texttt{while} loop. Let us define \\[0.2cm] \hspace*{1.3cm} $ I := \bigl(x > 0 \wedge y > 0 \wedge \texttt{gcd}(x,y) = \texttt{gcd}(a,b) \bigr)$ \\[0.2cm] In this formula we have defined the initial values of $x$ and $y$ as $a$ and $b$. In order to establish the invariant at the beginning we have to ensure that the function \texttt{gcd} is only called with positive natural numbers. 
If we denote these numbers as $a$ and $b$, then the invariant $I$ is valid initially. The reason is that $x = a$ and $y = b$ implies $\texttt{gcd}(x,y) = \texttt{gcd}(a,b)$. In order to prove that the invariant $I$ is maintained in the loop we formulate the Hoare triples for both alternatives of the conditional. For the first conditional we know that \\[0.2cm] \hspace*{1.3cm} $\hoare{I \wedge x \not= y \wedge x < y}{y := y - x;}{(I \wedge x \not= y \wedge x < y)\sigma}$ \\[0.2cm] holds, where $\sigma$ is defined as $\sigma = [y \mapsto y + x]$. Here, the condition $x \not= y$ is the condition controlling the execution of the \texttt{while} loop and the condition $x < y$ is the condition of the \texttt{if} conditional. We rewrite the formula $(I \wedge x \not= y \wedge x < y)\sigma$: \begin{eqnarray*} & & \bigl(I \wedge x \not= y \wedge x < y\bigr)\sigma \\ & \leftrightarrow & \bigl(I \wedge x < y\bigr)\sigma \qquad\qquad \mbox{because $x < y$ implies $x \not= y$} \\ & \leftrightarrow & \bigl(x > 0 \wedge y > 0 \wedge \texttt{gcd}(x,y) = \texttt{gcd}(a,b) \wedge x < y\bigr)[y \mapsto y + x] \\ & \leftrightarrow & x > 0 \wedge y + x > 0 \wedge \texttt{gcd}(x,y+x) = \texttt{gcd}(a,b) \wedge x < y + x \\ & \leftrightarrow & x > 0 \wedge y + x > 0 \wedge \texttt{gcd}(x,y) = \texttt{gcd}(a,b) \wedge 0 < y \end{eqnarray*} In the last step we have used the formula \\[0.2cm] \hspace*{1.3cm} $ \texttt{gcd}(x,y+x) = \texttt{gcd}(x,y) $ \\[0.2cm] and we have simplified the inequality $x < y + x$ as $0 < y$. The last formula implies \\[0.2cm] \hspace*{1.3cm} $ x > 0 \wedge y > 0 \wedge \texttt{gcd}(x,y) = \texttt{gcd}(a,b) $. \\[0.2cm] However, this is precisely the invariant $I$. Therefore we have shown that \begin{equation} \label{eq:if1} \hoare{I \wedge x \not= y \wedge x < y}{y := y - x;}{I} \end{equation} holds. Next, let us consider the second alternative of the \texttt{if} conditional. We have \\[0.2cm] \hspace*{1.3cm} $\hoare{I \wedge x \not= y \wedge x \geq y}{x := x - y;}{(I \wedge x \not= y \wedge x \geq y)\sigma}$ \\[0.2cm] where $\sigma = [x \mapsto x + y]$. The expression $(I \wedge x \not= y \wedge x \geq y)\sigma$ is rewritten as follows: \begin{eqnarray*} & & \bigl(I \wedge x \not= y \wedge x \geq y\bigr)\sigma \\ & \leftrightarrow & \bigl(I \wedge x > y \bigr)\sigma \\ & \leftrightarrow & \bigl(x > 0 \wedge y > 0 \wedge \texttt{gcd}(x,y) = \texttt{gcd}(a,b) \wedge x > y \bigr)[x \mapsto x + y] \\ & \leftrightarrow & x + y > 0 \wedge y > 0 \wedge \texttt{gcd}(x+y,y) = \texttt{gcd}(a,b) \wedge x + y > y \\ & \leftrightarrow & x + y > 0 \wedge y > 0 \wedge \texttt{gcd}(x,y) = \texttt{gcd}(a,b) \wedge x > 0 \end{eqnarray*} The last formula implies that \\[0.2cm] \hspace*{1.3cm} $ x > 0 \wedge y > 0 \wedge \texttt{gcd}(x,y) = \texttt{gcd}(a,b). $ \\[0.2cm] holds. Again, this is our invariant $I$. Therefore we have shown that \begin{equation} \label{eq:if2} \hoare{I \wedge x \not= y \wedge x \geq y}{x := x - y;}{I} \end{equation} holds. If we use the Hoare triples (\ref{eq:if1}) and (\ref{eq:if2}) as premisses for the rule for conditionals we have shown that \\[0.2cm] \hspace*{1.3cm} $ \hoare{I \wedge x \not= y}{if (x < y) \{ y := y - x; \} else \{ x := x - y; \}}{I} $ \\[0.2cm] holds. 
Now the verification rule for \texttt{while} loops yields

\noindent
\hspace*{1.3cm} $\{ I \}$  \\[0.1cm]
\hspace*{2.2cm} \texttt{while (x != y ) \{} \\[0.1cm]
\hspace*{3.2cm} \texttt{if (x < y) \{ y := y - x; \} else \{ x := x - y;\}} \\[0.1cm]
\hspace*{2.2cm} \texttt{\}} \quad \\[0.1cm]
\hspace*{1.3cm} $\{ I \wedge x = y \}$.
\\[0.2cm]
Expanding the invariant $I$ in the formula $I \wedge x = y$ shows that the postcondition of the
\texttt{while} loop is given as
\\[0.2cm]
\hspace*{1.3cm}
$x > 0 \wedge y > 0 \wedge \texttt{gcd}(x,y) = \texttt{gcd}(a,b) \wedge x = y$.
\\[0.2cm]
Now the correctness of the Euclidean algorithm can be established as follows:
\begin{eqnarray*}
  &             & x > 0 \wedge y > 0 \wedge \texttt{gcd}(x,y) = \texttt{gcd}(a,b) \wedge x = y \\
  & \Rightarrow & \texttt{gcd}(x,y) = \texttt{gcd}(a,b) \wedge x = y \\
  & \Rightarrow & \texttt{gcd}(x,x) = \texttt{gcd}(a,b) \\
  & \Rightarrow & x = \texttt{gcd}(a,b) \qquad\qquad \mathrm{because} \quad \texttt{gcd}(x,x) = x.
\end{eqnarray*}
All in all we have shown the following:  If the \texttt{while} loop terminates, then the variable
$x$ will be set to the greatest common divisor of $a$ and $b$, where $a$ and $b$ are the initial
values of the variables $x$ and $y$.

In order to finish our correctness proof we have to show that the \texttt{while} loop does indeed
terminate for all choices of $a$ and $b$.  To this end let us define the variable $s$ as follows:
\\[0.2cm]
\hspace*{1.3cm}
$ s := x + y. $
\\[0.2cm]
The variables $x$ and $y$ are natural numbers.  Therefore $s$ is a natural number, too.  Every
iteration of the loop reduces the number $s$:  either $x$ is subtracted from $s$ or $y$ is
subtracted from $s$, and the invariant $I$ shows that both $x$ and $y$ are positive.  Therefore,
if the \texttt{while} loop were to run forever, at some point $s$ would become negative.  Since
$s$ cannot be negative, the loop must terminate.  Hence we have shown the correctness of the
Euclidean algorithm.
\pagebreak

\exerciseEng
Show that the function $\texttt{power}(x,y)$ that is defined in Figure
\ref{fig:power-iterative.stlx} does compute $x^y$, i.~e.~show that $\texttt{power}(x,y) = x^y$
for all natural numbers $x$ and $y$.

\begin{figure}[!h]
\centering
\begin{Verbatim}[ frame         = lines,
                  framesep      = 0.3cm,
                  labelposition = bottomline,
                  numbers       = left,
                  numbersep     = -0.2cm,
                  xleftmargin   = 1.3cm,
                  xrightmargin  = 1.3cm,
                ]
power := procedure(x, y) {
    r := 1;
    while (y > 0) {
        if (y % 2 == 1) {
            r := r * x;
        }
        x := x * x;
        y := y \ 2;
    }
    return r;
};
\end{Verbatim}
\vspace*{-0.3cm}
\caption{A program to compute $x^y$ iteratively.}
\label{fig:power-iterative.stlx}
\end{figure}

\noindent
\textbf{Hints}:
\begin{enumerate}
\item If the initial values of $x$ and $y$ are called $a$ and $b$, then an invariant for the
      \texttt{while} loop is given as
      \\[0.2cm]
      \hspace*{1.3cm}
      $I := \bigl(r \cdot x^y = a^b\bigr)$.
\item The verification rule for the conditional without \texttt{else} is given as
      \\[0.4cm]
      \hspace*{1.3cm}
      $\bruch{\quad\hoare{F \wedge B}{P}{G}, \qquad F \wedge \neg B \rightarrow G\quad}{ \hoare{F}{if ($B$) \{ P \}}{G}} $
      \\[0.2cm]
      This rule is interpreted as follows:
      \begin{enumerate}
      \item If both the precondition $F$ and the condition $B$ are valid, then execution of the
            program fragment $P$ has to establish the validity of the postcondition $G$.
      \item If the precondition $F$ is valid but we have $\neg B$, then this must imply the
            postcondition $G$.
      \end{enumerate}
\end{enumerate}

\remark
Proving the correctness of a nontrivial program is very tedious.
Therefore, various attempts have been made to automate the task.  For example,
\href{http://www.key-project.org/download/hoare/}{\emph{KeY Hoare}} is a tool that can be used to
verify the correctness of programs.  It is based on Hoare calculus.

\section{Symbolic Program Execution}
The last section has shown that using Hoare logic to verify a program can be quite difficult.
There is another method to prove the correctness of imperative programs.  This method is called
\emph{symbolic program execution}.  Let us demonstrate this method.  Consider the program shown
in Figure \ref{fig:power-iterative-annotated.stlx}.

\begin{figure}[!h]
\centering
\begin{Verbatim}[ frame         = lines,
                  framesep      = 0.3cm,
                  labelposition = bottomline,
                  numbers       = left,
                  numbersep     = -0.2cm,
                  xleftmargin   = 1.3cm,
                  xrightmargin  = 1.3cm,
                  codes         = {\catcode`_=8\catcode`$=3},
                  commandchars  = \\\{\},
                ]
power := procedure(x$_0$, y$_0$) \{
    r$_0$ := 1;
    while (y$_n$ > 0) \{
        if (y$_n$ % 2 == 1) \{
            r$_{n+1}$ := r$_n$ * x$_n$;
        \}
        x$_{n+1}$ := x$_n$ * x$_n$;
        y$_{n+1}$ := y$_n$ \symbol{92} 2;
    \}
    return r$_N$;
\};
\end{Verbatim}
\vspace*{-0.3cm}
\caption{An annotated program to compute powers.}
\label{fig:power-iterative-annotated.stlx}
\end{figure}
% $

The main difference between a mathematical formula and a program is that in a formula all
occurrences of a variable refer to the same value.  This is different in a program because the
variables change their values dynamically.  In order to deal with this property of program
variables we have to be able to distinguish the different occurrences of a variable.  To this
end, we index the program variables.  When doing this we have to be aware of the fact that the
same occurrence of a program variable can still denote different values if the variable occurs
inside a loop.  In this case we have to index the variables in a way that the index includes a
counter that counts the number of loop iterations.

For concreteness, consider the program shown in Figure \ref{fig:power-iterative-annotated.stlx}.
Here, in line 5 the variable \texttt{r} has the index $n$ on the right side of the assignment,
while it has the index $n+1$ on the left side of the assignment.  Here, $n$ denotes the number of
times the \texttt{while} loop has been iterated.  After the loop, in line 10, the variable is
indexed as $\texttt{r}_N$, where $N$ denotes the total number of loop iterations.

We show the correctness of the given program next.  Let us define
\\[0.2cm]
\hspace*{1.3cm}
$ a := x_0, \quad b := y_0$.
\\[0.2cm]
We show that the \texttt{while} loop satisfies the invariant
\begin{equation}
  \label{eq:powerInv}
  r_n \cdot x_n^{y_n} = a^b.
\end{equation}
This claim is proven by induction on the number of loop iterations.
\begin{enumerate}
\item[B.C.] $n=0$:  Since we have $r_0 = 1$, $x_0 = a$, and $y_0 = b$ we have
            \\[0.2cm]
            \hspace*{1.3cm}
            $r_n \cdot x_n^{y_n} = r_0 \cdot x_0^{y_0} = 1 \cdot a^{b} = a^b$.
\item[I.S.] $n \mapsto n + 1$:  We need a case distinction with respect to $y_n \mod 2$:
      \begin{enumerate}
      \item $y_n \mod 2 = 1$.  Then we have $y_{n} = 2 \cdot (y_n\symbol{92}2) + 1$ and
            $r_{n+1} = r_n \cdot x_n$.  Hence
            \begin{eqnarray*}
              &   & r_{n+1} \cdot x_{n+1}^{y_{n+1}} \\[0.2cm]
              & = & (r_{n} \cdot x_n) \cdot (x_{n} \cdot x_{n})^{y_{n}\symbol{92}2} \\[0.2cm]
              & = & r_{n} \cdot x_n^{2 \cdot (y_{n}\symbol{92}2) + 1} \\[0.2cm]
              & = & r_{n} \cdot x_n^{y_n} \\
              & \stackrel{i.h.}{=} & a^{b}
            \end{eqnarray*}
      \item $y_n \mod 2 = 0$.  Then we have $y_{n} = 2 \cdot (y_n\symbol{92}2)$ and $r_{n+1} = r_n$.
            Therefore
            \begin{eqnarray*}
              &   & r_{n+1} \cdot x_{n+1}^{y_{n+1}} \\[0.2cm]
              & = & r_{n} \cdot (x_{n} \cdot x_{n})^{y_{n}\symbol{92}2} \\[0.2cm]
              & = & r_{n} \cdot x_n^{2 \cdot (y_{n} \symbol{92} 2)} \\[0.2cm]
              & = & r_{n} \cdot x_n^{y_n} \\
              & \stackrel{i.h.}{=} & a^{b}
            \end{eqnarray*}
      \end{enumerate}
\end{enumerate}
This shows the validity of the equation (\ref{eq:powerInv}).  If the \texttt{while} loop
terminates, we must have $y_N = 0$.  If $n=N$, then equation (\ref{eq:powerInv}) yields:
\\[0.2cm]
\hspace*{1.3cm}
$r_N \cdot x_N^{y_N} = x_0^{y_0}
 \;\Longleftrightarrow\;
 r_N \cdot x_N^{0} = a^b
 \;\Longleftrightarrow\;
 r_N \cdot 1 = a^b
 \;\Longleftrightarrow\;
 r_N = a^b
$
\\[0.2cm]
This shows $r_N = a^b$ and since we already know that the \texttt{while} loop terminates, we have
proven that $\texttt{power}(a,b) = a^b$.

\exerciseEng
Use the method of symbolic program execution to prove the correctness of the implementation of
the Euclidean algorithm that is shown in Figure \ref{fig:gcd-mod.stlx}.  During the proof you
should make use of the fact that for all positive natural numbers $a$ and $b$ the equation
\\[0.2cm]
\hspace*{1.3cm}
$\mathtt{gcd}(a, b) = \mathtt{gcd}(a \,\texttt{\%}\, b, b)$
\\[0.2cm]
is valid.

\begin{figure}[!ht]
\centering
\begin{Verbatim}[ frame         = lines,
                  framesep      = 0.3cm,
                  firstnumber   = 1,
                  labelposition = bottomline,
                  numbers       = left,
                  numbersep     = -0.2cm,
                  xleftmargin   = 0.8cm,
                  xrightmargin  = 0.8cm,
                ]
gcd := procedure(a, b) {
    while (b != 0) {
        [a, b] := [b, a % b];
    }
    return a;
};
\end{Verbatim}
\vspace*{-0.3cm}
\caption{An efficient version of the Euclidean algorithm.}
\label{fig:gcd-mod.stlx}
\end{figure}

%%% Local Variables:
%%% mode: latex
%%% TeX-master: "logik"
%%% End:
{ "alphanum_fraction": 0.620020575, "avg_line_length": 40.5023148148, "ext": "tex", "hexsha": "7ff3405661fa8581ff0d06e0f99c776824ec1d7d", "lang": "TeX", "max_forks_count": 18, "max_forks_repo_forks_event_max_datetime": "2021-12-10T19:44:15.000Z", "max_forks_repo_forks_event_min_datetime": "2019-10-03T16:05:46.000Z", "max_forks_repo_head_hexsha": "62270c224061f38b637cb6920a0fbe5a56495bb9", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "AbdalrohmanGitHub/Logik", "max_forks_repo_path": "Lecture-Notes/hoare.tex", "max_issues_count": 19, "max_issues_repo_head_hexsha": "62270c224061f38b637cb6920a0fbe5a56495bb9", "max_issues_repo_issues_event_max_datetime": "2019-04-21T02:13:23.000Z", "max_issues_repo_issues_event_min_datetime": "2015-01-14T15:36:24.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "AbdalrohmanGitHub/Logik", "max_issues_repo_path": "Lecture-Notes/hoare.tex", "max_line_length": 136, "max_stars_count": 13, "max_stars_repo_head_hexsha": "62270c224061f38b637cb6920a0fbe5a56495bb9", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "AbdalrohmanGitHub/Logik", "max_stars_repo_path": "Lecture-Notes/hoare.tex", "max_stars_repo_stars_event_max_datetime": "2021-12-26T11:49:25.000Z", "max_stars_repo_stars_event_min_datetime": "2019-10-03T13:25:02.000Z", "num_tokens": 12230, "size": 34994 }
% Template created by Joseph Petitti in 2018
% Released under the CC0 Universal Public Domain Dedication license
% https://creativecommons.org/publicdomain/zero/1.0/
% You can do whatever you want with this, you don't even have to cite me

\documentclass[a4paper, 12pt, american]{article}

% useful packages
\usepackage[utf8]{inputenc}
\usepackage[american]{babel}
\usepackage{csquotes}
\usepackage[margin=1in]{geometry}
\usepackage{lipsum}
\usepackage{graphicx}
\usepackage{setspace}
\usepackage[page, titletoc, title]{appendix}
\usepackage[
	style=apa,
	backend=biber,
	sortcites=true,
	sorting=nyt,
	% isbn=false,
	% url=false,
	% doi=false,
	% eprint=false,
	hyperref=false,
	backref=false,
	% firstinits=false,
]{biblatex}

% declare useful stuff for LaTeX to know
\DeclareLanguageMapping{american}{american-apa}
\graphicspath{ {./images/} } % this is where you'll put all your images
\bibliography{references}

\title{Put Your Title Here}
\author{Put your names here}
\date{\today}

\begin{document}

% set page numbers to Roman for the front matter (before the introduction)
\pagenumbering{roman}

% Title page
\begin{center}
	{\huge Put Your Title Here}

	\vfill

	An Interactive Qualifying Project \\
	Submitted to the Faculty of \\
	WORCESTER POLYTECHNIC INSTITUTE \\
	in partial fulfillment of the requirements for the \\
	Degree of Bachelor of Science\par

	\vfill

	by \\
	Group Member 1 \\
	Group Member 2 \\
	Group Member 3 \\
	Group Member 4\par

	\vfill

	Date: \\
	\today\par

	\vfill

	Report Submitted To:\par
\end{center}

\vspace{\baselineskip}

\begin{flushright}
	% list your sponsors and their organizations here
	Sponsor 1 \\
	Sponsor Organization 1 \\

	\vspace{\baselineskip}

	Sponsor 2 \\
	Sponsor Organization 2\par

	\vspace{\baselineskip}

	Professors Advisor 1 and Advisor 2 \\
	Worcester Polytechnic Institute
\end{flushright}

\vfill

\newpage

\onehalfspacing % the meat of your report will be 1.5 spaced

\begin{abstract}
	This is where your abstract should go. \lipsum[3]
\end{abstract}

\newpage

\section*{Acknowledgements}
\addcontentsline{toc}{section}{Acknowledgements}

\lipsum[3]

\newpage

\section*{Executive Summary}
\addcontentsline{toc}{section}{Executive Summary}

\lipsum[3]

\newpage

\section*{Authorship}
\addcontentsline{toc}{section}{Authorship}

\lipsum[3]

\newpage

\singlespacing % make the table of contents, figures, and tables single spaced

\tableofcontents
\listoffigures
\listoftables

\newpage

\onehalfspacing % revert back to 1.5 spacing
\pagenumbering{arabic} % switch to Arabic numbering for the main part

\section{Introduction}

\lipsum[8]

\newpage

\section{Background}

\subsection{This is a subsection}

In this section, we introduce something blah blah blah.

\subsubsection{This is a subsubsection}

By definition from Merriam-Webster, culture refers to ``the characteristic features of everyday
existence shared by people in a place or time.'' Here's a random fact to show you how citations
work. Cantopop flourished in Hong Kong in the 1970s, as a genre of love songs with Cantonese
lyrics backed by western-style pop music \parencite{carroll2007}.
\lipsum[1] \subsubsection{Here's another subsubsection} \lipsum[3] % this is a picture with a caption \begin{figure}[h] \centering \includegraphics[width=\textwidth]{hong-kong-skyline.jpg} \caption{Hong Kong skyline as seen from Victoria Peak, 2009 (CC0 1.0)} \end{figure} \subsection{Here's a new subsection} \lipsum[1] \subsubsection{And a new subsection} \lipsum[4] \subsubsection{You probably get the point now} \lipsum[1] \newpage \section{Methodology} \lipsum[1] \newpage \section{Findings} \lipsum[1] \newpage \section{Conclusions \& Recommendations} \lipsum[1] \newpage \section*{References} \addcontentsline{toc}{section}{References} \printbibliography[heading=none] \newpage \appendices \section{This is your first appendix} You can add other appendices with sections the same way. \lipsum[4] \end{document}
{ "alphanum_fraction": 0.7570377885, "avg_line_length": 20.0152284264, "ext": "tex", "hexsha": "fe1bfa7ec4a2dde8836ba4e25900dd0a6b3058f2", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "6d463dfb28ab9b3f51b796dc13eedcc4e95b44f3", "max_forks_repo_licenses": [ "CC0-1.0" ], "max_forks_repo_name": "jojonium/WPI-IQP-Report-Template", "max_forks_repo_path": "proposal/proposal.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "6d463dfb28ab9b3f51b796dc13eedcc4e95b44f3", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "CC0-1.0" ], "max_issues_repo_name": "jojonium/WPI-IQP-Report-Template", "max_issues_repo_path": "proposal/proposal.tex", "max_line_length": 79, "max_stars_count": null, "max_stars_repo_head_hexsha": "6d463dfb28ab9b3f51b796dc13eedcc4e95b44f3", "max_stars_repo_licenses": [ "CC0-1.0" ], "max_stars_repo_name": "jojonium/WPI-IQP-Report-Template", "max_stars_repo_path": "proposal/proposal.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1118, "size": 3943 }
\section{Confidence intervals of black box models}
{ "alphanum_fraction": 0.7924528302, "avg_line_length": 13.25, "ext": "tex", "hexsha": "ba204be491684ac02e9ddd438a4e14b2b0c1038d", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "adamdboult/nodeHomePage", "max_forks_repo_path": "src/pug/theory/statistics/supervisedConfidence/01-00-Confidence.tex", "max_issues_count": 6, "max_issues_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93", "max_issues_repo_issues_event_max_datetime": "2022-01-01T22:16:09.000Z", "max_issues_repo_issues_event_min_datetime": "2021-03-03T12:36:56.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "adamdboult/nodeHomePage", "max_issues_repo_path": "src/pug/theory/statistics/supervisedConfidence/01-00-Confidence.tex", "max_line_length": 50, "max_stars_count": null, "max_stars_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "adamdboult/nodeHomePage", "max_stars_repo_path": "src/pug/theory/statistics/supervisedConfidence/01-00-Confidence.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 12, "size": 53 }
\par
\chapter{{\tt DenseMtx}: Dense matrix object}
\par
The {\tt DenseMtx} object contains a dense matrix along with row and column indices.
The entries in the matrix can be double precision real or double precision complex.
It needs to be able to manage its own storage, much like the {\tt Chv} and {\tt SubMtx} objects
that are used during the factor and solve steps, so we include this capability via a contained
{\tt DV} object.
A {\tt DenseMtx} object may also be found in a list, so there is a {\tt next} field that points
to another {\tt DenseMtx} object.
\par
The {\tt DenseMtx} object also exists in an MPI environment, where it holds the solution and
right hand side matrices.
Since each of these two matrices is distributed, a processor {\it owns} only part of the global
matrix, and so there is a need for row and column indices to specify which rows and columns are
present on which processor.
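\par
To make the description above concrete, the following sketch lists the kind of fields such an
object might hold.
This is only an illustration based on the description in this chapter, not the actual
{\tt DenseMtx} declaration; the field names and types are chosen for readability.
\begin{verbatim}
/* illustrative sketch only -- not the actual SPOOLES declaration */
typedef struct _DenseMtx   DenseMtx ;
struct _DenseMtx {
   int        type    ;   /* real or complex entries                  */
   int        nrow    ;   /* number of rows                           */
   int        ncol    ;   /* number of columns                        */
   int        *rowind ;   /* indices of the rows that are present     */
   int        *colind ;   /* indices of the columns that are present  */
   double     *entries;   /* matrix entries, storage owned by wrkDV   */
   DV         wrkDV   ;   /* DV object that manages the storage       */
   DenseMtx   *next   ;   /* next object when kept in a list          */
} ;
\end{verbatim}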
{ "alphanum_fraction": 0.7689732143, "avg_line_length": 42.6666666667, "ext": "tex", "hexsha": "d04f24c52141609377343156d1bf6e0c367398a8", "lang": "TeX", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2019-08-29T18:41:28.000Z", "max_forks_repo_forks_event_min_datetime": "2019-08-29T18:41:28.000Z", "max_forks_repo_head_hexsha": "2cb2c434b536eb668ff88bdf82538d22f4f0f711", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "alleindrach/calculix-desktop", "max_forks_repo_path": "ccx_prool/SPOOLES.2.2/DenseMtx/doc/intro.tex", "max_issues_count": 4, "max_issues_repo_head_hexsha": "2cb2c434b536eb668ff88bdf82538d22f4f0f711", "max_issues_repo_issues_event_max_datetime": "2018-01-25T16:08:31.000Z", "max_issues_repo_issues_event_min_datetime": "2017-09-21T17:03:55.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "alleindrach/calculix-desktop", "max_issues_repo_path": "ccx_prool/SPOOLES.2.2/DenseMtx/doc/intro.tex", "max_line_length": 68, "max_stars_count": null, "max_stars_repo_head_hexsha": "2cb2c434b536eb668ff88bdf82538d22f4f0f711", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "alleindrach/calculix-desktop", "max_stars_repo_path": "ccx_prool/SPOOLES.2.2/DenseMtx/doc/intro.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 221, "size": 896 }
\chapter{Small-step semantics}
\[
  \begin{prooftree}[center=false]
    \infer0[\textsc{IntE}/\textsc{BoolE}]{v \longrightarrow v}
  \end{prooftree}
  \qquad
  \begin{prooftree}[center=false]
    \infer0[\textsc{Lambda}]{e \longrightarrow \mathbold{\lambda} x : \tau \ldotp e'}
  \end{prooftree}
\]
\[
  \begin{prooftree}
    \hypo{e_1 \longrightarrow e'_1}
    \infer1[\textsc{PairA1}]{\langle e_1,e_2 \rangle \longrightarrow \langle e'_1,e_2 \rangle}
  \end{prooftree}
  \qquad
  \begin{prooftree}
    \hypo{e_2 \longrightarrow e'_2}
    \infer1[\textsc{PairA2}]{\langle e_1,e_2 \rangle \longrightarrow \langle e_1,e'_2 \rangle}
  \end{prooftree}
\]
\[
  \begin{prooftree}
    \hypo{e_1 \longrightarrow e'_1}
    \infer1[\textsc{AppF}]{e_1\,e_2 \longrightarrow e'_1\,e_2}
  \end{prooftree}
  \quad
  \begin{prooftree}
    \hypo{e_2 \longrightarrow e'_2}
    \infer1[\textsc{AppV}]{(\mathbold{\lambda}x : \tau \ldotp e_1)\,e_2 \longrightarrow (\mathbold{\lambda}x : \tau \ldotp e_1)\,e'_2}
  \end{prooftree}
\]
\[
  \begin{prooftree}
    \hypo{e[\sfrac{v}{x}] \longrightarrow v'}
    \infer1[\textsc{App}]{(\mathbold{\lambda}x : \tau \ldotp e)\,v \longrightarrow v'}
  \end{prooftree}
  \quad
  \begin{prooftree}
    \hypo{e \longrightarrow e'}
    \infer1[\textsc{FixF}]{\text{\textbf{fix }} e \longrightarrow \text{\textbf{fix }} e'}
  \end{prooftree}
  \quad
  \begin{prooftree}
    \hypo{e[\sfrac{\text{\textbf{fix }} (\mathbold{\lambda}x : \tau \ldotp e)}{x}] \longrightarrow e'}
    \infer1[\textsc{FixV}]{\text{\textbf{fix }} (\mathbold{\lambda}x : \tau \ldotp e) \longrightarrow e'}
  \end{prooftree}
\]
\[
  \begin{prooftree}
    \hypo{c \longrightarrow c'}
    \infer1[\textsc{CondC}]{\text{\textbf{if} } c \text{ \textbf{then} } e_1 \text{ \textbf{else} } e_2 \longrightarrow \text{\textbf{if} } c' \text{ \textbf{then} } e_1 \text{ \textbf{else} } e_2}
  \end{prooftree}
\]
\begin{prooftree*}
  \hypo{e_1 \longrightarrow e'_1}
  \infer1[\textsc{CondA1}]{\text{\textbf{if} } v \text{ \textbf{then} } e_1 \text{ \textbf{else} } e_2 \longrightarrow \text{\textbf{if} } v \text{ \textbf{then} } e'_1 \text{ \textbf{else} } e_2}
\end{prooftree*}
\begin{prooftree*}
  \hypo{e_2 \longrightarrow e'_2}
  \infer1[\textsc{CondA2}]{\text{\textbf{if} } v \text{ \textbf{then} } e_1 \text{ \textbf{else} } e_2 \longrightarrow {\text{\textbf{if} } v \text{ \textbf{then} } e_1 \text{ \textbf{else} } e'_2}}
\end{prooftree*}
\[
  \begin{prooftree}
    \hypo{v \longrightarrow \text{True}}
    \infer1[\textsc{CondTrue}]{\text{\textbf{if} } v \text{ \textbf{then} } e_1 \text{ \textbf{else} } e_2 \longrightarrow e_1}
  \end{prooftree}
  \quad
  \begin{prooftree}
    \hypo{v \longrightarrow \text{False}}
    \infer1[\textsc{CondFalse}]{\text{\textbf{if} } v \text{ \textbf{then} } e_1 \text{ \textbf{else} } e_2 \longrightarrow e_2}
  \end{prooftree}
\]
\[
  \begin{prooftree}
    \hypo{e_1 \longrightarrow e'_1}
    \infer1[\textsc{PrimBinOpA1}]{e_1 \text{ \textbf{binop} } e_2 \longrightarrow e'_1 \text{ \textbf{binop} } e_2}
  \end{prooftree}
\]
\[
  \begin{prooftree}
    \hypo{e_2 \longrightarrow e'_2}
    \infer1[\textsc{PrimBinOpA2}]{e_1 \text{ \textbf{binop} } e_2 \longrightarrow e_1 \text{ \textbf{binop} } e'_2}
  \end{prooftree}
\]
\[
  \begin{prooftree}
    \infer0[\textsc{PrimBinOp}]{v_1 \text{ \textbf{binop} } v_2 \longrightarrow v}
  \end{prooftree}
  \quad
  \begin{prooftree}
    \hypo{e \longrightarrow e'}
    \infer1[\textsc{PrimOpA}]{\text{\textbf{op} } e \longrightarrow \text{\textbf{op} } e'}
  \end{prooftree}
  \quad
  \begin{prooftree}
    \infer0[\textsc{PrimOp}]{\text{\textbf{op} } v \longrightarrow v'}
  \end{prooftree}
\]
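
For instance, instantiating \textsc{PrimBinOp} and \textsc{App} yields a derivation showing that
the application $(\mathbold{\lambda}x : \tau \ldotp x + 1)\,2$ steps to $3$: substituting $2$ for
$x$ in the body gives the premise $2 + 1 \longrightarrow 3$, which is an instance of
\textsc{PrimBinOp}.
\[
  \begin{prooftree}
    \infer0[\textsc{PrimBinOp}]{2 + 1 \longrightarrow 3}
    \infer1[\textsc{App}]{(\mathbold{\lambda}x : \tau \ldotp x + 1)\,2 \longrightarrow 3}
  \end{prooftree}
\]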
{ "alphanum_fraction": 0.6508762322, "avg_line_length": 36.52, "ext": "tex", "hexsha": "d11f76cb93ea50508b4037b753695db7a5618e60", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "d05f09eb401d1b5fde044e6ee37ebcbc800bc78a", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "ShinKage/lambda", "max_forks_repo_path": "docs/smallstep.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "d05f09eb401d1b5fde044e6ee37ebcbc800bc78a", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "ShinKage/lambda", "max_issues_repo_path": "docs/smallstep.tex", "max_line_length": 198, "max_stars_count": null, "max_stars_repo_head_hexsha": "d05f09eb401d1b5fde044e6ee37ebcbc800bc78a", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "ShinKage/lambda", "max_stars_repo_path": "docs/smallstep.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1501, "size": 3652 }
%% !TEX root = manual.tex

\section{SST/macro Parameter files}
There are parameter files for the main network models (MACRELS, PISCES, SCULPIN, SNAPPR) in the top-level examples directory.
\label{sec:parameters}
A minimal parameter file setting up a 2D-torus topology is shown below.
An equivalent Python input file that reads an ini file is also shown.
A detailed listing of parameter namespaces and keywords is given in Section \ref{chapter:parameters}.
Both the \inlineshell{ini} files and Python files make careful use of namespaces.

\begin{ViFile}
amm_model = amm1
congestion_model = LogP
node {
 #run a single mpi test
 app1 {
  indexing = block
  allocation = first_available
  launch_cmd = aprun -n8 -N1
  name = sstmac_mpi_testall
  argv = 
  sendrecvMessage_size = 128
 }
 ncores = 1
 memory {
  model = simple
  bandwidth = 1GB/s
  latency = 10ns
 }
 proc {
  frequency = 1GHz
 }
 nic {
  injection {
   bandwidth = 1GB/s
   latency = 1us
  }
  model = simple
 }
}
switch {
 link {
  bandwidth = 1.0GB/s
  latency = 100ns
 }
 logp {
  bandwidth = 1GB/s
  out_in_latency = 1us
 }
}
topology {
 name = torus
 geometry = 4,4
}
\end{ViFile}

The input file follows a basic syntax of \inlinefile{parameter = value}.
Parameter names follow C++ variable rules (letters, numbers, underscore) while parameter values can contain spaces.
Trailing and leading whitespace is stripped from parameters.
Comments can be included on lines starting with \#.

\subsection{Parameter Namespace Rules}
\label{subsec:parameterNamespace}

Periods denote nesting of parameter namespaces.
The parameter \inlineshell{node.memory.model} will be nested in namespace \inlineshell{memory} inside namespace \inlineshell{node}.
If inside a given namespace, \sstmacro looks only within that namespace.
The preferred syntax more closely resembles C++ namespace declarations.
Namespaces are scoped using brackets \{\}:

\begin{ViFile}
node {
 model = simple
 memory {
  model = simple
  bandwidth = 1GB/s
  latency = 10ns
 }
}
\end{ViFile}

Any line containing a single string with an opening \{ starts a new namespace.
A line containing only a closing \} ends the innermost namespace.
The syntax is not as flexible as C++ since the opening \{ must appear on the same line as the namespace and the closing \} must be on a line of its own.
A detailed listing of parameter namespaces and keywords is given in Section \ref{chapter:parameters}.

\subsection{Initial Example}
\label{subsec:initialExample}

Continuing with the example above, we see the input file is broken into namespace sections.
First, application launch parameters must be chosen for each node, determining how nodes will be allocated, how ranks will be indexed, and finally what application will be run.
Additionally, you must specify how many processes to launch and how many to spawn per node.
We currently recommend using aprun syntax (the launcher for Cray machines), although support is being added for other process management systems.
\sstmacro can simulate command line parameters by giving a value for \inlinefile{node.app1.argv}.

A network must also be chosen.
In the simplest possible case, the network is modeled via a simple latency/bandwidth formula.
For more complicated network models, many more than two parameters will be required.
See \ref{sec:tutorial:networkmodel} for a brief explanation of \sstmacro network congestion models.

A topology is also needed for constructing the network.
In this case we choose a 2-D 4$\times$4 torus (16 switches).
The \inlinefile{topology.geometry} parameter takes an arbitrarily long list of numbers as the dimensions to the torus.

Finally, we must construct a node model.
In this case, again, we use the simplest possible models for the node, network interface controller (NIC), and memory.

Parameter files can be constructed in a more modular way through the \inlinefile{include} statement.
An alternative parameter file would be:

\begin{ViFile}
include machine.ini

# Launch parameters
node {
 app1 {
  indexing = block
  allocation = first_available
  launch_cmd = aprun -n2 -N1
  name = user_mpiapp_cxx
  argv = 

  # Application parameters
  sendrecvMessage_size = 128
 }
}
\end{ViFile}

where in the first line we include the file \inlinefile{machine.ini}.
All network, topology, and node parameters would be placed into a \inlinefile{machine.ini} file.
In this way, multiple experiments can be linked to a common machine.
Alternatively, multiple machines could be linked to the same application by creating and including an \inlinefile{application.ini}.
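
For instance, the \inlinefile{machine.ini} referenced above might simply collect the machine-level namespaces from the first listing in this section (abbreviated here; the exact split between files is up to the user):

\begin{ViFile}
# machine.ini
amm_model = amm1
congestion_model = LogP
topology {
 name = torus
 geometry = 4,4
}
switch {
 link {
  bandwidth = 1.0GB/s
  latency = 100ns
 }
 logp {
  bandwidth = 1GB/s
  out_in_latency = 1us
 }
}
# node hardware parameters (memory, proc, nic) would go here as well
\end{ViFile}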
{ "alphanum_fraction": 0.7651122625, "avg_line_length": 35.0909090909, "ext": "tex", "hexsha": "31255b7488b8d6390910aafb4db7418a0ecaca1d", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "067a2cb9f1606b652396b5dda6093096e5bcf2d7", "max_forks_repo_licenses": [ "BSD-Source-Code" ], "max_forks_repo_name": "calewis/sst-macro", "max_forks_repo_path": "docs/manual/ParamsTutorial.tex", "max_issues_count": 6, "max_issues_repo_head_hexsha": "067a2cb9f1606b652396b5dda6093096e5bcf2d7", "max_issues_repo_issues_event_max_datetime": "2020-05-15T09:39:59.000Z", "max_issues_repo_issues_event_min_datetime": "2020-04-20T08:22:20.000Z", "max_issues_repo_licenses": [ "BSD-Source-Code" ], "max_issues_repo_name": "calewis/sst-macro", "max_issues_repo_path": "docs/manual/ParamsTutorial.tex", "max_line_length": 179, "max_stars_count": 1, "max_stars_repo_head_hexsha": "067a2cb9f1606b652396b5dda6093096e5bcf2d7", "max_stars_repo_licenses": [ "BSD-Source-Code" ], "max_stars_repo_name": "calewis/sst-macro", "max_stars_repo_path": "docs/manual/ParamsTutorial.tex", "max_stars_repo_stars_event_max_datetime": "2020-04-20T08:23:30.000Z", "max_stars_repo_stars_event_min_datetime": "2020-04-20T08:23:30.000Z", "num_tokens": 1114, "size": 4632 }
\subsection{Carry trade}
{ "alphanum_fraction": 0.7142857143, "avg_line_length": 5.6, "ext": "tex", "hexsha": "8b4218ef2a9b9229746de0611f15b283a834bc28", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "adamdboult/nodeHomePage", "max_forks_repo_path": "src/pug/theory/economics/commodities/06-01-carry.tex", "max_issues_count": 6, "max_issues_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93", "max_issues_repo_issues_event_max_datetime": "2022-01-01T22:16:09.000Z", "max_issues_repo_issues_event_min_datetime": "2021-03-03T12:36:56.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "adamdboult/nodeHomePage", "max_issues_repo_path": "src/pug/theory/economics/commodities/06-01-carry.tex", "max_line_length": 24, "max_stars_count": null, "max_stars_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "adamdboult/nodeHomePage", "max_stars_repo_path": "src/pug/theory/economics/commodities/06-01-carry.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 8, "size": 28 }
% % The first command in your LaTeX source must be the \documentclass command. \documentclass[sigconf]{acmart} \usepackage[utf8]{inputenc} \usepackage{fancyvrb} \usepackage{graphicx} \usepackage{comment} \usepackage{xspace} %\usepackage[usenames, dvipsnames]{xcolor} %\usepackage{hyperref} \usepackage{amsmath} \newcommand{\argmin}{\arg\!\min} \newcommand{\argmax}{\arg\!\max} %para el simbolo de chequeado \usepackage{amssymb}% http://ctan.org/pkg/amssymb \usepackage{pifont}% http://ctan.org/pkg/pifont \newcommand{\cmark}{\ding{51}}% \newcommand{\xmark}{\ding{55}}% \usepackage{booktabs} \usepackage{multirow} \newcommand{\ah}[1]{{\color{blue}\textsc{ah:} #1}} \usepackage{soul} %middleline \usepackage{pgfplots} % % defining the \BibTeX command - from Oren Patashnik's original BibTeX documentation. \def\BibTeX{{\rm B\kern-.05em{\sc i\kern-.025em b}\kern-.08emT\kern-.1667em\lower.7ex\hbox{E}\kern-.125emX}} % Rights management information. % This information is sent to you when you complete the rights form. % These commands have SAMPLE values in them; it is your responsibility as an author to replace % the commands and values with those provided to you when you complete the rights form. % % These commands are for a PROCEEDINGS abstract or paper. \copyrightyear{2018} \acmYear{2018} \setcopyright{acmlicensed} \acmConference[LA-WEB 2019]{10th Latin American Web Congress LA-WEB 2019}{May 13--14, 2019}{San Francisco, USA} \acmBooktitle{10th Latin American Web Congress LA-WEB 2019, May 13--14, San Francisco, USA} \acmPrice{15.00} \acmDOI{10.1145/1122445.1122456} \acmISBN{978-1-4503-9999-9/18/06} % % These commands are for a JOURNAL article. %\setcopyright{acmcopyright} %\acmJournal{TOG} %\acmYear{2018}\acmVolume{37}\acmNumber{4}\acmArticle{111}\acmMonth{8} %\acmDOI{10.1145/1122445.1122456} % % Submission ID. % Use this when submitting an article to a sponsored event. You'll receive a unique submission ID from the organizers % of the event, and this ID should be used as the parameter to this command. %\acmSubmissionID{123-A56-BU3} % % The majority of ACM publications use numbered citations and references. If you are preparing content for an event % sponsored by ACM SIGGRAPH, you must use the "author year" style of citations and references. Uncommenting % the next command will enable that style. %\citestyle{acmauthoryear} % % end of the preamble, start of the body of the document source. \begin{document} % % The "title" command has an optional parameter, allowing the author to define a "short title" to be used in page headers. \title{NIFify: Towards Better Quality Entity Linking Datasets} % % The "author" command and its associated commands are used to define the authors and their affiliations. % Of note is the shared affiliation of the first two authors, and the "authornote" and "authornotemark" commands % used to denote shared contribution to the research. \author{Henry Rosales-M\'endez} \affiliation{% \institution{DCC, University of Chile} } \email{[email protected]} \author{Aidan Hogan} \affiliation{% \institution{IMFD; DCC, University of Chile} } \email{[email protected]} \author{Barbara Poblete} \affiliation{% \institution{IMFD; DCC, University of Chile} } \email{[email protected]} % % By default, the full list of authors will be used in the page headers. Often, this list is too long, and will overlap % other information printed in the page headers. This command allows the author to define a more concise list % of authors' names for this purpose. 
\renewcommand{\shortauthors}{Rosales-M\'endez et al.} % % The abstract is a short summary of the work to be presented in the article. \begin{abstract} The Entity Linking (EL) task identifies entity mentions in a text corpus and associates them with a corresponding unambiguous entry in a Knowledge Base. The evaluation of EL systems relies on the comparison of their results against gold standards. A common format used to represent gold standard datasets is the NLP Interchange Format (NIF), which uses RDF as a data model. However, creating gold standard datasets for EL is a time-consuming and error-prone process. In this paper we propose a tool called NIFify to help manually generate, curate, visualize and validate EL annotations; the resulting tool is useful, for example, in the creation of gold standard datasets. NIFify also serves as a benchmark tool that enables the assessment of EL results. Using the validation features of NIFify, we further explore the quality of popular EL gold standards. \end{abstract} % % The code below is generated by the tool at http://dl.acm.org/ccs.cfm. % Please copy and paste the code instead of the example below. % \begin{comment} \begin{CCSXML} <ccs2012> <concept> <concept_id>10010520.10010553.10010562</concept_id> <concept_desc>Computer systems organization~Embedded systems</concept_desc> <concept_significance>500</concept_significance> </concept> <concept> <concept_id>10010520.10010575.10010755</concept_id> <concept_desc>Computer systems organization~Redundancy</concept_desc> <concept_significance>300</concept_significance> </concept> <concept> <concept_id>10010520.10010553.10010554</concept_id> <concept_desc>Computer systems organization~Robotics</concept_desc> <concept_significance>100</concept_significance> </concept> <concept> <concept_id>10003033.10003083.10003095</concept_id> <concept_desc>Networks~Network reliability</concept_desc> <concept_significance>100</concept_significance> </concept> </ccs2012> \end{CCSXML} \ccsdesc[500]{Computer systems organization~Embedded systems} \ccsdesc[300]{Computer systems organization~Redundancy} \ccsdesc{Computer systems organization~Robotics} \ccsdesc[100]{Networks~Network reliability} % % Keywords. The author(s) should pick words that accurately describe the work being % presented. Separate the keywords with commas. \keywords{datasets, neural networks, gaze detection, text tagging} % % A "teaser" image appears between the author and affiliation information and the body % of the document, and typically spans the page. \begin{teaserfigure} \includegraphics[width=\textwidth]{sampleteaser} \caption{Seattle Mariners at Spring Training, 2010.} \Description{Enjoying the baseball game from the third-base seats. Ichiro Suzuki preparing to bat.} \label{fig:teaser} \end{teaserfigure} \end{comment} % % This command processes the author and affiliation and title information and builds % the first part of the formatted document. \maketitle %------------------------------------------------------------ \section{Introduction} Entity Linking (EL) involves annotating entity mentions in a text and associating them with a corresponding unambiguous identifier in a Knowledge Base (KB). EL has gained increasing attention in recent years due mainly to the availability of large KBs on the Web (e.g., Wikipedia, DBpedia, Wikidata, BabelNet) that offer unambiguous identifiers and relevant information for a wide range of entities. 
For instance, in the sentence \textbf{S1} \textit{``Jackson won an award as best-selling artist
of the 1980s''} an EL system targeting the DBpedia KB should identify \textit{Jackson} as
\texttt{dbr:Michael\_Jackson}\footnote{Throughout, we use well-known prefixes according to
\url{http://prefix.cc}}; in this way, we know that the text speaks about a famous musician from
the U.S.\ who is also known as the \textit{King of Pop}. EL thus helps to build a bridge from
unstructured information (text) to (semi-)structured data (KBs). Many applications then rely on
EL, including semantic search, semantic annotation, text enrichment, entity summarization,
relation extraction, and more besides.

Several EL systems have been proposed thus far, along with a range of gold standards for
evaluation purposes (surveyed later in Table~\ref{tab:datasets}). However, as research on EL has
continued to advance, more specialized requirements are being considered, reflecting real
environments that stand to benefit from EL; such requirements include multilingualism, specific
domains, noisy texts, short texts, semi-structured inputs, etc.
%KEA~\cite{KEA2016}, for instance, is proposed to accurately perform over tweets, that represent a source of short and noisy information. (Aidan: a little specific)
With this diversification of requirements, traditional gold standards are not enough: novel gold
standards are ideally required to reflect different contexts.

Gold standard datasets are commonly built manually by human experts, reflecting a ground truth.
Early datasets were written in (varying) ad hoc formats that required special processing.
Hellmann et al.~\cite{NIFpaper} thus proposed the NLP Interchange Format (NIF) in order to
improve the interoperability of NLP tools, including EL tools. NIF is based on the RDF data
model, defining a vocabulary in OWL for representing and sharing NLP-related annotations.
%A further benefit of this approach is that it fosters extensibility, where we will later describe minor extensions to represent also the types of entities being linked.
%Although its good adoption in recent research, this format does not allow the specification of entity type for all scenarios. Our first proposal in this paper is the enhancing of NIF, covering in that way some scenarios where current mechanisms for the specification of entity types are not suitable.

Despite the benefits of NIF, the creation of gold standards is still a complex, error-prone and
time-consuming process; hence a number of tools have been proposed to help experts in this task.
R\"oder et al.~\cite{N3} craft three NIF datasets from texts written in English and German that
were tagged manually using their own tool, but to the best of our knowledge the tool is not
openly available. Looking for mistakes in datasets, Kunal et al.~\cite{Kunal2017} propose
guidelines to validate EL datasets, providing the EAGLET system that checks a variety of quality
rules, helping experts to reduce errors; however, some important checks, such as verifying that
the target of a link is not a redirect page, are not covered. On the other hand, other works have
focused on standardizing the assessment process, providing benchmarking suites (e.g.,
GERBIL~\cite{gerbil-2015}, Orbis~\cite{Orbis2018}) that can quickly compare results for
state-of-the-art EL systems against a variety of datasets.
More generally, all of these NIF operations -- creating, validating and performing experiments with EL datasets -- have, to the best of our knowledge, been addressed as independent systems. In this short paper, we thus describe NIFify: a tool that simultaneously supports the creation, visualization, and validation of NIF datasets, as well as the comparison of EL systems. With our tool -- shown in Figure 1 -- we include some functionalities not covered by previous approaches for creating, modifying and validating NIF datasets. Additionally, we allow to visualise the results of EL systems at both a sentence and document level. %We show in Figure 1 the main view of our tool that corresponds to the process of annotation and visualization. %which . \begin{figure*}[tb] \label{fig:sys} \includegraphics[width=1\textwidth]{figs/screenshot} \caption{The main view of NIFify showing: (a) the class-reference input to filter annotations; (b) the document text input; (c) the mention identification field; and (d) the annotation visualization.} \end{figure*} %NIFify contains a variety of desirable properties, where some of them were incorporated during the construction of VoxEL~\cite{VoxEL2018}, a dataset that contains the same annotation aligned by each sentence/document for five languages. Additionally, of the NIF specifications, NIFify is built also to handle our NIF extensions. %In follow ... %Dataset de tweets: Analysis of named entity recognition and linking for tweets %version 2 de NIF http://persistence.uni-leipzig.org/nlp2rdf/specification/version.html %http://dashboard.nlp2rdf.aksw.org/ %http://persistence.uni-leipzig.org/nlp2rdf/specification/api.html %https://stp.lingfil.uu.se/~nivre/research/MaltXML.html %------------------------------------------------------------------------------- \section{Background} \label{sec:nif} The typical way to evaluate EL systems is through gold standard datasets, which contain text corpora and their corresponding annotations of entity mentions with respect to the identifiers of a given KB (or multiple KBs). One can then use such datasets in order to measure the quality of the output of an EL system. As more and more such datasets were proposed for EL, interoperability became an issue: various formats were used to represent such datasets. One of the first formats proposed for EL annotation was for the MSNBC~\cite{cucerzan2007large} dataset, which has two separate files: one a plain text file, and the other an XML file describing the annotations. This same format was followed by other authors proposing further EL datasets. e.g., ACE2004~\cite{aquaint}, AQUAINT~\cite{aquaint}, IITB~\cite{IITB2009}. However, other EL datasets began to follow other formats. In Table~\ref{tab:datasets} we list some of the most popular EL datasets in the literature along with some details of their content: whether or not they were created manually (\textbf{Mn}), whether or not the entity mentions are explicitly typed (\textbf{Typ}), and the format used. In terms of formats, many are based on XML (e.g., MSNBC~\cite{cucerzan2007large}, IITB~\cite{IITB2009}, RENDEN~\cite{renden2016}, CAT~\cite{meantime2016}) or CSV (e.g., AIDA~\cite{aida2011}, SemEval~\cite{moro2015semeval}). 
However, a number also use RDF as a base data-model: Melo et al.~\cite{Lexvo2008} proposed Lexvo\footnote{\url{http://lexvo.org/ontology}; January 1st, 2019.} as an RDF-based format and service that defines a unique URI for terms, languages, scripts, and characters from a text corpus;
%In this context, Melo et al.~\cite{Lexvo2008} propose the Lexvo.org service and Lexvo Ontology\footnote{\url{http://lexvo.org/ontology}; January 1st, 2019.}, that allow the constructions of human-readable and machine-readable data based on RDF triples. Lexvo defines a unique URI for terms, languages, scripts, and characters for use in Semantic Web; and provide links to several thesauri and KBs such as Wiktionary and Wikipedia.
later, Hellmann et al.~\cite{NIFpaper} proposed the NLP Interchange Format (NIF), based on RDF, which is interoperable with a variety of NLP tools, and has been used by several recent EL datasets (e.g., N3-RSS 500~\cite{N3}, Reuters 128~\cite{N3}, Wes2015~\cite{wes2015}, News-100~\cite{N3}, DBpedia Abstracts~\cite{abstracts2016}, VoxEL~\cite{VoxEL2018}). Further legacy datasets were transformed to NIF, including KORE50 and DBpedia Spotlight\footnote{\url{http://apps.yovisto.com/labs/ner-benchmarks}; January 1st, 2019.}.

\begin{comment}
\newcommand{\ccell}[1]{\multicolumn{1}{c}{#1}}
\newcommand{\rcell}[1]{\multicolumn{1}{r}{#1}}
\setlength{\tabcolsep}{1.2ex}
\begin{table}[tb!]
%\begin{table}[!]
\centering
\caption{Survey of dataset for EL task. We highlighted in bold those datasets that have been re-writing to NIF format.}
\label{tab:datasets}
%\resizebox{\textwidth}{!}{
\begin{tabular}{lrrrrccc}
\toprule
\textbf{Dataset}~~~~~~~~~~~~~~~~~~ & \ccell{Year}&\rcell{$|D|$} & \rcell{$|S|$} & \rcell{$|E|$} & \ccell{\textbf{Mn}} & \ccell{\textbf{Typ}}&\ccell{\textbf{Format}}\\\midrule
MSNBC~\cite{cucerzan2007large} &2007&20 &668 &747 &\xmark &\xmark & MSNBC$_{xml}$\\\midrule
IITB~\cite{IITB2009} &2009&103 &1,781 &18,308 &\cmark &\xmark & IITB$_xml$\\\midrule
AIDA/CoNLL-Complete~\cite{aida2011} &2011&1393 &22,137 &34,929 &\cmark &\xmark & AIDA$_{csv}$ \\\midrule
ACE2004~\cite{aquaint} &2011&57 &- &306 &\xmark &\xmark & MSNBC$_{xml}$\\\midrule
AQUAINT~\cite{aquaint} &2011&50 &533 &727 &\xmark &\xmark & MSNBC$_{xml}$\\\midrule
\textbf{DBpedia Spotlight} \cite{mendes2011dbpedia} &2011&10 &58 &331 &\cmark &\xmark & Lexvo\\\midrule
\textbf{KORE50}~\cite{kore50} &2012&50 &50 &144 &\cmark &\xmark & AIDA$_{csv}$\\\midrule
N3-RSS 500~\cite{N3} &2014&1 &500 &1000 &\cmark &\xmark & NIF \\\midrule
Reuters 128~\cite{N3} &2014&128 &- &881 &\cmark &\xmark & NIF \\\midrule
News-100~\cite{N3} &2014&100 &- &1656 &\cmark &\xmark & NIF \\\midrule
Wes2015~\cite{wes2015} &2015&331 &- &28,586 &\cmark &\xmark & NIF \\\midrule
SemEval 2015 Task 13~\cite{moro2015semeval} &2015&4 &137 &769 &\cmark &\xmark & SemEval$_{csv}$\\ \midrule
Thibaudet~\cite{renden2016} &2016&1 &3,807 &2,980 &\xmark &\cmark & RENDEN$_{xml}$\\\midrule
Bergson~\cite{renden2016} &2016&1 &4,280 &380 &\xmark &\cmark & RENDEN$_{xml}$\\\midrule
DBpedia Abstracts ~\cite{abstracts2016} &2016&39,132 &- &505,033 &\xmark &\xmark & NIF\\\midrule
MEANTIME~\cite{meantime2016} &2016&120 &597 &2,790 &\cmark &\cmark & CAT$_{xml}$\\\midrule
VoxEL$_R$~\cite{VoxEL2018} &2018&15 &94 &674 &\cmark &\xmark & NIF\\\midrule
VoxEL$_S$~\cite{VoxEL2018} &2018&15 &94 &204 &\cmark &\xmark & NIF\\
\bottomrule
\end{tabular}
%}
\end{table}
\end{comment}

\newcommand{\ccell}[1]{\multicolumn{1}{c}{#1}}
\newcommand{\rcell}[1]{\multicolumn{1}{r}{#1}}
%\setlength{\tabcolsep}{1.2ex}
\begin{table}[tb!] %\begin{table}[!] \centering \caption{Overview of popular EL datasets; we highlight in bold those datasets that have been converted to NIF \label{tab:datasets}} %\resizebox{\textwidth}{!}{ \begin{tabular}{lccc} \toprule \textbf{Dataset}~~~~~~~~~~~~~~~~~~ & \ccell{\textbf{Mn}} & \ccell{\textbf{Typ}}&\ccell{\textbf{Format}}\\\midrule MSNBC~\cite{cucerzan2007large} &\xmark &\xmark & MSNBC \\ %\midrule%& XML \\\midrule IITB~\cite{IITB2009} &\cmark &\xmark & IITB \\ %\midrule%& XML \\\midrule AIDA/CoNLL~\cite{aida2011} &\cmark &\xmark & AIDA \\ %\midrule%& CSV \\\midrule ACE2004~\cite{aquaint} &\xmark &\xmark & MSNBC \\ %\midrule%& XML \\\midrule AQUAINT~\cite{aquaint} &\xmark &\xmark & MSNBC \\ %\midrule%& XML\\\midrule \textbf{DBpedia Spotlight} \cite{mendes2011dbpedia} &\cmark &\xmark & Lexvo \\ %\midrule%& RDF \\\midrule \textbf{KORE50}~\cite{kore50} &\cmark &\xmark & AIDA \\ %\midrule%& CSV\\\midrule N3-RSS 500~\cite{N3} &\cmark &\xmark & NIF \\ %\midrule%& RDF/OWL \\\midrule Reuters 128~\cite{N3} &\cmark &\xmark & NIF \\ %\midrule%& RDF/OWL\\\midrule News-100~\cite{N3} &\cmark &\xmark & NIF \\ %\midrule%& RDF/OWL\\\midrule Wes2015~\cite{wes2015} &\cmark &\xmark & NIF \\ %\midrule%& RDF/OWL\\\midrule SemEval 2015 Task 13~\cite{moro2015semeval} &\cmark &\xmark & SemEval \\ %\midrule%& CSV\\ \midrule Thibaudet~\cite{renden2016} &\xmark &\cmark & RENDEN \\ %\midrule%& XML\\\midrule Bergson~\cite{renden2016} &\xmark &\cmark & RENDEN \\ %\midrule%& XML\\\midrule DBpedia Abstracts ~\cite{abstracts2016} &\xmark &\xmark & NIF \\ %\midrule%& RDF/OWL\\\midrule MEANTIME~\cite{meantime2016} &\cmark &\cmark & CAT \\ %\midrule%& XML \\\midrule VoxEL~\cite{VoxEL2018} &\cmark &\xmark & NIF \\%& RDF/OWL\\ \bottomrule \end{tabular} %} \end{table} NIF is based on RDF triples $<$\textit{subject}, \textit{predicate}, \textit{object}$>$ where the \textit{subject} identifies a unit of information, such as a document, sentence, or annotation; and each \textit{predicate---object} pair defines values for their properties. Figure~\ref{fig:nif} provides a brief example of a single entity annotation serialized in the Turtle syntax of RDF. The properties \texttt{nif:beginIndex} and \texttt{nif:endIndex} indicate the start and end position of the entity mention in a sentence; the targeted KB identifier is specified using the property \texttt{itsrdf:taIdentRef}; and a class can be defined with \texttt{itsrdf:taClassRef}. Other NIF properties capture metadata for other NLP tasks, such as stemming (\texttt{nif:stem}), part-of-speech tagging (\texttt{nif:oliaCategory}, \texttt{nif:lemma}), etc. \begin{figure} \caption{NIF triples to specify the annotation of Jackson from sentence S1} \label{fig:nif} \begin{Verbatim}[frame=single] <https://example.org/doc1#char=0,7> a nif:String, nif:Context, nif:Phrase, nif:RFC5147String; nif:anchorOf """Jackson"""^^xsd:string ; nif:beginIndex "0"^^xsd:nonNegativeInteger ; nif:endIndex "7"^^xsd:nonNegativeInteger ; itsrdf:taIdentRef </wiki/Michael_Jackson> . \end{Verbatim} \end{figure} %Sentence: %Thomas and Mario are strikers playing in Munich. %------------------------------------------------ %https://en.wikipedia.org/wiki/FC_Bayern_Munich %https://en.wikipedia.org/wiki/Munich %The US and the EU do not agree however on considering wether to supply military aid to Kiev. 
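To give a concrete idea of how such annotations can be produced or consumed programmatically, the following sketch uses the Python \texttt{rdflib} library to build the triples of Figure~\ref{fig:nif}. The document IRI and the target KB identifier are illustrative, and the namespace IRIs shown should be checked against the NIF version being targeted.

\begin{Verbatim}[frame=single]
from rdflib import Graph, Literal, Namespace, URIRef
from rdflib.namespace import RDF, XSD

# Assumed namespaces: NIF core and ITS RDF (check against the
# NIF version in use).
NIF = Namespace("http://persistence.uni-leipzig.org/"
                "nlp2rdf/ontologies/nif-core#")
ITSRDF = Namespace("http://www.w3.org/2005/11/its/rdf#")

def annotate(g, doc_iri, text, begin, end, target):
    """Add one NIF entity annotation for text[begin:end]."""
    phrase = URIRef(f"{doc_iri}#char={begin},{end}")
    for cls in (NIF.String, NIF.Phrase, NIF.RFC5147String):
        g.add((phrase, RDF.type, cls))
    g.add((phrase, NIF.anchorOf,
           Literal(text[begin:end], datatype=XSD.string)))
    g.add((phrase, NIF.beginIndex,
           Literal(begin, datatype=XSD.nonNegativeInteger)))
    g.add((phrase, NIF.endIndex,
           Literal(end, datatype=XSD.nonNegativeInteger)))
    g.add((phrase, ITSRDF.taIdentRef, URIRef(target)))

g = Graph()
g.bind("nif", NIF)
g.bind("itsrdf", ITSRDF)
annotate(g, "https://example.org/doc1",
         "Jackson won an award as best-selling artist of the 1980s",
         0, 7, "http://dbpedia.org/resource/Michael_Jackson")
g.serialize(destination="doc1.ttl", format="turtle")
\end{Verbatim}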
% Towards Universal Multilingual Knowledge Bases % Lexvo.org: Language-Related Information for the Linguistic Linked Data Cloud % http://www.lexvo.org/linkeddata/resources.html% % KORE50 y DBpedia Spotlight fueron transformadas en NIF (http://apps.yovisto.com/labs/ner-benchmarks) %\textcolor{red}{We show in Figure 1 the main view of NIFify, where you can make use of creation/modification functionalities. In this tab, you can either, create a new one or upload an existing one for its visualization or modifications. NIFify handle corpora with more than one document, which is a property presented in the majority of current datasets. Our tool disposes a pletora of facilities to change document, sentences and annotations according to our need. Additionally, we have ways to manually identify in the text some entities that may be useful in some contexts, such as pro-forms and numbers.} %\textcolor{red}{We employ different color in the visualization to strees specific aspect of the annotations. In the default setting, the annnotation colors say whether the they are overlapped or not, but in addition, the colors can be customized to differentiate the classes to which it belongs.} \begin{comment} One benefit of using RDF as a core data model is that NIF can be readily extended with further class and property terms, as needed. For example, for the purposes of the Wes2015 dataset~\cite{wes2015} for Document Retrieval, novel properties and classes (e.g., \texttt{si:Query}, \texttt{si:result}, \texttt{yv:queryId}) were used alongside NIF. We now describe a minor extension to NIF that we have incorporated into our NIFify system (whose need arose while annotating the VoxEL dataset~\cite{VoxEL2018}). %In this Section we detail the need for new mechanisms to specify the entity type according to the links and not related to the mentions, handling in this way, annotations that incorporate more than one link. % %Entity type specifications are valuable metadata in NLP, used commonly as an indicator in the decision making of processes that involve entities. The detection of entities type has been well studied so far, separated by some author as the subtask Entity Type Recognition (ETR) from Entity Recognition. ETR also have been stressed on international competitions as CALCS~\cite{calcs2018shtask}, including Tracks that aims the entity type prediction of the entities from a given text corpus. Per Table~\ref{tab:datasets}, many EL datasets type annotations according to a list of predefined classes; this practice was prevalent in earlier Named Entity Recognition (NER) works, whose goal was to identify entities of different types but without having to link them to a KB. The entity type can be specified in NIF on an annotation with the property \texttt{itsrdf:taClassRef}.\footnote{See example: \url{http://persistence.uni-leipzig.org/nlp2rdf/ontologies/nif-core/example.ttl}; January 1st, 2019.} However, problematic situations emerge when the same annotation may refer to more than one URI in the KB. 
This is due to the fact that either the context is not enough to fully disambiguate the entity mention, or the entity mention is intrinsically ambiguous, per the following two examples: \begin{description} \item[S1] \textit{``Bush was president of the United States of America."} \item[S2] \textit{``Iran is not capable of developing a nuclear program without Moscow's help."} \end{description} In sentence \textbf{S1}, without further context, it remains unclear whether the entity mention ``\textit{Bush}'' refers to the 41st US president George H. W. Bush or to his son; when evaluating EL systems, we may wish to take both possibilities into account. In sentence \textbf{S2}, the entity mention ``\textit{Moscow}'' could be seen as referring to \texttt{wiki:Moscow}, the capital of Russia, or perhaps rather as referring to help from the Government of Russia (\texttt{wiki:Government\_of\_Russia}). While NIF supports specifying multiple identifiers or classes on an annotation, it does not support assigning different classes to different identifiers; while this would not be a problem for \textbf{S1} (both are \textit{Person}s), in \textbf{S2}, one possibility is a \textit{Place} while the other is an \textit{Organization}. We propose to separate the entity type specification from the annotation scope with a triple \textit{<$s$, enif:entityType, $o$>} for each link in the annotation, where $s$ denotes the KB identifier, not the mention. In Figure~\ref{fig:nif} we show the annotation of Moscow from sentence \textbf{S2} with NIF, followed by two triples that represent our extension. \end{comment} %---------------------------------------------------------------------------------- \section{NIF Construction} A number of EL datasets have either been computed from existing sources, or computed automatically. For example, DBpedia Abstracts is too large for human labeling to be feasible.\footnote{Details of the annotation process are not provided, but we assume it uses links already present in the corresponding Wikipedia texts.} On the other hand, the recently proposed BENGAL tool~\cite{Bengal2018} adopts a creative strategy for automatically generating gold standard datasets: rather than start with text, the authors propose to start with facts about entities from structured datasets (in RDF) and use verbalization components to convert these facts to text, recording which entities are used to generate which sentences; while this approach has the benefit of being able to generate very large and accurate gold standards, how representative the generated text is of real-world corpora depends on the quality of the verbalization component. On the other hand, per Table \ref{tab:datasets}, most datasets are constructed with manual intervention, and a number of systems have been proposed to help in this process. In previous work, we manually annotated a multilingual EL dataset called VoxEL~\cite{VoxEL2018}, generating NIF annotations; at the start of this process, we tried to find an existing tool that would aid in the annotation process, but we found that while some systems were unavailable, others (e.g., QRTool\footnote{\url{https://github.com/dice-group/QRTool}; January 1st, 2019}) we could not install, or did not offer features such as validation. Addressing these limitations, we propose NIFify: an open source tool that provides end-to-end support for EL annotation, including the import of text corpora\footnote{\url{https://users.dcc.uchile.cl/~hrosales/MSNBC_ACE2004_to_NIF.html}; Jan. 
1st, 2019}; the import (including the conversion of MSNBC formats to NIF) of existing EL datasets; the addition and revision of annotations; custom tagging systems for annotations; visualizations of annotations; overlapping mentions; and finally, visualisations of the results of EL systems over the resulting dataset. The tool requires no installation and can be used either online or offline in a browser\footnote{\url{https://github.com/henryrosalesmendez/NIFify_v2}; January 1st, 2019}. For space reasons, rather than describe all the features of NIF, we focus on two group of features of particular importance to NIFify: \textit{validation} and \textit{result visualization}. %permits the annotation, visualization, and validation of NIF datasets in the same environment, as well as the comparison of EL systems. We design NIFify to capture specifications from different perspectives of annotations, allowing partial or total overlapping among them, as well as the cross-links specification. As a consequence of the annotation process, this tool is a suit to visualize and modify already proposed NIF datasets. Additionally, we include in NIFify functionalities to transform MSNBC-based datasets to NIF, used to transform the datasets MSNBC and ACE2004\footnote{\url{https://users.dcc.uchile.cl/~hrosales/MSNBC_ACE2004_to_NIF.html}}. %In our previous work~\cite{VoxEL2018}, we use NIFify to build VoxEL -- a multilingual dataset with the sentences/mentions manually annotated -- that manually aligned cross-language over 15 news from VoxEurop. Although this is a source of curated text, there were differences in the translations of the news, for example, in many cases, some proper names of entity mentions were translated by journalists as pronouns to other languages. Another common problem was the inclusion or deletion of sentences in the translations. For these reasons, we include in NIFify functionalities to deal with these situations, allowing the replacement, modification, and deletion of part of the text in order to align the mentions, as well as the elimination of whole sentences. %--------------------------------------------------------------------------------------- \section{Validation} Validation is a crucial step to help human experts ensure the production of a ground truth for gold standards, and EL datasets are no exception. Legacy EL datasets have been observed to contain errors or design choices that may affect the results of evaluation~\cite{Marieke2016,Kunal2017,ourAMW2018}; furthermore, target KBs may evolve, rendering some links obsolete. Erp et al.~\cite{Marieke2016}, analyze characteristics of seven EL datasets and find biases introduced by the decisions taken in the annotation process; they highlight the need for a more standard creation of datasets. Jha et al~\cite{Kunal2017} propose a set of validation rules and propose the EAGLET system to check these rules when constructing EL datasets; however, these rules are sometimes dogmatic, considering, for example, overlapping mentions to be errors when they are considered valid by other definitions~\cite{ourAMW2018}; furthermore, EAGLET requires execution on a command-line to highlight errors in the visualization, rather than being supported by the interface. %However, more than one definition of entity have been used in the community~\cite{ourAMW2018}, with NIFify we allow also the validation of NIF datasets, incorporating only \textcolor{red}{their [QUITAR]} general rules. 
For example, we consider that overlapping mentions are suitable in some application scenarios, whereas EAGLET flags them through its \textit{Overlapping Error} rule. EAGLET must also be run offline, identifying errors with Maven commands before they can be visualized\footnote{\url{https://github.com/dice-group/Eaglet}; January 1st, 2019}; in contrast, our proposal allows errors to be identified, shown and corrected in a single run, either online or offline, making for a more user-friendly process.
%Some validators are completely dedicated to checking the consistency of the NIF format, but it is not took into account in EL validations.
Mistakes in the structure of NIF datasets are commonly handled by the parsing script of benchmark tools, validating in this way the syntax but not the content. This directly affects the evaluation process, as results corresponding to these erroneous annotations are counted as \textit{false positives} rather than \textit{true positives}. For example, the position information in the URI of the subject of each annotation triple should match the predicates \texttt{nif:beginIndex} and \texttt{nif:endIndex} (\textit{Format Error Type 1}). The string delimited by these two predicates should also match the string specified through the predicate \texttt{nif:anchorOf} (\textit{Format Error Type 2}).
%Contrary to the previous validation proposal, NIFify allows for detecting possible errors present in terms of the mentions and the identifiers to which they are linked; specifically, the following rules are checked:
%, checking of these two errors that are presented in popular datasets as DBpedia Spotlight.
We fix the Format Errors of DBpedia Spotlight and release the corrected version\footnote{\url{https://users.dcc.uchile.cl/~hrosales/fixedDBpediaSpotlight.html}}.
\begin{itemize}
\item \textsc{Spelling Error} (SE): Mentions should neither start nor end in the middle of a word.
%We highlight those annotation where the mentions are substring of other word that share characteres in same position.
\item \textsc{Link Error} (LE): When linking to Wikipedia or DBpedia, identifiers should be the URLs/IRIs corresponding to an unambiguous, non-redirect page on Wikipedia.
%We identified as error those annotation that link invalid URIs, or URIs that correspond to redirct or disambiguation page.
\item \textsc{Format Error} (FE): We check the consistency of the NIF representation with two sub-rules:
\begin{itemize}
\item Annotations are typically assigned a subject IRI of the form \texttt{http://example.org\#char=$x$,$y$}, where $x$ and $y$ should correspond with the values given for \texttt{nif:beginIndex} and \texttt{nif:endIndex} respectively.
\item The substring identified by these positions should correspond with that denoted by the \texttt{nif:anchorOf} property.
\end{itemize}
\item \textsc{Category Error} (CE): For those datasets with classes specified by the predicate \texttt{itsrdf:taClassRef}, NIFify allows the specification of custom rules in order to detect inconsistencies in the annotation classes. For example, the classes \texttt{dbo:Person} and \texttt{dbo:Event} should not be present on the same annotation as they are disjoint: an entity is typically not a person and an event at the same time.
\end{itemize}
% TODO: Implement the validations commented below.
%
% Note: implement the following validators:
%
% Inconsistent Marking (IM).
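To make the SE and FE checks concrete, the following Python sketch shows one possible implementation over a single parsed annotation. It assumes the annotation has already been loaded (e.g., with \texttt{rdflib}) into the subject IRI, the begin/end indexes, the anchor string and the full document text; the function names are illustrative.

\begin{Verbatim}[frame=single]
import re

def check_spelling(doc_text, begin, end):
    """SE: a mention must not start or end inside a word."""
    before = doc_text[begin - 1] if begin > 0 else " "
    after = doc_text[end] if end < len(doc_text) else " "
    return not (before.isalnum() or after.isalnum())

def check_format(subject_iri, doc_text, begin, end, anchor):
    """FE: offsets in the subject IRI and the anchor string must
    agree with nif:beginIndex / nif:endIndex."""
    m = re.search(r"#char=(\d+),(\d+)$", subject_iri)
    if m is None:
        return False
    iri_begin, iri_end = int(m.group(1)), int(m.group(2))
    return (iri_begin, iri_end) == (begin, end) \
        and doc_text[begin:end] == anchor

doc = "Jackson won an award as best-selling artist of the 1980s"
assert check_spelling(doc, 0, 7)
assert check_format("https://example.org/doc1#char=0,7",
                    doc, 0, 7, "Jackson")
\end{Verbatim}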
% This category comprises entities that were marked in at least one of the documents but whose occurrence in other documents of the same dataset is not marked as such. For example, the entity Seattle was marked in Document 1 but is left out in Document 2.
%
% Missing Entity. The final categorisations of anomalies is a further extension of EM error. This comprises the presence of entities which satisfy the type conditions of the gold standard but were not been marked. This tier of error falls under the dataset completion and violates Rule 5c.
%
% Check whether entities have "the", "la" or "Mr" as part of the surface form when they should not.
%
NIFify then encodes rules to detect these errors and thus validate EL datasets. In order to test the prevalence of these errors in existing datasets, we ran NIFify's validation over EL datasets currently available in the NIF format (excluding those that we converted ourselves to NIF -- MSNBC and ACE2004 -- since we resolve such errors as part of the conversion). In Table~\ref{tab:validations}, we show the results of this validation process, where we can observe that all datasets considered contain errors of at least one type.

\begin{table}
\centering
\caption{Errors found in current NIF datasets; the last dataset was labeled by us}
\label{tab:validations}
%\resizebox{\textwidth}{!}{
\begin{tabular}{lrrrr}
\toprule
\textbf{Dataset}~~~~~~~~~~~~~~~~~~~~~~~~~ & \ccell{SE} &\ccell{LE}& \ccell{FE}& \ccell{CE}\\\midrule
%MSNBC &-- &-- &-- &--\\ %\midrule
%ACE2004 &-- &-- &-- &--\\ \midrule
DBpedia Spotlight &8 &23 &4 &--\\ %\midrule
N3-RSS 500 &1 &34 &-- &--\\ %\midrule
Reuters 128 &4 &71 &-- &--\\ %\midrule
News-100 &9 &1515 &-- &--\\ %\midrule
Wes2015 &-- &609 &-- &--\\ \midrule
VoxEL &-- &8 &-- &--\\
\bottomrule
\end{tabular}
%}
\end{table}

In the majority of cases, SE errors are introduced during the construction of the dataset, either by including characters that do not belong to the mention or, conversely, by leaving out part of a word that completes a mention; for example, in the DBpedia Spotlight dataset, the URI \texttt{wiki:Man} is associated with three characters of the word \textit{perfor\underline{man}ce}. Other SE errors contained in the datasets involve missing spaces between words.
%The SE errors of datasets MSNBC$_t$ and ACE2004$_t$ were fixed in the transformation process to NIF.
The most frequent type of error encountered in the NIF datasets was LE: this is mainly due to the fact that KBs are constantly evolving, which may affect link consistency. For example, in Wikipedia, pages about specific entities may become disambiguation pages, or redirects to other pages. Such changes explain why our own dataset (VoxEL, created using NIFify) contains such errors: the external KB has evolved since its creation. The News-100 and Wes2015 datasets contain a large number of LE errors beyond what can be explained by the KB changing: for example, in the Wes2015 dataset, 520 of its LE errors correspond to redirect pages, 48 to disambiguation pages, while the rest do not point to valid pages. Finally, the only dataset we found with FE-type errors was DBpedia Spotlight, which had problems with its NIF representation. On the other hand, we did not find any errors of type CE.
We have published all errors found online for reference.\footnote{\url{https://users.dcc.uchile.cl/~hrosales/dataset_errors.html}; January 1st, 2019.} We conclude that most of the validation features of NIFify can help to improve the quality of EL datasets, including by finding problems caused by the evolution of a KB over time.

%---------------------------------------------------------------------------
\section{Result Visualization}
%It is common in the research process to select available datasets instead of creating new ones. In this way we can take advantage of the previous results that other authors have had with these datasets to compare our results, however, the datasets are not the only factor that allows this comparison. All the decisions made in the comparison process is also decisive, such as the selection and implementation of the involved quality measures, the interpretation of the results of the EL systems, the decision of taking the annotations as true positives, etc.

Once an EL dataset has been generated, the next step is to evaluate and compare EL systems using the dataset, and a number of frameworks have been proposed to support this task. Cornolti et al.~\cite{BAT2013} proposed the BAT framework, which they used to compare five EL systems over five datasets. Along similar lines, Usbeck et al. proposed GERBIL~\cite{gerbil-2015}, which extends the systems and (NIF) datasets supported. However, both frameworks produce comparative metrics, rather than visualizing the actual output of the EL tool(s). Another EL benchmark framework called Orbis~\cite{Orbis2018} was recently proposed that includes visualization of systems' responses; however, Orbis is not available at the provided URL.\footnote{\url{https://github.com/htwchur}; January 1st, 2019.}

Given that there is no clear definition of what EL systems should link~\cite{ourAMW2018}, we argue that metrics like precision and recall may not tell the full story, and that results may be due not only to the quality of the output produced by an EL system, but also to whether or not it targets the same types of entities as labeled in the dataset. Comparing EL results with the ground truth labeled in a dataset under construction/revision may even lead to changes in the dataset.\footnote{Of course, we urge caution to ensure that bias is not introduced by adapting a dataset to suit a subset of tools evaluated.} Hence with NIFify we propose a benchmark framework to visualize the results of EL systems over a NIF dataset, highlighting both \textit{true positives} and \textit{false positives}, which allows a more qualitative assessment of both a given EL tool and an EL dataset, possibly in the context of a given application. Additionally, NIFify can be used to demo EL systems, offering a visual, user-friendly interface.
We then provided a summary of the main features of NIFify for generating EL gold standard datasets, before focusing on features relating to validation, showing that existing EL datasets exhibit errors detectable by the tool, detecting a total of 2,321 errors across six datasets; we publish these errors online for reference: \url{https://users.dcc.uchile.cl/~hrosales/dataset_errors.html}. Finally, we discuss the importance of features for visualizing the results produced by an EL system, which are further implemented in the NIFify tool. A demo of the tool is available at \url{https://users.dcc.uchile.cl/~hrosales/NIFify\_v2.html} %A variety of tools have been proposed to annotate and validate NIF datasets; and also to support the comparison among EL systems through them but in a separately way. Here we propose also the tool NIFify which gather these three functionalities over both, the current NIF format and our NIF extension. Our tool allows the annotation of text corpora including the specification of overlapped mentions and their entity type. Through the annotation functionality, we transformed MSNBC and ACE2004 datasets from its own format to NIF, and thus, allowing their usage in processes designed for NIF guidelines. %NIFify disposes a validator of a set of rules to identify errors presented on benchmark datasets and an automatic way to solve some of them. We apply our validator to some NIF benchmark datasets of the literature, discovering a total of 2322 errors. The errors detected in DBpedia Spotlight were fixed and we release the corrected version of it, available for download. We incorporate also in NIFify a benchmark framework that allows the visualization and measurements of state-of-the-art approaches. %Aqui se puede encontrar una lista de datasets en NIF: %http://dashboard.nlp2rdf.aksw.org/ \begin{comment} {\footnotesize \paragraph{Acknowledgements} The work of Henry Rosales-M\'endez was supported by CONICYT-PCHA/Doctorado Nacional/2016-21160017. The work was also supported by the Millennium Institute for Foundational Research on Data (IMFD) and by Fondecyt Grant No.\ 1181896.} \end{comment} %---------------------------------------------------------------------------- \section{Acknowledgments} The work of Henry Rosales-M\'endez was supported by CONICYT-PCHA/Doctorado Nacional/2016-21160017. The work was also supported by the Millennium Institute for Foundational Research on Data (IMFD) and by Fondecyt Grant No.\ 1181896. % % The next two lines define the bibliography style to be used, and the bibliography file. \bibliographystyle{ACM-Reference-Format} \bibliography{bibfile} \end{document}
{ "alphanum_fraction": 0.7560336848, "avg_line_length": 90.9507186858, "ext": "tex", "hexsha": "9d7bd4ed9d12c19aa0451fcc05f140df795690f2", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "c6c1e2318e114493ec25605b36f655933557cd00", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "henryrosalesmendez/NIFifyPaper", "max_forks_repo_path": "main.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "c6c1e2318e114493ec25605b36f655933557cd00", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "henryrosalesmendez/NIFifyPaper", "max_issues_repo_path": "main.tex", "max_line_length": 1282, "max_stars_count": null, "max_stars_repo_head_hexsha": "c6c1e2318e114493ec25605b36f655933557cd00", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "henryrosalesmendez/NIFifyPaper", "max_stars_repo_path": "main.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 11233, "size": 44293 }
\documentclass[12pt]{article} \usepackage[utf8]{inputenc} \usepackage{caption} \usepackage{mathtools} \usepackage{amssymb} \usepackage{graphicx} \usepackage{tikz} \usepackage{hyperref} \usepackage{cleveref} \usepackage{fullpage} \usepackage{listings} \usetikzlibrary{arrows} \usetikzlibrary{calc} \usetikzlibrary{positioning} \usetikzlibrary{shapes} \newenvironment{Figure} {\par\medskip\noindent\minipage{\linewidth}} {\endminipage\par\medskip} \crefformat{footnote}{#2\footnotemark[#1]#3} \hypersetup{colorlinks=true,linkcolor=blue,urlcolor=blue} \lstset{frame=single,basicstyle=\ttfamily} \newcommand{\mylstinline}[2]{\fbox{\lstinline[language=#1]{#2}}} \title{Smoothernity} \date{\today} \begin{document} \maketitle \tableofcontents \pagebreak \section{Idea} The idea is to create a program which sports the following features: \begin{enumerate} \item Huge immersive virtual universe\footnote{ E.g. from the scale of stars down to planets, buildings and rooms.}. \item Low physical storage space consumption\footnote{ E.g. space on hard drive(s) of local or remote machine(s). Essentially, this means low \emph{Kolmogorov's comlpexity} of the program.}. \item Real-time universe generation\footnote{ The world seamlessly generates on-the-fly as player moves through the universe.}. \item Constant frame rate\footnote{ Smooth visual animation without interruptions during the whole time the program is running.}. \item Ports for different platforms\footnote{ Both desktop and mobile.}. \item Effective utilization of the available resources on each platform\footnote{ That is, it should yield ``better picture'' on ``faster platforms''.}. \item Interactive universe editor\footnote{ The designer can navigate through the universe, make changes and see the result as soon as possible.}. \item Small platform-dependent code part\footnote{ Let's say, \(\le 10,000\) lines of code for each port. E.g. it's OK to have many ports with large \emph{cumulative} size as long as none of them exceeds this limit when counted separately.}. \item Peer-to-peer multiplayer. \end{enumerate} \section{Generation\label{Gen}} \emph{Procedural generation} means the production of output data by some algorithm which consumes some input data. The end result\footnote{ Either of a single algorithm, or of multiple algorithms chained together. } of visual procedural generation is visible contents expressed in terms of triangles and textures. The generation algorithms must satisfy following requirements: \begin{enumerate} \item Output data must be orders of magnitude larger than the input data\footnote{ Otherwise a simple copying of inputs to outputs will do. }. \item There must be many possible outputs\footnote{ Otherwise a constant output data set will do. }. \item Same input must always yield the same output. \item Output data must be aesthetically appealing at least for some inputs\footnote{ Otherwise just white noise will do. }. \item Algorithm must finish in a matter of seconds on commodity hardware. \item Algorithm must be capable of yielding better results if using more computational resources\footnote{ E.g. yield low detail picture on low-end machine and high detail on high-end. }. \item Program code implementing the algorithm should be compact. \end{enumerate} Whole generation process can be split into several levels of abstraction. \subsection{Primitives Level\label{Prim}} Generates reusable low-level building blocks. Implemented in code, optimized for each platform. Heaviest computations should be performed on this level. 
The examples of primitives are: ``marching cubes of function \(f(x,y,z)\)'', ``perlin noise landscape'', ``spline surface'', etc. It operates with such entities as ``triangles'', ``meshes'', ``lights'', ``shaders'', ``OpenCL kernels'', ``collision objects'', ``rigid bodies'', ``sound waves data'', etc. \subsection{Concepts Level\label{Concept}} Generates reusable high-level building blocks from other reusable building blocks (both high- and low-level). Implemented in platform-independent code. It composes primitives and other concepts into higher-level structures, such as ``character'', ``vehicle'', ``building'', ``city'', etc. \subsection{World Level\label{World}} Generates unique content from high-level building blocks. Implemented as data. Connects concepts into directed acyclic graph of dependencies and specifies attribute values\footnote{ E.g. city of 20 buildings with 3\ldots5 rooms each.}. This graph can be edited either interactively by the designer or programmatically through ``game script''\footnote{ E.g. add city \(C\) to planet \(P\) after the player had picked up item \(I\).}. Concepts implementation code is itself a dependency for every instance of this concept in the dependency graph. Thus, when concept code is changed, all the affected instances and their dependents can be regenerated. Implementation-wise, one possible way to do this may be by tracking all primitives created by the particular concept, so that when this concept is changed, all primitives can be disposed automatically. This reloading idea in general case is applicable for static objects only. That is, object state must depend only on other objects' states, but not on the self state in a previous moment in time. It won't work for dynamic case, e.g. because relative phase between objects' states is changed due to reload. For dynamic case, we can edit the initial state with frozen time\footnote{ ``Frozen time'' means that frame updates still occur, but time increments are 0. It's necessary to let generation go on, while dynamic processes, like physics and AI, are halted. } at 0-time. Then start simulation by clicking ``play'' button. To continue editing, we ``pause'' again, reset to 0-time. Game scenario state can be represented by a vector with integer components. This vector is essentially an input for world generation. Some nodes behave conditionally to the game scenario state. To check game at various stages it's sufficient to change this vector. Affected nodes can be regenerated on-the-fly, just as regular dependencies. \section{Runtime} Runtime state is the most volatile part of game state. It's not persistent, it exists only while game runs. Generation part (\Cref{Gen}) defines a shape of the universe. Runtime part defines its behavior. Runtime part depends on generation part\footnote{ Runtime part can ask, which objects should be placed in given bounding box of the universe, what is the geometry of these objects, etc. }. Every resource generated by generation part, is owned by generation part. Whenever generation part changes\footnote{ Because game scenario vector has been changed, or because of some editor command. }, runtime part restarts. That is, all game entities spawned in runtime part\footnote{ E.g. mesh instances: characters, projectiles, etc. } are discarded, memory is cleared and runtime script is restarted from scratch. 
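One way to make such restarts cheap is to let the generation part memoize everything it produces, keyed by the concept, its attribute values and the game scenario vector. The following is a minimal sketch of that idea (written in Python for brevity; in Smoothernity itself this logic would live in the platform-independent script layer, and all names are illustrative).

\begin{lstlisting}[language=Python]
# Sketch of a generation cache that outlives runtime restarts.
# The key combines the concept name, its attributes and the game
# scenario vector, so changing any of them regenerates that node.
class GenerationCache:
    def __init__(self):
        self._resources = {}  # key -> generated resource

    def get(self, concept, attrs, scenario, generate):
        key = (concept, tuple(sorted(attrs.items())), tuple(scenario))
        if key not in self._resources:
            self._resources[key] = generate(**attrs)
        return self._resources[key]

    def invalidate_concept(self, concept):
        # Called when the concept's code changes: drop its resources
        # so that dependents are regenerated on the next request.
        self._resources = {k: v for k, v in self._resources.items()
                           if k[0] != concept}

cache = GenerationCache()

def make_city(buildings):
    return ["building-%d" % i for i in range(buildings)]

scenario = (0, 1, 0)  # game scenario state vector
city = cache.get("city", {"buildings": 20}, scenario, make_city)
# After a runtime restart the entities are discarded, but the cache
# still holds the generated city and returns it without regeneration.
assert city is cache.get("city", {"buildings": 20}, scenario, make_city)
\end{lstlisting}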
After restart, runtime part can quickly retrieve resources that were already generated before restart\footnote{ Generated resources are owned by generation part, and therefore are not affected by runtime restart. }. Runtime part can change game scenario vector, thereby effectively restarting itself. This is how game script is implemented. It should be possible to do this restart seamlessly, within single frame update, so that player wouldn't notice that it occured at all\footnote{ Because already generated resources will be retrieved instantaneously after restart, without regeneration. }. \section{Editing} World level (\Cref{World}) of generation can be edited interactively. Changes propagate through directed acyclic graph of dependencies, so that only affected parts are regenerated. For desktop computers, interactive editing can be implemented in two windows: the game (3D scene) and the editor (GUI). Edited data is valuable, and must be preserved at all costs. It's desirable to minimize amount of code that can potentially lead to data corruption. One way to do this is to separate the game and the editor. The game is volatile, it should crash as early as possible to diagnose malfunctions. The editor should store its data after modification ASAP, to minimize loss in case if the editor crashes. The editor consists of two parts: front-end and back-end. Front-end is essentially a GUI: JavaScript application running in web browser, that user interacts with. Front-end is communicating only with back-end. Back-end is responsible for data persistance and synchronizing with the game. Back-end communicates with the game and with front-end. It's preferable to have back-end as a standalone application\footnote{ E.g. using Nodejs. }. This way data should be less likely to get lost or corrupted. The communication protocol between the game and editor back-end can be as simple as sending script code to execute on either part. \section{Scale} The game universe should be large. The challenge is that real-time computations\footnote{ Such as affine transformations, physics simulation, etc. } require using of floating point numbers, and these have finite precision. Only a small portion of the universe is immediately visible to the observer, though. Thus, one way to mitigate this challenge is to use ``local'' coordinate system, ``centered'' on the observer\footnote{ That is, all objects near observer will have coordinates near \(0\), providing full floating-point precision for computations. }. To represent observer's global position in the universe, another set of coordinates can be used\footnote{ These coordinates specify observer's ``offset'' from the center of the universe. If we use double-precision floating point numbers to encode integer global coordinates in meters, this gives us a cube with a side of \(\approx 10^{15}\) meters (or 0.1 light year), which is \(\approx 1,000\) times larger than Solar system. If we add another set of double-precision floating point numbers encoding integer global coordinates in 0.1 of a light year, this gives us a cube with a side of \(\approx 10^{14}\) light years, which is \(\approx 10,000\) times larger than the observable universe. }. Due to the limitations of floating-point precision of Z-buffering, there's an upper limit of the size of the scene, after which Z-fighting appears. To mitigate this problem one can render scene in multiple passes: \emph{levels of detail}\footnote{ For example, render largest scene first (planets, stars). 
Then render a smaller scene (landscape up to the horizon, mountains, clouds). Finally, render the scene closest to the observer (grass, characters, buildings). }. To navigate through the universe, one can ``shift'' between levels of detail. Only levels of detail larger than or equal to the one the observer is currently in are rendered\footnote{ E.g. if the observer is on the level of ``planets'' and ``stars'', he shouldn't see ``grass'' and ``buildings''. Only ``planets'' and ``stars'' should be visible.}. The observer's movement speed is adjusted according to the current level of detail\footnote{ The observer should move faster through the level of detail of planets than through the level of detail of grass. }.

\section{Grid}

Only a small part of the universe is in the immediate vicinity of the observer at each moment in time. To split the universe into parts, one may use a \emph{spatial grid}. Then, the query to the generation algorithm might look like: ``generate the scene in local coordinates, corresponding to the part of the universe in a box with one corner in \((x_1, y_1, z_1)\) and another in \((x_2, y_2, z_2)\)''. The algorithm can figure out how detailed the scene should be from the size of the query box.

\section{Multiplayer}

Gameplay should be hardcore, relying on the player's skill rather than farming\footnote{ Like Mortal Kombat, Cave Story, Side Scroller, etc.}. This requires immediate reaction to the player's actions. Hot-seat multiplayer\footnote{ Where all players are playing on the same system, sharing the screen. } fits well with this scheme. Distributed multiplayer should be implemented in some form as well. It's desirable to use peer-to-peer communication for multiplayer to avoid spending resources on servers. There are two modes of multiplayer interaction: active and passive.

\subsection{Passive Mode}

In passive mode players just see each other's position\footnote{ Like in Dark Souls: show a ghostly form of other players only. This approach removes the necessity to synchronize game worlds between players, and softens the annoyance of remote players walking through things. } and chat with each other\footnote{ \label{Chat} Perhaps it's a good idea to somehow utilize an established chat software/service, like IRC, Closed Circles, Facebook, etc. This same chat service can serve as an out-of-game means of communication. There's an IRC client library in C called \href{http://www.ulduzsoft.com/libircclient/index.html}{libircclient}. }. Passive mode should be able to function with any connection quality.

There must be a way for clients to discover each other. Clients maintain a list of other clients they're connected to, and communicate with them to discover more clients. Clients ignore clients whose players are too far away, and there must be a limit on how many other players can be seen by one player\footnote{ To avoid crowds. }. To start discovery, there must be persistent clients at key locations\footnote{ Like towns, portals, dungeon entrances, etc. }, who are always online and whose sole purpose is to help discover other clients. Each client also maintains a friend list: clients listed there are prioritized over other clients in the discovery process and appear first, which lets players pick their social circle. The consequence is that there's no way to discover a client if they're not near a persistent client or not in a friend list of the discovering client. For identity and IP address discovery it might be possible to use a chat system\cref{Chat}.
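As an illustration of the discovery rules above, here is a small sketch (in Python for brevity; data structures, distance units and limits are illustrative) of how a client could merge peer lists and decide which players to show.

\begin{lstlisting}[language=Python]
# Passive-mode peer selection: merge peer lists received from other
# clients, prefer friends, drop far-away players, cap the count.
MAX_VISIBLE = 8        # illustrative limit, "to avoid crowds"
MAX_DISTANCE = 500.0   # illustrative cutoff in local units

def merge_peer_lists(known_peers, received):
    # Add peers learned from another client, keyed by peer id.
    by_id = {p["id"]: p for p in known_peers}
    for peer in received:
        by_id.setdefault(peer["id"], peer)
    return list(by_id.values())

def select_visible_peers(known_peers, friends, my_position):
    def distance(peer):
        return sum((a - b) ** 2
                   for a, b in zip(peer["position"], my_position)) ** 0.5
    nearby = [p for p in known_peers if distance(p) <= MAX_DISTANCE]
    # Friends first, then the closest strangers.
    nearby.sort(key=lambda p: (p["id"] not in friends, distance(p)))
    return nearby[:MAX_VISIBLE]
\end{lstlisting}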
\subsection{Active Mode}

In active mode players actually play together as in hot-seat mode. Every player should see the same state of the game universe. One way to achieve this is to perform the same deterministic game state updates on each client: collect all players' inputs, then update the game state on every client using these same inputs. This scheme puts all players on an equal footing: updates run only as fast as the slowest client does. Game logic is computed redundantly in each client, which effectively eliminates opportunities to cheat by altering the game client\footnote{ E.g. if some client is altered to deal more damage than others, it'll only lead to desynchronization: in one client the monster will be dead, in another it will still be alive.}. Thus, it's desirable to play only with players having a good network connection. Players should be able to choose with whom they want to play actively\footnote{ E.g. by sending and accepting invitations. }, and to kick out players with a slow connection.

\section{Implementation}

One of the requirements is portability and small platform-dependent code size. This means putting as much code as possible into the platform-independent part. Here is how these two parts may look.
\end{enumerate} \section{Milestones} \subsection{Minimal Game} The goal of this milestone is to create a minimal game which engages all subsystems of the engine. Here's key gameplay elements: \begin{enumerate} \item Players roaming heightmap on their cars from checkpoint to checkpoint. \item At some places rigid body boxes are placed on the heightmap. \item Each checkpoint is a save point and also a hub for passive multiplayer. \end{enumerate} The minimal game should include all elements that final game will: \begin{enumerate} \item Sounds. \item Main menu. \item Save/load. \item Multiplayer. \item Demo recording/playback. \item Editor. \item Ports for Windows, Linux and Mac. \end{enumerate} \section{Assorted Notes} \begin{enumerate} \item \href{http://gamedev.stackexchange.com/questions/46424/try-catch-or-% ifs-for-error-handling-in-c}% {Discussion}, whether to use or not exceptions in C++ for game clients. \item Think of a program execution in terms of possible histories of changes in the environment\footnote{ Environment can be a keyboard, display, memory, hard drive, etc. }. Validity of a program is a constraint on these possible histories\footnote{ E.g. all possible histories for a game should contain states where each combination of keys can be pressed or released at any moment of time, while no game resource files are modified, and there's enough memory. }. \item For networking purposes, one can use ZeroMQ. It's small, flexible, mature, portable and well supported. There are bindings for Lua and NodeJs. \item There's a considerable amount of research and development in peer-to-peer networking done in \href{http://maidsafe.net/}{MaidSafe} project. \item There's a way in NodeJs to monitor file changes in runtime: functions \mylstinline{bash}{fs.watch} and \mylstinline{bash}{fs.watchFile}. \item Memory pools should always grow automatically and then report resulting sizes in the log. The goal is to avoid growing by adjusting initial sizes, but also to avoid crashing when these initial sizes are off the mark. \item There's a code coverage tool for Lua, called \href{https://github.com/keplerproject/luacov}{LuaCov}. \item To verify if the game scenario is passable, one may use automated bots. Ideally, when bot is activated, at any point in the game, it controls player to complete the story line. Bot perceives current state of the game universe and emulates player controls\footnote{ E.g. keyboard, joystick signals, etc. }. To allow bot to perceive everything that it needs, it may be a good idea to design the game around the concept of bot from the very beginning. \item \href{http://en.wikipedia.org/wiki/List_of_mathematical_shapes}% {List} of mathematical shapes. \item \href{http://en.wikipedia.org/wiki/Wang_tile}{Wang tiles} can be \href{http://procworld.blogspot.com/2013/01/tile-genetics.html}{used} to generate \href{http://graphics.stanford.edu/papers/tile_mapping_gh2004/final/% paper_final.pdf}{textures}, \href{http://nothings.org/gamedev/herringbone/}{mazes}, and even generalized to \href{http://www.jucs.org/jucs_1_10/an_aperiodic_set_of/Culik_II_K.pdf}% {Wang cubes}. \item Players data validation can be done by playing back recorded demos on trusted peers and signing resulting data with public-key cryptography. \item Gameplay idea. Players collect coins around the universe. Players store their coins in their hideouts. Coins may be moved from one hideout to another, but only if player is near the hideout that coins are moved from. Empty hideout can be relocated to another place. 
Players have some means to detect concentrations of coins in the universe, both in the wild and in the hideouts of other players. When a player picks up coins from another player's hideout, he gets fewer coins than were there. Players are ranked by how long they have held the most coins of all. This mechanic means that pressure on the top players will rise and they will eventually be replaced by other players.
\end{enumerate}

\end{document}
{ "alphanum_fraction": 0.7374471977, "avg_line_length": 39.6597582038, "ext": "tex", "hexsha": "65d6a5f351c9cec62d062da768ee587ec92fab63", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "a4c7d290ca601bddf680ef040f3235dc7b029a6b", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "sivarajankumar/smoothernity", "max_forks_repo_path": "docs/smoothernity.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "a4c7d290ca601bddf680ef040f3235dc7b029a6b", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "sivarajankumar/smoothernity", "max_issues_repo_path": "docs/smoothernity.tex", "max_line_length": 80, "max_stars_count": null, "max_stars_repo_head_hexsha": "a4c7d290ca601bddf680ef040f3235dc7b029a6b", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "sivarajankumar/smoothernity", "max_stars_repo_path": "docs/smoothernity.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 5131, "size": 22963 }
\section{Random Triangles in High Dimensions} For a triangle $\Delta$, we will let $\mathcal{Q}$ be a measure of the \textit{quality} of the triangle as defined by \begin{equation} \label{eq:triangle_score} \mathcal{Q}(\Delta) = \frac{4\sqrt{3} A}{ \ell_1^2 + \ell_2^2 + \ell_3^2} \end{equation} where $A$ is the area of the triangle, and $\ell_1$, $\ell_2$ and $\ell_3$ are the lengths of the three sides. Without performing any computations, can you determine the maximum and minimum values that $\mathcal{Q}$ can take? Can you describe what kinds of triangles are associated with high, and with low, values of $\mathcal{Q}$? In the first part of this project, you will write a function that computes the quality of a triangle in $\mathbb{R}^2$, given the coordinates of its vertices. \begin{itemize} \item Write a function, \texttt{tri\_lengths()}, that takes as its arguments the three vertices of a triangle and returns the lengths of the three sides of the triangle. \item Write a function, \texttt{tri\_angles()}, that takes as its arguments the three vertices of a triangle and returns the angles of the triangle. \item Write a function, \texttt{tri\_area()}, that takes as its arguments the three vertices of a triangle and returns the area of the triangle. \item Write a function, \texttt{tri\_score()}, that takes as its arguments the three vertices of a triangle and returns the quality score of the triangle as defined in equation (\ref{eq:triangle_score}). \item Test your function \texttt{tri\_score()} on the four triangles below. As a check on your function, the correct values are: \begin{align*} \mathcal{Q}({\color{green}{\Delta}}) &= 0.597\cdots, & \mathcal{Q}({ \color{blue}{\Delta}}) &= 0.413\cdots,\\ \mathcal{Q}({\color{red}{\Delta}}) &=0.172\cdots , & \mathcal{Q}({ \color{purple}{\Delta}}) &= 1. \end{align*} \end{itemize} \begin{center} \begin{tikzpicture}[scale = 1.8] \draw[ultra thick,red!60!white] (0,0)node[below] {$(0,0)$} -- (-1.5,.5)node[above]{$(-3,1)$} -- (1,0)node[right]{$(2,0)$} --cycle; \draw[ultra thick, green!60!black] (-0.5,1.5)node[left] {$(-5,0)$} -- (2,1.5)node[right]{$(0,0)$} -- (2,.5)node[right]{$(0,-2)$} -- cycle; \draw[ultra thick, red!50!blue!80] (3,-.5)node[above left] {$-1,0$} -- (4,-.5)node[above right] {$(1,0)$} -- (3.5,.2)node[above]{$(0,\sqrt{3})$} -- cycle; \draw[ultra thick, blue!80!black] (5,0)node[below]{$(0,0)$} -- (4.8,2)node[left]{$(-0.5,4)$} -- (5.2,2)node[right]{$(0.5,4)$} -- cycle; \end{tikzpicture} \end{center} Many applied mathematicians regularly work with data sets where each data point is associated with many different features (e.g. each medical patient is associated with their own temperature, heart rate, blood pressure, etc). Sometimes the physical, three-dimensional world can provide useful intuition for the geometry of higher dimensional space. Most of the time, however, we are unable to grasp all the weird ways that the higher dimensional space is `bigger'. In the second part of this project, you will compare the expected quality of random triangles in $\mathbb{R}^2$ with that of random triangles in $\mathbb{R}^{10}$. \begin{enumerate}[(a)] \item \begin{enumerate}[i.] \item Use a random number generator to create a random point, \(\bm{x} = (x_1,x_2)\), that is normally distributed in $\mathbb{R}^2$. The functions \texttt{randn()} (Matlab), \texttt{np.random.randn()} (Python), or \texttt{randn()} (Julia) may be useful. \item Repeat part i. until you have three points, $\bm{x} = (x_1,x_2)$, \(\bm{y} = (y_1,y_2)\), and \(\bm{z} = (z_1,z_2)\). 
Let $\Delta$ be the triangle with $\bm{x}$, $\bm{y}$, and $\bm{z}$ as its vertices. Use your function \texttt{tri\_score()} to compute $\mathcal{Q}(\Delta)$.
\item Repeat parts i. and ii. until you have the quality scores for 100,000 random triangles. Plot a histogram of your data.
\end{enumerate}
\item Repeat this process for triangles in $\mathbb{R}^{10}$. That is, use a multivariate normal random number generator to generate 3 points in $\mathbb{R}^{10}$, $\bm{x} = (x_1,x_2,\dots, x_{10})$, $\bm{y} = (y_1,y_2,\dots,y_{10})$, $\bm{z} = (z_1,z_2, \dots, z_{10})$. Let $\Delta$ be the triangle with $\bm{x}$, $\bm{y}$, and $\bm{z}$ as its vertices, and compute $\mathcal{Q}(\Delta)$. Repeat for 100,000 random triangles.
\item Compare the histogram for triangles in $\mathbb{R}^2$ with a histogram for triangles in $\mathbb{R}^{10}$. Provide a geometric explanation for why the random triangles in higher dimensions are much closer to equilateral than the random triangles in lower dimensions.
\end{enumerate}
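The sketch below, in Python with NumPy, illustrates one possible implementation of \texttt{tri\_score()} and of the random-triangle experiment. Only the function names \texttt{tri\_lengths()}, \texttt{tri\_area()} and \texttt{tri\_score()} come from the project description; the helper \texttt{random\_scores()} and all numeric choices are illustrative. Heron's formula is used for the area, so the same code works for vertices in $\mathbb{R}^2$ and in $\mathbb{R}^{10}$.

\begin{verbatim}
import numpy as np

def tri_lengths(x, y, z):
    # Side lengths of the triangle with vertices x, y, z.
    return (np.linalg.norm(x - y), np.linalg.norm(y - z), np.linalg.norm(z - x))

def tri_area(x, y, z):
    # Heron's formula uses only side lengths, so it works in any dimension.
    a, b, c = tri_lengths(x, y, z)
    s = 0.5 * (a + b + c)
    return np.sqrt(max(s * (s - a) * (s - b) * (s - c), 0.0))

def tri_score(x, y, z):
    # Quality Q = 4*sqrt(3)*A / (l1^2 + l2^2 + l3^2).
    a, b, c = tri_lengths(x, y, z)
    return 4.0 * np.sqrt(3.0) * tri_area(x, y, z) / (a**2 + b**2 + c**2)

def random_scores(dim, n_triangles=100000):
    # Scores of random triangles whose vertices are standard normal in R^dim.
    return np.array([tri_score(*np.random.randn(3, dim))
                     for _ in range(n_triangles)])

# Sanity check on the equilateral triangle given above, then the experiment.
print(tri_score(np.array([-1.0, 0.0]), np.array([1.0, 0.0]),
                np.array([0.0, np.sqrt(3.0)])))   # should print 1.0
scores_2d = random_scores(2)
scores_10d = random_scores(10)
\end{verbatim}

A histogram of \texttt{scores\_2d} and \texttt{scores\_10d} (for example with \texttt{matplotlib}) then makes the comparison requested in part (c) immediate.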
{ "alphanum_fraction": 0.6948895626, "avg_line_length": 92.36, "ext": "tex", "hexsha": "3d333ae5daab23dcd9cb3ca557d097c287980c91", "lang": "TeX", "max_forks_count": 2, "max_forks_repo_forks_event_max_datetime": "2021-07-27T01:10:02.000Z", "max_forks_repo_forks_event_min_datetime": "2021-07-25T18:18:19.000Z", "max_forks_repo_head_hexsha": "16845412db0bd18469db67075d31a6189d968c56", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "colinlclark/integration_workshop", "max_forks_repo_path": "random-triangles-high-dimensions.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "16845412db0bd18469db67075d31a6189d968c56", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "colinlclark/integration_workshop", "max_issues_repo_path": "random-triangles-high-dimensions.tex", "max_line_length": 428, "max_stars_count": null, "max_stars_repo_head_hexsha": "16845412db0bd18469db67075d31a6189d968c56", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "colinlclark/integration_workshop", "max_stars_repo_path": "random-triangles-high-dimensions.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1468, "size": 4618 }
\section{Protocol Specification} The protocol portion of Taxicoin is designed to be open. As such, anybody should be able to implement it in their own software. The following section of this document should be sufficient to do so. \subsection{Methods} Each of these methods is intended to be part of a smart contract. When one is called, it will modify the state of the contract, and/or return a value. Any action taken through a contract is considered to be \textit{formal} and binding (once the other party has agreed where applicable). The specified arguments are to be supplied when calling that function of the contract, with the types representing built-in Solidity language types. The \textit{payable} keyword indicates that a method accepts a transaction with a currency value attached. In instances where the preconditions for a method are not met, the method will revert and the state will be unmodified. \subsubsection{Driver Advertise} \begin{description}[leftmargin=8em,style=nextline] \item [Description] Takes a deposit from a driver and publishes their location and public key. \item [Arguments] Latitude: String\footnotemark \\ Longitude: String \\ Public Key: String \item [Payable] Driver deposit \item [Preconditions] User must not be currently on a journey, either as a driver or rider. Deposit must either have been already provided, or sent with this transaction. \item [Postconditions] The driver's location and public key are published, and the value of the deposit provided by the driver is recorded. If any deposit over the required amount was provided with the transaction, the excess is returned. \end{description} \footnotetext{Strings are used for latitude and longitude as floating point numbers can introduce inaccuracies due to the way they are stored.} \subsubsection{Driver Advert Revoke} \begin{description}[leftmargin=8em,style=nextline] \item [Description] Removes an active driver's advertisement. \item [Arguments] None \item [Payable] No \item [Preconditions] User must be advertised as a driver. \item [Postconditions] The driver is removed from the list of active drivers, indicating that riders should not send job proposals to this driver. The previously supplied deposit is not returned. \end{description} \subsubsection{Rider Create Journey} \begin{description}[leftmargin=8em,style=nextline] \item [Description] Accepts a quoted fare for a journey as a rider and forms the rider's part of a contract between driver and rider. Intended to be called after an off-chain negotiation with \lstinline{job} and \lstinline{quot} messages. \item [Arguments] Driver Address: address \\ Fare: uint, value in \textit{wei}\footnotemark, $n > 0$ \\ Public Key: String \item [Payable] Fare plus rider deposit \item [Preconditions] The user at the provided address must be an actively advertised driver, and not currently on a journey. The user calling this method must not be an actively advertised driver, nor be part of a journey as either rider or driver. The full rider deposit, plus an amount equal to the provided \lstinline{fare} must have been provided with this transaction. \item [Postconditions] The rider's intent to travel with the specified driver at the specified price is published. At this stage, the agreement is not binding until the driver accepts, before which the journey may be cancelled, with the rider deposit and fare being returned in full. 
\end{description} \footnotetext{Wei is the smallest denomination of the Ether currency used in the Ethereum network.} \subsubsection{Rider Cancel Journey} \begin{description}[leftmargin=8em,style=nextline] \item [Description] Cancels a journey which has not yet been accepted by a driver. \item [Arguments] None \item [Payable] No \item [Preconditions] Rider must be part of a journey, for which the driver has not already accepted. \item [Postconditions] The rider is removed from the journey. From this point it is no longer possible for the driver to accept the journey. The rider's deposit and fare are returned. \end{description} \subsubsection{Driver Accept Journey} \begin{description}[leftmargin=8em,style=nextline] \item [Description] Formally accepts a job as a driver, committing both the rider and driver to its completion. \item [Arguments] Rider Address: address \\ Fare: uint, value in \textit{wei} $n > 0$ \item [Payable] No \item [Preconditions] Driver must be actively advertised and have provided the driver deposit. Rider must have formally created a journey with the driver set to the caller of this method, and the fare of equal value to the argument provided. \item [Postconditions] The driver is marked as being on a journey with the specified rider. From this point, the journey is considered to be in progress, and any attempt to change any aspect of the journey will require an agreement to be made between both rider and driver. \end{description} \subsubsection{Complete Journey} \begin{description}[leftmargin=8em,style=nextline] \item [Description] Marks the current journey as completed, as either the rider or driver. \item [Arguments] Rating: uint8, $1 \leq n \leq 255$ with 255 being the \enquote{best} \item [Payable] No \item [Preconditions] The caller of the method must either be a driver or rider who is currently on a journey. \item [Postconditions] The caller of the method is marked as having completed the journey, however they are still part of this journey until the other user has also called this method. The rating for the other user is stored. If the other party has already called this method, then the ratings for both parties are applied to their overall rating, and the journey is formally completed. The rider and driver deposits are returned, and the fare transferred to the driver. However, in cases where the fare is zero (only possible where the fare has been altered during a journey to indicate that the journey should be cancelled), the driver's deposit is not returned. \end{description} \subsubsection{Driver Propose Fare Alteration} \begin{description}[leftmargin=8em,style=nextline] \item [Description] Formally proposes the alteration of the fare for a journey. Intended to be called after an off-chain negotiation with \lstinline{Propose Fare Alteration} messages. \item [Arguments] New Fare: uint, value in \textit{wei} \item [Payable] No \item [Preconditions] The user calling the method must be a driver, and currently be on a journey. \item [Postconditions] The driver's proposed new fare is recorded. The new fare does not take effect until the rider calls the \lstinline{Confirm Fare Alteration} method. \end{description} \subsubsection{Rider Confirm Fare Alteration} \begin{description}[leftmargin=8em,style=nextline] \item [Description] Confirms the alteration of the fare for a journey. 
	\item [Arguments] New Fare: uint, value in \textit{wei}
	\item [Payable] Difference between old and new fares, if new is higher
	\item [Preconditions] The user calling the method must be currently on a journey. Driver must have previously agreed the same new fare with the \lstinline{Driver Propose Fare Alteration} method. In the case that the new fare is higher, the difference must have been provided with this transaction.
	\item [Postconditions] The new value of the fare for the journey is recorded. In the case that the new fare is lower, the difference is returned to the rider. If the new fare is zero, the journey is considered to be cancelled: the journey may now be completed with the rider's deposit being returned, and no fare being paid to the driver.
\end{description}

\subsubsection{Get User Type}
\begin{description}[leftmargin=8em,style=nextline]
	\item [Description] Returns an integer representing the type of user at the provided address.
	\item [Arguments] User Address: address
	\item [Payable] No
	\item [Preconditions] None
	\item [Postconditions] Returns an integer between 0 and 3, representing the enum \{ None, Driver, ActiveDriver, Rider \}.
\end{description}

\subsubsection{Get Driver}
\begin{description}[leftmargin=8em,style=nextline]
	\item [Description] Returns the details of the driver at the given address.
	\item [Arguments] Driver Address: address
	\item [Payable] No
	\item [Preconditions] None
	\item [Postconditions] If the address provided is of a user who has previously advertised (or is currently advertising) as a driver, the details of the driver will be returned. Otherwise, all zero-values will be returned.
\end{description}

\subsubsection{Get Next Driver}
\begin{description}[leftmargin=8em,style=nextline]
	\item [Description] Returns the details of the driver next in the list of advertised drivers after the given address.
	\item [Arguments] Driver Address: address
	\item [Payable] No
	\item [Preconditions] None
	\item [Postconditions] If the address provided is of a user who has previously advertised (or is currently advertising) as a driver, the details of the next driver in the list will be returned. If the zero-address (\lstinline{0x0}) is provided, the first driver in the list is returned. If the address provided is for the last driver in the list, the zero-address is returned.
\end{description}

\subsubsection{Get Previous Driver}
\begin{description}[leftmargin=8em,style=nextline]
	\item [Description] Returns the details of the driver previous in the list of advertised drivers, i.e. the one before the given address.
	\item [Arguments] Driver Address: address
	\item [Payable] No
	\item [Preconditions] None
	\item [Postconditions] If the address provided is of a user who has previously advertised (or is currently advertising) as a driver, the details of the previous driver in the list will be returned. If the zero-address (\lstinline{0x0}) is provided, the last driver in the list is returned. If the address provided is for the first driver in the list, the zero-address is returned.
\end{description}

\subsubsection{Get Rider}
\begin{description}[leftmargin=8em,style=nextline]
	\item [Description] Returns the details of the rider at the given address.
	\item [Arguments] Rider Address: address
	\item [Payable] No
	\item [Preconditions] None
	\item [Postconditions] If the address provided is of a user who has previously used (or is currently using) the system as a rider, the details of the rider will be returned. Otherwise, all zero-values will be returned.
\end{description}

\subsection{Messages}

Driver and rider clients should be listening for the following messages, where applicable. These messages are communicated via the Whisper protocol. Message topics always have a length of 4 bytes (4 ASCII characters); therefore any topics listed here with a length of less than 4 bytes are right-padded with spaces.

\subsubsection{Job Proposal}
\begin{description}[leftmargin=6em,style=nextline]
	\item [Topic] \lstinline{job}
	\item [Purpose] This message is sent by a rider to a prospective driver, indicating that they wish to make the described journey. It is intended to be sent to advertised drivers matching specified criteria, e.g. within a certain distance, with at least a certain reputation. However, the sending of these messages is not intended to be carried out manually by the user -- rather, there is an automated process which fetches the list of active drivers and determines which to propose to.
	\item [Response] Should a driver be interested in a proposal, they respond with a quote message.
	\item [Payload] Please see below.
\end{description}

\lstinputlisting{res/job-message.json}

\subsubsection{Driver Quote}
\begin{description}[leftmargin=6em,style=nextline]
	\item [Topic] \lstinline{quot}
	\item [Purpose] This message is sent by a driver as a response to a job proposal. It contains the network address of the driver, as well as the fare for which the driver is willing to take on the job. At this point, the quote is not binding. Quote messages with a fare of -1 are considered to be a rejection, indicating that the driver does not wish to accept this job.
	\item [Response] If the rider chooses to accept the quote, they next call the create journey method, and respond with a \lstinline{Journey Created} message.
	\item [Payload] Please see below.
\end{description}

\lstinputlisting{res/quote-message.json}

\subsubsection{Journey Created}
\begin{description}[leftmargin=6em,style=nextline]
	\item [Topic] \lstinline{crea}
	\item [Purpose] This message is sent by a rider to a driver after they have created a journey. This is an indication that the rider has accepted the driver's quote.
	\item [Response] The driver should next call the \lstinline{Driver Accept Journey} method, and respond with a \lstinline{Journey Accepted} message.
	\item [Payload] Please see below.
\end{description}

\lstinputlisting{res/created-message.json}

\subsubsection{Journey Accepted}
\begin{description}[leftmargin=6em,style=nextline]
	\item [Topic] \lstinline{accp}
	\item [Purpose] This message is sent by a driver to a rider, after they have formally accepted the rider's journey, to indicate that both parties are now on a journey.
	\item [Response] None
	\item [Payload] Please see below.
\end{description}

\lstinputlisting{res/accepted-message.json}

\subsubsection{Driver Location}
\begin{description}[leftmargin=6em,style=nextline]
	\item [Topic] \lstinline{lctn}
	\item [Purpose] Sends the location of the driver to the rider with whom they are currently on a journey. Allows the rider's client to display how far the driver is from the pickup location.
	\item [Response] None
	\item [Payload] Please see below.
\end{description}

\lstinputlisting{res/location-message.json}

\subsubsection{Journey Completed}
\begin{description}[leftmargin=6em,style=nextline]
	\item [Topic] \lstinline{cmpl}
	\item [Purpose] This message is sent to the other party when either one calls the \lstinline{Complete Journey} method. It indicates that the other party should do the same (or dispute it).
If the other party has already completed the journey, then this message indicates that the journey is fully complete. \item [Response] None \item [Payload] None \end{description} \subsubsection{Propose Fare Alteration} \begin{description}[leftmargin=6em,style=nextline] \item [Topic] \lstinline{nfar} \item [Purpose] This message is sent to the other party to indicate that the user wishes to alter the fare for the current journey. \item [Response] If the message is received by a driver for the first time, unprompted, they may either agree with the new proposed fare, and formally propose the new fare with the \lstinline{Propose Fare Alteration} method, or reject the new fare by sending a message of this type with an alternate fare. If the message is received by a driver for the second time, after agreeing to the proposed new fare, and the value in this message is unchanged, this indicates that the rider has called the \lstinline{Rider Confirm Fare Alteration} method, and the new fare has been applied. No further response is sent. However if the fare was changed, the driver may act as if this is the first such message (see above). If the message is received by a rider for the first time, unprompted, the rider may either agree to the new fare, and respond with a message of the same type with an unchanged value, or they may disagree and respond with their proposed new fare. If the message is received by a rider for the second time, after agreeing to the proposed new fare, and the value is unchanged, this indicates that the driver has called the \lstinline{Propose Fare Alteration} method, and that the rider should call the \lstinline{Rider Confirm Fare Alteration} method. They then respond with a message of this type, with the fare value unchanged. \item [Payload] Please see below. \end{description} \lstinputlisting{res/new-fare-message.json} \subsection{Contract Solidity Interface} The above methods can be translated to a Solidity interface, which should be conformed to for all Ethereum-based contracts implementing the Taxicoin protocol. This aids the goal of creating an open ecosystem as, in theory, if all implementations conform to this standard, any client should be able to work with any contract implementation. \lstinputlisting[language=Solidity]{../contracts/ITaxicoin.sol} \subsection{Protocol Network Architecture} This diagram is a high-level explanation of how the various components of the Taxicoin protocol fit together. \begin{figure}[h!] \centering \begin{tikzpicture}[node distance=1.8cm] \node (n0) [box] {Smart Contract}; \node (n1) [box, below left of=n0, yshift=-2cm, xshift=-3cm] {Rider Client}; \node (n2) [box, below right of=n0, yshift=-2cm, xshift=3cm] {Driver Client}; \draw [arrow] (n1) -- node[anchor=east, text width=2.5cm, above=0.5cm, left=0.4cm] {Formal rider actions (method calls)} (n0); \draw [arrow] (n2) -- node[anchor=west, text width=2.5cm, above=0.5cm, right=0.4cm] {Formal driver actions (method calls)} (n0); \draw [doublearrow] (n1) -- node[anchor=north, text width=3.5cm, below=0.2cm] {Peer-to-peer via Whisper (messages)}(n2); \end{tikzpicture} \caption{Visual representation of how different components communicate in Taxicoin.} \end{figure} As seen in the above figure, the smart contract acts as an \enquote{orchestrator} of sorts. Informal messages between a rider and driver are first conducted off-chain, as explained previously, and formal binding actions are performed through the contract. 
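As an informal illustration of the message-layer conventions described above (topics of exactly 4 bytes, right-padded with spaces, carrying a JSON payload over Whisper), a client might prepare an envelope as sketched below in Python. The helpers \texttt{pad\_topic} and \texttt{build\_envelope} are illustrative only; the actual payload structures are those given in the \texttt{res/*.json} listings, and the transport call itself is implementation specific.

\begin{lstlisting}[language=Python]
import json

# Topics defined by the protocol; topics shorter than 4 bytes must be
# right-padded with spaces before being used on the wire.
TOPICS = ["job", "quot", "crea", "accp", "lctn", "cmpl", "nfar"]

def pad_topic(topic):
    if len(topic) > 4:
        raise ValueError("Whisper topics are at most 4 ASCII characters")
    return topic.ljust(4)

def build_envelope(topic, payload):
    # payload is a dict whose fields follow the res/*.json examples;
    # serialising it as JSON is all this sketch does.
    return {"topic": pad_topic(topic), "payload": json.dumps(payload)}

padded_topics = [pad_topic(t) for t in TOPICS]   # e.g. "job" -> "job "
\end{lstlisting}

Keeping the padding in one helper avoids clients disagreeing on the exact bytes of short topics, which would otherwise cause messages to be silently missed.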
Although this architecture assumes the use of Ethereum smart contracts and the Whisper protocol for sending messages, any infrastructure may be used on which it is possible to implement an application which satisfies the protocol standard. Note that if another platform is used, it will not be interoperable with others based on the assumed technologies. \pagebreak \subsection{Process Flow Diagram} This describes the possible routes of interaction through the system. \begin{figure}[h!] \centering \begin{tikzpicture}[node distance=1.8cm] \node (n0) [startstop] {Driver Start}; \node (n1) [process, below of=n0] {Driver Advertise}; \node (n2) [process, below of=n1] {Rider Get Drivers}; \node (n3) [startstop, right of=n2, xshift=2.2cm] {Rider Start}; \node (n4) [process, below of=n2] {Rider Propose Job}; \node (n5) [decision, below of=n4, yshift=-0.6cm] {Driver take job?}; \node (n6) [process, right of=n5, xshift=2.2cm] {Driver Reject}; \node (n7) [process, below of=n5, yshift=-0.8cm] {Driver Quote Fare}; \node (n8) [decision, below of=n7, yshift=-0.6cm] {Rider accept fare?}; \node (n9) [process, right of=n8, xshift=2.2cm] {Rider Reject Fare}; \node (n10) [process, below of=n8, yshift=-0.8cm] {Rider Create Journey}; \node (n11) [process, below of=n10] {Driver Accept Journey}; \node (n12) [process, below of=n11] {Journey}; \node (n13) [decision, right of=n0, xshift=7cm, yshift=-2cm] {Journey without issue?}; \node (n14) [process, below of=n13, yshift=-1.2cm] {Rider \& Driver Complete Journey}; \node (n15) [startstop, below of=n14] {End}; \node (n16) [process, below of=n15, yshift=-1.2cm] {Disputing Party Sends Fare Alteration Proposal}; \node (n17) [decision, below of=n16, yshift=-1.2cm] {Other accepts?}; \node (n18) [process, below of=n17, yshift=-1cm] {Driver Formally Proposes Fare Alteration}; \node (n19) [process, below of=n18] {Rider Accepts}; \draw [arrow] (n0) -- (n1); \draw [arrow] (n1) -- (n2); \draw [arrow] (n3) -- (n2); \draw [arrow] (n2) -- (n4); \draw [arrow] (n4) -- (n5); \draw [arrow] (n5) -- node[anchor=south] {No} (n6); \draw [arrow] (n6) |- (n4); \draw [arrow] (n5) -- node[anchor=west] {Yes} (n7); \draw [arrow] (n7) -- (n8); \draw [arrow] (n8) -- node[anchor=south] {No} (n9); \draw [arrow] (n9) |- (n7); \draw [arrow] (n8) -- node[anchor=west] {Yes} (n10); \draw [arrow] (n10) -- (n11); \draw [arrow] (n11) -- (n12); \draw [arrow] (n12.south) |- ++(6cm,-3mm) -- ++(0,19.8cm) -| (n13.north); \draw [arrow] (n13) -- node[anchor=west] {Yes} (n14); \draw [arrow] (n14) -- (n15); \draw [arrow] (n13.east) -- node[anchor=south] {No} ++(4mm,0) |- (n16.east); \draw [arrow] (n16) -- (n17); \draw [arrow] (n17) -- node[anchor=west] {Yes} (n18); \draw [arrow] (n17.west) -- node[anchor=north] {No} ++(-5mm,0) |- (n16.west); \draw [arrow] (n18) -- (n19); \draw [arrow] (n19.west) -- ++(-9mm,0) |- (n13.west); \end{tikzpicture} \caption{Flow diagram of possible routes of interaction with Taxicoin.} \end{figure}
{ "alphanum_fraction": 0.7546199738, "avg_line_length": 47.6143187067, "ext": "tex", "hexsha": "d2848aa3180ec2a8775b5fb7c1d666547392e319", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "3f0fd26841992aa47d5e4f6fce56de4b56452f30", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "sprusr/taxicoin", "max_forks_repo_path": "docs/tex/rfc.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "3f0fd26841992aa47d5e4f6fce56de4b56452f30", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "sprusr/taxicoin", "max_issues_repo_path": "docs/tex/rfc.tex", "max_line_length": 643, "max_stars_count": 3, "max_stars_repo_head_hexsha": "3f0fd26841992aa47d5e4f6fce56de4b56452f30", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "sprusr/taxicoin", "max_stars_repo_path": "docs/tex/rfc.tex", "max_stars_repo_stars_event_max_datetime": "2021-08-23T01:14:38.000Z", "max_stars_repo_stars_event_min_datetime": "2018-04-23T13:37:47.000Z", "num_tokens": 5499, "size": 20617 }
\begin{algorithm}[H]
	\caption{Feature Kernel}\label{algo:feature-kernel}
	\begin{algorithmic}[1]
		\REQUIRE $inst: \text{MIP instance},\ config: \text{Configuration}$
		\STATE {$solutions = build\_solutions^{\ref{algo:build-solutions}}(inst,\ config);$} \label{fk:line:init}
		\STATE {$dataset = generate\_data\_set(solutions);$}
		\STATE {$importance = generate\_feature\_importance(dataset);$} \label{fk:line:feature-importance}
		\STATE {$kernel,\ bucket\_vars = build\_kernel(solutions,\ importance,\ config);$} \label{fk:line:build-kernel}
		\RETURN {$(kernel,\ bucket\_vars);$}
	\end{algorithmic}
\end{algorithm}

\paragraph{Description}
\Cref{algo:feature-kernel} shows a high-level description of the method.
On line \ref{fk:line:init}, \Cref{algo:feature-kernel} builds the initial solution set; see \Cref{algo:build-solutions} for the details. What matters here is that there is a list of sub-models, each with its variable values and a feasibility status if the model was at least feasible in the continuous relaxation. This set can be used to generate the dataset, train the Random Forest and build the feature importance vector (on line \ref{fk:line:feature-importance}). The feature importance vector is then used to split variables into kernel variables and bucket variables.
Function $build\_kernel$, on line \ref{fk:line:build-kernel}, builds the initial kernel but does not build the initial buckets; this is done in a separate section of the program. \Cref{algo:feature-kernel} simply implements a new Kernel builder, so it can be used in place of the default Kernel builder.
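A simplified sketch of the feature-importance step and of the kernel/bucket split is given below. It assumes scikit-learn and NumPy, a dataset layout with one row per sub-model solution, one column per variable and the feasibility status as the label (following the description above), and an importance threshold that is purely illustrative, since the real criterion comes from the configuration.

\begin{verbatim}
import numpy as np
from sklearn.ensemble import RandomForestClassifier

def generate_feature_importance(X, y):
    # X: one row per sub-model solution, one column per variable.
    # y: feasibility status of each solution.
    forest = RandomForestClassifier(n_estimators=100, random_state=0)
    forest.fit(X, y)
    return forest.feature_importances_

def build_kernel(variables, importance, threshold=0.01):
    # Variables whose importance reaches the threshold form the kernel;
    # the remaining ones are left for the buckets.
    kernel = [v for v, imp in zip(variables, importance) if imp >= threshold]
    bucket_vars = [v for v, imp in zip(variables, importance) if imp < threshold]
    return kernel, bucket_vars

# Illustrative usage with random data standing in for the real dataset.
X = np.random.rand(50, 10)       # 50 solutions, 10 variables
y = np.random.rand(50) > 0.5     # placeholder feasibility labels
importance = generate_feature_importance(X, y)
kernel, bucket_vars = build_kernel(list(range(10)), importance)
\end{verbatim}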
{ "alphanum_fraction": 0.7379646557, "avg_line_length": 71.347826087, "ext": "tex", "hexsha": "30424f9a95a1eb215b9e75a62ea20c31534d1a1b", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "157c3775c5ca239000557d18cf3d6bbb3b350e33", "max_forks_repo_licenses": [ "CC0-1.0" ], "max_forks_repo_name": "Optimization-Algorithms/Feature-Kernel", "max_forks_repo_path": "algorithms/feature_kernel.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "157c3775c5ca239000557d18cf3d6bbb3b350e33", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "CC0-1.0" ], "max_issues_repo_name": "Optimization-Algorithms/Feature-Kernel", "max_issues_repo_path": "algorithms/feature_kernel.tex", "max_line_length": 168, "max_stars_count": null, "max_stars_repo_head_hexsha": "157c3775c5ca239000557d18cf3d6bbb3b350e33", "max_stars_repo_licenses": [ "CC0-1.0" ], "max_stars_repo_name": "Optimization-Algorithms/Feature-Kernel", "max_stars_repo_path": "algorithms/feature_kernel.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 415, "size": 1641 }
\chapter{5. Real Scale Prototype}

Once the dynamic model and the control strategy have been studied, the design and fabrication of the tilting PEV will be explained in this chapter. The frame of this vehicle was recycled from a previous PEV version and will be the departure point for this new prototype. In this chapter we will cover the processes to design and fabricate all the parts of the vehicle (front suspension, tilting and steering mechanisms, rear motor, handle bar, batteries, electronic components...).

\section{Front Suspension Design}

The first step is to design the front suspension of the vehicle. The suspension arms will continue to form a four bar mechanism, as they did in the miniPEV. In this case, before designing any mechanical component and jumping to the CAD software, some kinematic and dynamic simulations were carried out. These simulations had several goals:
\begin{enumerate}\itemsep -10pt
	\item Understand the motion of the tilting suspension depending on its geometry.
	\item Maintain the wheels as vertical as possible during the leaning of the body.
	\item Minimize the required torque from the tilting motor.
\end{enumerate}

The simulations were developed in MATLAB, following a similar structure to the functions introduced in the book by A. Avello\cite{iturriagagoitia2014teoria}. The analysis starts by defining a model of elements that are linked together by different types of joints. This model is then translated into a set of equations $\Phi$ that define the constraints of the system. Whatever the motion of the mechanism, it must obey these equations.
\newpage
The kinematic simulations require solving three problems (position, velocity and acceleration), in that order. The dynamic simulation then makes use of the results from the kinematic analysis, giving an estimation of the forces and reactions in the system.

\subsection{Model Schematic}

The first step in a mechanical analysis is to generate a model of elements that represents the system. In this case, the front suspension is abstracted into a 2D model of bars. The process of designing the model is very simple. The user introduces the lengths of the bars, then the coordinates of all points are calculated from a fixed reference frame and based on those bar lengths. Finally, the estimated mass of each bar is introduced, and all possible external forces are applied as well. For example, the weight of the driver is simulated as an 80 kg weight located at a point near the center of gravity of the vehicle.

\begin{figure}
	\includegraphics[width=1.0\linewidth]{figs/05/sim/1}
	\caption{Simulated Model}
	\label{model_schematic}
\end{figure}

Based on the Autodesk ForceEffect model designed previously, the suspension model was implemented in MATLAB. It has 20 points -- 3 of them fixed, A, B and C -- that define the bars. There are only three types of links in the model: articulation, weld and slider.
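Purely as an illustration of the kind of input this model amounts to (the numeric values below are placeholders, not the real geometry), the description boils down to a set of points, bars with lengths and masses, joint definitions and external forces. The sketch uses Python for readability; the actual implementation is the MATLAB code described in this section.

\begin{verbatim}
# Illustrative model description; the values are placeholders.
model = {
    # fixed points of the frame
    "fixed_points": {"A": (0.0, 0.25), "B": (-0.2, 0.0), "C": (0.2, 0.0)},
    # bars given by their end points, with length [m] and estimated mass [kg]
    "bars": [
        {"points": (1, 3), "length": 0.30, "mass": 0.5},
        {"points": (2, 4), "length": 0.30, "mass": 0.5},
    ],
    # the three types of links used in the model
    "links": [
        {"type": "articulation", "points": (3, 5)},
        {"type": "weld", "points": (4, 6, 7)},
        {"type": "slider", "points": (6, "B", "C")},
    ],
    # external forces, e.g. the 80 kg driver near the center of gravity
    "forces": [{"point": 12, "force_N": (0.0, -80 * 9.81)}],
}
\end{verbatim}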
The constraint equations that define this model can be summarized in the following types (the examples have been taken from the model in Figure \ref{model_schematic}):

\marginnote{$x_{i}$ is the $x$ coordinate of point $i$\\ $y_{i}$ is the $y$ coordinate of point $i$\\ $L_{a\,b}$ is the length of the bar $a$-$b$}

\begin{itemize}
\begin{itemize}
	\item Bar $\hspace{1.25cm}(x_{1}-x_{3})^{2}+(y_{1}-y_{3})^{2}-L_{1\,3}^{2}=0$
	\item Triangle $\begin{aligned}[t] (x_{1}-x_{2})^{2}+(y_{1}-y_{2})^{2}-L_{1\,2}^{2}=0\\ \hspace{0.5cm}(x_{1}-x_{9})^{2}+(y_{1}-y_{9})^{2}-L_{1\,9}^{2}=0\\ \hspace{0.5cm}(x_{2}-x_{9})^{2}+(y_{2}-y_{9})^{2}-L_{2\,9}^{2}=0 \end{aligned}$
	\item Welded $\begin{aligned}[t] (x_{6}-x_{7})(x_{4}-x_{7})+(y_{6}-y_{7})(y_{4}-y_{7})-L_{6\,7}L_{4\,7}\cos(90)=0\\ \hspace{0.5cm}(x_{6}-x_{7})(y_{4}-y_{7})-(y_{6}-y_{7})(x_{4}-x_{7})-L_{6\,7}L_{4\,7}\sin(90)=0 \end{aligned}$
	\item Slider $\hspace{0.75cm}(x_{6}-x_{B})(y_{B}-y_{C})-(y_{6}-y_{B})(x_{B}-x_{C})=0$
\end{itemize}
\end{itemize}

Each of these constraint equations is denoted as $\Phi(\vec{g},t)$, since it depends on the vector of coordinates $\vec{g}$ and the time $t$. In Figure \ref{model_schematic} only 16 moving and 3 fixed points have been represented. The model will be completed with the introduction of the tilting actuation and the driver model. In total there will be 20 moving points and 3 relative coordinates (angles), which makes a total of \textbf{43 unknown coordinates}.

\[\vec{g}=\begin{bmatrix} x_{1}\quad y_{1}\quad x_{2}\quad ... \quad x_{20}\quad y_{20} \quad \theta \quad \gamma \quad \phi \end{bmatrix}_{43x1}\]
\[\Phi(\vec{g},t)_{42x1}\]
\[\Phi_g(\vec{g},t)_{42x43}\quad \text{with} \quad \Phi_g(\vec{g},t)_{i,j}=\frac{\partial \Phi_{i}}{\partial g_{j}}\]

Note that the size of the coordinates vector (unknowns) is bigger than the number of equations in the system ($42\rightarrow43$). This is due to the fact that one of the coordinates will be a degree of freedom, and therefore a known value.

Due to the geometry of the model, it is impossible to put one fixed articulation at each wheel's point of contact with the ground. Instead, one of these two links has to be modified into an articulated slider. The implemented design is another alternative: a fixed rotation point and one articulated slider on each wheel. With this decision, the model behavior is completely identical --symmetrical-- on both sides.

From now on we will refer to the wheel closer to the center of rotation of the trajectory as the \textbf{inner wheel}, and to the other one as the \textbf{outer} wheel. This notation will be used interchangeably with left and right wheels for inner and outer.

\newpage
\subsection{Kinematic Simulations}

The goal of the kinematic simulations is to find the best geometry for stability during the tilting motion, that is, to design the front suspension so that the inner wheel remains as vertical as possible, and likewise the outer wheel.

The degree of freedom $z$ of the model is the rotation angle $\phi$ that the body forms with the vertical axis. The vector of coordinates $\vec{g}$ is formed by the $x$ and $y$ coordinates of the 20 points. Other extra variables, such as angles and distances of interest, are also included in the vector $\vec{q}$.
As stated previously, to study the motion of a system three problems have to be solved, in this order:
\begin{itemize}
\begin{itemize}
	\item Position Problem
	\marginnote{Position Problem: \\Input $z$; Unknown $g$}
	Given a value for the degree of freedom $z=\phi$, the vector of coordinates is calculated by solving the system of non-linear equations $\Phi$. The Newton-Raphson method is used to solve the equations:
	\[\Phi(\vec{g}+\Delta\vec{g},t) \approx \Phi(\vec{g},t)+\Phi_{\vec{g}}(\vec{g},t)\Delta\vec{g} \approx 0\]
	where $\Phi_{\vec{g}}$ is the Jacobian of the constraint equations. Through an iterative process, the values of the coordinates are calculated:
	\[\Phi_{\vec{g}}(\vec{g}_{i},t)(g_{i+1}-g_{i})=-\Phi(g_{i},t)\]

	\item Velocity Problem
	\marginnote{Velocity Problem: \\Input $g,\dot{z}$; Unknown $\dot{g}$}
	The derivative of the constraint equations is also null:
	\[\frac{d}{dt}\Phi(\vec{g},t)=\Phi_{\vec{g}}\,\dot{\vec{g}}+\Phi_{t}=0\]
	where $\Phi_{t}$ is the partial derivative of the constraint equations with respect to time $t$. The position of the system is known for each time $t$, so the matrices $\Phi_{\vec{g}}$ and $\Phi_{t}$ are known, which gives the values of the velocity $\dot{\vec{g}}$:
	\[\Phi_{\vec{g}}\,\dot{\vec{g}}=-\Phi_{t}\]

	\item Acceleration Problem
	\marginnote{Acceleration Problem: \\Input $g,\dot{g},\ddot{z}$; Unknown $\ddot{g}$}
	Following the same logic, the vector of accelerations is obtained:
	\[\frac{d^2}{dt^2}\Phi(\vec{g},t)=\Phi_{\vec{g}}\,\ddot{\vec{g}}+\dot{\Phi}_{\vec{g}}\,\dot{\vec{g}}+\dot{\Phi}_{t}=0\]
	If this equation is rearranged, the only unknown is the vector of accelerations $\ddot{\vec{g}}$:
	\[\Phi_{\vec{g}}\,\ddot{\vec{g}}=-\dot{\Phi}_{\vec{g}}\,\dot{\vec{g}}-\dot{\Phi}_{t}\]
\end{itemize}
\end{itemize}

\subsection{Dynamic Simulations}

Once the kinematic problem has been solved, the dynamics of the system can be studied. From the principle of virtual work:
\[\delta W_{inertia} + \delta W_{external} =0 \]
Expressing this principle in terms of the vector of natural coordinates $\vec{g}$:
\begin{equation}
\delta \vec{g}^{T}\,M\,\ddot{\vec{g}}-\delta \vec{g}^{T}\,Q=0
\label{dynamic}
\end{equation}
where $M$ is the mass matrix and $Q$ the vector of generalized forces.

\begin{marginfigure}
	\includegraphics[width=1.0\linewidth]{figs/05/bar}
	\caption{Element of the model}
\end{marginfigure}

To build $M$, each element's $M_{e}$ is calculated first, and it is inserted in the corresponding cells of the $M$ matrix.
\[M_{e}=\begin{bmatrix} m+a-2b_{x} & 0 & b_{x}-a & -b_{y} \\ & m+a-2b_{x} & b_{y} & b_{x}-a \\ & & a & 0 \\ sim. & & & a \end{bmatrix}\]
with
\[a=\frac{I_{i}}{L_{i\,j}^2} \quad b_{x}=\frac{m\, ^{e}x_{G}}{L_{i\,j}} \quad b_{y}=\frac{m\, ^{e}y_{G}}{L_{i\,j}} \quad m=\int_{V}{dm}\]
\[I_{i}=\int_{V}{(^{e}x^{2}+\,^{e}y^{2})\,dm} \quad ^{e}x_{G}=\frac{1}{M}\int_{V}{^{e}x\,dm} \quad ^{e}y_{G}=\frac{1}{M}\int_{V}{^{e}y\,dm}\]
The matrix $Q$ is built in the same way, but accounts for the external forces in the system, for example, the torque from a motor. For any force $F$ in the model:
\[Q_{F}=\frac{1}{L_{i\,j}} \begin{bmatrix} L_{i\,j}-\,^{e}x & \,^{e}y \\ \,^{e}y & L_{i\,j}-\,^{e}x \\ ^{e}x & ^{e}y \\ -\,^{e}y & ^{e}x \end{bmatrix} \begin{bmatrix} F_{x} \\ F_{y} \end{bmatrix}\]
The matrices $M$ and $Q$ are therefore known.
The values of the coordinates vector $\vec{g}$ can be expressed as a function of the degrees of freedom $z$ as $\vec{g}=f(z)$, where
\[\dot{\vec{g}}=\frac{\partial f}{\partial z}\dot{z}=R\,\dot{z} \hspace{1cm} \ddot{\vec{g}}=R\,\ddot{z}+\dot{R}\,\dot{z}\]
Returning to equation (\ref{dynamic}):
\begin{eqnarray*}
\delta \vec{g}^{T}(M\ddot{\vec{g}}-Q)=0\\
\delta z^{T}R^{T}(M\ddot{\vec{g}}-Q)=0\\
R^{T}(M\ddot{\vec{g}}-Q)=0 \\
R^{T}M\ddot{\vec{g}}=R^{T}Q\\
R^{T}MR\ddot{z}=R^{T}Q-R^{T}M\dot{R}\dot{z}
\end{eqnarray*}
Thus the acceleration of the degrees of freedom is obtained when masses and external forces are applied.

\subsection{Torque Calculation}

It must be pointed out that in this thesis the only variable of interest is the torque requirement of the motor. In order to get that variable, a different perspective is necessary.

Once again starting from equation (\ref{dynamic}), the Lagrange multipliers ($\lambda$) are introduced. There is one $\lambda$ per constraint equation in the model, and their meaning changes depending on their equation, but overall they are related to the reactions between elements.
\begin{equation}
M\ddot{\vec{g}}-Q+\Phi_{g}^{T}\lambda=0
\label{lagrange}
\end{equation}

To get the value of the motor torque, a known tilting angle trajectory $\phi_{c}(t)$ has to be imposed:
\begin{equation}
\Phi=\phi(t)-\phi_{c}(t)
\label{motor_torque}
\end{equation}

From the programmer's point of view, once the kinematics have been solved, it is quite inconvenient to change the system of equations again by introducing a new time-dependent equation. If only the motor torque is needed, a particular method to get it can be developed. First, let us consider that the tilting angle $\phi$ is part of the coordinates vector $\vec{g}$ as:
\[\vec{g}=\begin{bmatrix} x_{1}\quad y_{1}\quad x_{2}\quad ... \quad x_{20}\quad y_{20} \quad \theta \quad \gamma \quad \phi \end{bmatrix}\]
It was already verified that:
\[M\ddot{\vec{g}}-Q+\Phi_{g}^{T}\lambda=0\]
\[\lambda=\Phi_{g}^{H}(M\ddot{\vec{g}}-Q)\]
Inserting equation (\ref{motor_torque}) does not change the terms $M\ddot{\vec{g}}-Q$ in equation (\ref{lagrange}). It only expands the Jacobian $\Phi_{g}$ with a row of zeros and a one:
\[\begin{bmatrix}\Phi_{g}\end{bmatrix} \quad\rightarrow\quad \left[\begin{array}{ccccc} & & \Phi_{g} & & \\ \hline 0 & 0 & ... & 0 & 1 \\ \end{array} \right]\]
Returning to equation (\ref{lagrange}), the Lagrangian term becomes:
\[\begin{bmatrix} \Phi_{g}^{T} \end{bmatrix}\begin{bmatrix}\lambda_{1}\\ \lambda_{2} \\ ... \\ \lambda_{42} \end{bmatrix}\quad\rightarrow\quad \left[\begin{array}{c|c} & 0 \\ & 0 \\ \Phi_{g}^{T}(1:42,:) & ... \\ & 0 \\ \hline \Phi_{g}^{T}(43,:) & 1 \\ \end{array} \right]\begin{bmatrix}\lambda_{1}\\ \lambda_{2} \\ ... \\ \lambda_{42} \\ \lambda_{43} \end{bmatrix}\]
Calculating the product of this new Lagrangian term, and considering that the mass and external force terms do not change:
\[M\ddot{\vec{g}}-Q+\Phi_{g}(1:42,:)^{T}\lambda=0\]
\[\Phi_{g}^{T}(43,:) \begin{bmatrix} \lambda_{1} & \lambda_{2} & ... & \lambda_{42} \end{bmatrix}^{T} + \lambda_{43}=0\]
\[M_{t}=\lambda_{43}=-\Phi_{g}^{T}(43,:) \begin{bmatrix} \lambda_{1} & \lambda_{2} & ... & \lambda_{42} \end{bmatrix}^{T}\]

\newpage
\subsection{MATLAB Simulations}

\textbf{Model}

\begin{figure}
	\includegraphics[width=1.0\linewidth]{figs/05/sim/Picture2}
	\caption{Model}
\end{figure}

The model has some pre-established constraints, due to the selection of some components.
That is why some dimensions are fixed:
\begin{itemize}
\begin{itemize}
\itemsep -15pt
	\item Wheel Radius
	\item Vehicle Track
	\item Kingpin angle
	\item Hub width and height
	\item Wishbone arms horizontal in steady state
\end{itemize}
\end{itemize}

The \textbf{degree of freedom} is the body inclination $\phi$, based on the rotation point A, where the motor should be placed. For the kinematic simulations it will be established that $\phi = 0$--$30\degree$, $\dot{\phi}=1~\mathrm{rad/s}$ and $\ddot{\phi}=0~\mathrm{rad/s^{2}}$.

\textbf{Kinematics}

We will study the motion of the system with varying parameters. The only parameters left to determine are the \textbf{position of the motor} --height $h \in (0$--$600)$ mm-- and the \textbf{width of the body in the suspension} --$L_{1\,9},\,L_{2\,10} \in (0$--$300)$ mm--, which will initially cover those ranges.

The model is fixed to a set of dimensions, and it is \textbf{tilted to the maximum possible angle}. The inclination of the body and of the wheels is recorded and saved. Then both the left and the right wheel angles are compared with the body angle. These data are then \textbf{fitted with a linear regression}, and its slope and quadratic error are saved for later analysis.

\begin{figure*}
	\includegraphics[width=1.0\linewidth]{figs/05/sim/Relations}
	\caption{Left (inner) and right (outer) wheel angles versus the body angle during a tilting motion}
\end{figure*}

After simulating the mechanism with different geometric constraints (54900 combinations), some conditions were applied to find the most suitable geometry:
\begin{enumerate}\itemsep -8pt
	\item Ratio between the inner wheel inclination angle and the body leaning: \textbf{$\phi_{inner\,to\,body}< 0.96$}
	\item The body width should leave enough space for connecting the suspension arms: \textbf{$L_{1\,9},\,L_{2\,10} > 30$ mm}
	\item The body should lean a minimum angle ($\phi_{max}>25\degree$)
	\item The rotation point should be located near the body, in order to avoid the motor scraping against the ground or being too high.
\end{enumerate}

Applying these constraints, a set of possibilities is obtained:

\begin{table}[h!]
\centering
 \begin{tabular}{ccc|ccccc} \\[20pt]
 $h$ & $L_{1\,9}$ & $L_{2\,10}$ & $\phi_{max}$ & $\phi_{left\,to\,body}$ & $RSE_{left\,to\,body}$ & $\phi_{right\,to\,body}$ & $RSE_{right\,to\,body}$ \\
 \hline
 260 & 31 & 61 & 0.4500 & 0.9530 & 0.0020 & 1.1560 & 0.0020\\
 260 & 61 & 101 & 0.4500 & 0.9592 & 0.0017 & 1.1397 & 0.0017\\
 250 & 31 & 61 & 0.4500 & 0.9534 & 0.0020 & 1.1598 & 0.0020\\
 \textbf{250} & \textbf{61} & \textbf{101} & \textbf{0.4500} & \textbf{0.9596} & \textbf{0.0017} & \textbf{1.1432} & \textbf{0.0017}\\
 240 & 31 & 61 & 0.4500 & 0.9539 & 0.0019 & 1.1432 & 0.0019\\
 240 & 61 & 101 & 0.4500 & 0.9600 & 0.0017 & 1.1643 & 0.0017\\
 230 & 31 & 61 & 0.4499 & 0.9543 & 0.0019 & 1.1730 & 0.0019\\
 \hline
\end{tabular}
%\\[20pt]
\caption{Geometries remaining after applying the selection constraints}
\label{selection}
\end{table}

\newpage

The selected geometry has been emphasized in Table \ref{selection} and represented in Figure \ref{selectedgeometry}. The motor should be located inside the body, at 230--260 mm from the ground. The selection of $L_{1\,9}$ is clearly affected by the second constraint; it usually selects the lower bound (31 mm). Regarding the body geometry, the length $L_{1\,9}$ should be around 60 mm, while the length $L_{2\,10}$ should be around 100 mm. These values could be recalculated in case the 60 mm separation between pins were not enough.

\begin{figure*}[h!]
	\includegraphics[width=1.0\linewidth]{figs/05/sim/4}
	\caption{Selected geometry}
	\label{selectedgeometry}
\end{figure*}

Other interesting conclusions can be extracted from the kinematic analysis. First, let us look at the value of $\phi_{max}$ (Figure \ref{phimax}) for all the possible combinations. We can notice that raising the rotation point reduces the number of cases with high leaning angles: the lower the point A, the higher the leaning angle. In addition, if the position of the rotation point is kept constant ($h=\mathrm{constant}$), that is, independently of the rotation point height, there exists a line where the dimensions $L_{1\,9}$ and $L_{2\,10}$ give the highest possible leaning angle. This line gives a body with a particular lateral edge. Indeed, the $\bar{12}$ edge would form 55$\degree$ with the horizontal in that optimum case. In reality, however, the selection has to be based on a balance: going to the optimum in one dimension will mean worse values in the other dimensions.

In Figures \ref{phileft} and \ref{phiright}, the values of $\phi_{inner}$ and $\phi_{outer}$ show that, independently of the rotation point height, the regions where there is a low inner-wheel-to-body angle ratio are the same as those where there is a high outer-wheel-to-body angle ratio. The selected point falls into a balanced zone between these two regions.

Finally, in Figure \ref{phirelation} the relation between the inner-wheel-to-body ratio and the outer-wheel-to-body ratio is represented. Interestingly, it is impossible to find a geometry in which the inner and the outer wheel angles are simultaneously lower than the body angle. When one ratio is below 1, the other ratio is always above 1, and vice versa.

\begin{figure*}
	\includegraphics[width=1.0\linewidth]{figs/05/sim/phimax}
	\caption{Value of $\phi_{max}$ as a function of $L_{1\,9}$ (x axis), $L_{2\,10}$ (y axis), and $h$ (different figures)}
	\label{phimax}
\end{figure*}

\begin{figure*}
	\includegraphics[width=1.0\linewidth]{figs/05/sim/phileft}
	\caption{Value of $\phi_{inner}$ as a function of $L_{1\,9}$ (x axis), $L_{2\,10}$ (y axis), and $h$ (different figures)}
	\label{phileft}
\end{figure*}

\begin{figure*}
	\includegraphics[width=1.0\linewidth]{figs/05/sim/phiright}
	\caption{Value of $\phi_{outer}$ as a function of $L_{1\,9}$ (x axis), $L_{2\,10}$ (y axis), and $h$ (different figures)}
	\label{phiright}
\end{figure*}

\begin{figure}
	\includegraphics[width=1.0\linewidth]{figs/05/sim/PhiRelation}
	\caption{Ratio of angles on inclination, $\phi_{inner\,to\,body}$ vs $\phi_{outer\,to\,body}$, as a function of $L_{1\,9}$ (x axis) and $L_{2\,10}$ (y axis). Each point represents a combination of the geometry}
	\label{phirelation}
\end{figure}

\textbf{Dynamics}

The dynamic analysis has focused on estimating the torque necessary to keep the body in the vertical position. The worst case scenario for the motor is when the user gets on the vehicle: the motor will be able to return to the vertical position only if the torque requirement is lower than the maximum torque of the motor. On the contrary, in a dynamic situation the forces on the vehicle will help the motor to return to the vertical position after a curve.

To carry out the dynamic simulation, the weight of each element is estimated from its material density and approximate dimensions. In addition, the weight of a standard (80 kg) driver is included at a height of 1 m (around the center of gravity).
Then, the model is forced to a known initial position ($\phi$ is the input) and the torque to return to the vertical position is calculated. For this step the expressions presented previously, based on the Lagrangian formulation, are used:
\[M_{t}=\lambda_{43}=-\Phi_{g}^{T}(43,:) \begin{bmatrix} \lambda_{1} & \lambda_{2} & ... & \lambda_{42} \end{bmatrix}^{T}\]

The model was also slightly modified to include the tilting mechanism. The design B.4 was selected for this analysis, which consisted of a gear system placed between the motor and the lower suspension arms. Two different gear ratios were studied, with an increase of 1.5:1 and 4:1 in the motor torque:

\newpage

\begin{marginfigure}[5cm]
	\includegraphics[width=1.15\linewidth]{figs/05/sim/gear15_2}
	\caption{Studied model with gear ratio=1.5}
\end{marginfigure}

\begin{marginfigure}[5cm]
	\includegraphics[width=1.15\linewidth]{figs/05/sim/gear4_2}
	\caption{Studied model with gear ratio=4}
\end{marginfigure}

\begin{figure}[h!]
	\includegraphics[width=0.95\linewidth]{figs/05/sim/gear15_1}
	\caption{Necessary motor torque to restore vertical position with gear ratio=1.5}
\end{figure}

\begin{figure}[h!]
	\includegraphics[width=0.95\linewidth]{figs/05/sim/gear4_1}
	\caption{Necessary motor torque to restore vertical position with gear ratio=4}
\end{figure}

For acceptable leaning angles (around 15\degree) there is a torque requirement of 50 Nm for the 1.5:1 gear ratio and of 15 Nm for the 4:1 gear ratio. Let us study the characteristics of the available tilting motor to verify that these conditions are fulfilled.

\newpage

\textbf{NIDEC 48R Motor}

The selected motor is a NIDEC 48R BLDC motor. Brushless DC motors do not require commutators and brushes, and offer long life, quietness and high efficiency. It has a 144:1 ratio gear attached, which was customized for high-torque applications. The datasheet of the motor (without the gear) is included in the appendices.

The rated power is 67 W, with a nominal output torque $T_{r}$ of 0.2 Nm and a current consumption $I_{r}$ of 3.8 A. In no-load conditions, the output speed is $n_{0}=4460~\mathrm{1/min}$, with a current consumption of 0.5 A.

\begin{figure}[h!]
	\includegraphics[width=0.85\linewidth]{figs/05/sim/motor}
	\caption{NIDEC 48R motor curves}
\end{figure}

Considering these two situations --nominal and no load--, the torque vs. current curve can be obtained:
\[T=m\,I+n; \quad 0=0.5m + n; \quad 0.2=3.8m+n\]
\[T=\frac{2I-1}{33}\]

With an output ratio of 144 in the planetary gear, and limiting the \textbf{maximum current to 5 A}, the maximum torque is
\[T_{max}=144\,\frac{2\cdot 5-1}{33}\approx 40~\mathrm{Nm}\]
With an additional gear, the torque increases to \textbf{60 Nm and 160 Nm with the 1.5 and 4 gear ratios} respectively. Therefore, the \textbf{motor is able to recover} from the considered situations.

In view of the results of the dynamic analysis, the gears between the motor and the suspension arms will be designed to have a \textbf{ratio of 2:1}.

\newpage

\section{Design}

The previous simulations have offered a better understanding of the motion of the front suspension and have helped to complete its geometry. In this section we will go through the design process for the different parts of the PEV.

\subsection{Tilting}

\begin{marginfigure}[5.5cm]
	\includegraphics[width=1\linewidth]{figs/05/IMG_20161220_154842}
	\caption{Aluminum part to connect the frame and the suspension arms.
The geometry fits the output from the kinematic simulations (part shown upside down).}
\end{marginfigure}

For the tilting part, some components were reused from other vehicles (wheels, suspension arms and hubs). The wheels are not perfectly suited for tilting vehicles, due to their narrow width. On the other hand, the hubs determine the height of the suspension points in the frame, since in the steady position the suspension arms are intended to remain parallel to the ground.

The key point in the design of the tilting suspension was the selection of ball joints as articulations. Super-swivel ball joint rod ends were selected due to their 55$\degree$ angle of ball swivel, which accommodates more misalignment than any other externally threaded rod end. This high angle allowed the tilting of the vehicle up to 30$\degree$.

The rest of the joining components were purchased at McMaster.com --their cost has been summarized in Ch. 7: Cost Summary--, and their datasheets have been included in the appendices.

The joining of the suspension arms and the frame was carried out by an aluminum part made of water-jetted sheets of 1/8''=3.175 mm thickness. This part followed the geometry extracted from the MATLAB simulations, having 60 mm and 100 mm between the upper and lower suspension points respectively.

\begin{figure}[h!]
	\includegraphics[width=1\linewidth]{figs/05/IMG_20161219_165930}
	\caption{Initial state of the PEV frame}
\end{figure}

\newpage

The gears were designed using a design table from Excel\cite{gear} that was then imported into Solidworks. This table took into account all the parameters of the gears. As stated previously, the ratio of the gears was 2:1; they had a width of 1/4''=6.35 mm and were fabricated with the water jet machine.

The gears were inserted into the suspension arms by cutting them in half. This was due to the fact that the suspension arms were already fabricated, and it was not possible to insert the gears in their position without breaking some parts.

\begin{table}[h!]
\centering
 \begin{tabular}{lll}
 \hline
 Half Width & $h$ & 0.785 \\
 Addendum & $a$ & 1 \\
 Dedendum & $b$ & 1.25 \\
 Fillet Radius & $e$ & 0.38 \\
 Module & $m$ & 1 \\
 Teeth & $z$ & 20 \\
 Profile Shift & $s$ & 0.25 \\
 Pressure Angle & $\alpha$ & 0.349 \\
 Pitch Radius & $R$ & 10 \\
 Base Radius & $R_{0}$ & 9.397 \\
 Addendum Radius & $r_{a}$ & 11 \\
 Half Angle & $\gamma$ & 0.093 \\
 Fillet Center & $v_{C}$ & -0.87 \\
 Fillet Center & $u_{C}$ & 1.506 \\
 \hline
\end{tabular}
%\\[20pt]
\caption{Gear parameters}
\end{table}

\begin{marginfigure}[-2.75cm]
	\includegraphics[width=1\linewidth]{figs/05/involute}
	\caption{Gear Design: Addendum, Involute, Trochoidal and Dedendum sections}
\end{marginfigure}

Far from being a problem, being able to mount and remove the gears made it possible to test the vehicle with and without tilting (Figure \ref{P1050722}). This modularity proved to be really useful: disassembling the gears and fitting a pair of shock absorbers instead allowed testing the vehicle with no tilting in a very straightforward way.

The frame was first modelled in Solidworks, as the baseline for the rest of the components. The non-relevant parts, for example the wheels, were imported from online resources such as GrabCAD. Once the frame was in Solidworks, the rest of the parts were designed and fabricated in the Media Lab's machine shop.

The assembly of the parts did not present any problems. At this stage there was no steering system, so the wheels could rotate freely. This complicated the tilting tests a bit, since the wheels started rotating when the suspension moved.
Nevertheless, the tilting mechanism worked satisfactorily, and the inclination angles were as high as expected.

The NIDEC motor was controlled with a driver connected to the Arduino board. The control strategy uses a PWM signal to command the speed of the motor, and the position is controlled with a PID. A deeper explanation is included in the electronics section.

\begin{marginfigure}[-5cm]
	\includegraphics[width=0.95\linewidth]{figs/05/IMG_20161231_124145}
	\caption{Top view: PEV tilting, first test without steering}
\end{marginfigure}

\newpage

\begin{figure}[h!]
	\includegraphics[width=0.95\linewidth]{figs/05/IMG_20161231_124120}
	\caption{Front view: PEV tilting, first test without steering}
	\\[-0.5cm]
\end{figure}

\begin{figure}[h!]
	\includegraphics[width=0.95\linewidth]{figs/05/IMG_20161230_212159}
	\caption{Tilting mechanism: motor and gears}
	\\[-0.5cm]
\end{figure}

\begin{figure}[h!]
	\includegraphics[width=0.95\linewidth]{figs/05/P1050722}
	\caption{Front suspension without tilting}
	\label{P1050722}
	\\[-0.5cm]
\end{figure}

\begin{figure}[h!]
	\includegraphics[width=0.95\linewidth]{figs/05/Render_Design_B_2}
	\caption{Render of the front suspension}
	\\[-1cm]
\end{figure}

\newpage

\subsection{Steering}

\begin{marginfigure}[2cm]
	\includegraphics[width=1.1\linewidth]{figs/05/P1050723}
	\caption{Steering mechanism, NIDEC motor and VESC controller}
	\label{P1050723}
\end{marginfigure}

During the fabrication process it was decided to implement a steer-by-wire system. There were two main reasons to justify this system:
\begin{itemize}
\begin{itemize}
	\item \textbf{SDTC}: by introducing a motor to control the steering mechanism, there is the possibility of modifying the steering angle commanded by the driver. The SDTC control strategy requires control of the wheels to modify the path of the vehicle and tilt the vehicle by countersteering at high speeds. Even though in this project there was no time to implement the SDTC, the steer-by-wire system remains useful for future applications of this vehicle.

	\item \textbf{Autonomy}: an actuated steering system is a basic feature for an autonomous lightweight vehicle. If a fleet of PEVs is on the streets and a user calls one of them, it will need motorized propulsion and steering as well. On a smaller scale, in this project it was possible to remotely control the PEV and test it without a driver.
\end{itemize}
\end{itemize}

The main drawback of the steer-by-wire system is that it needs to be constantly powered by the batteries. When the vehicle is powered off, the steering motor remains in the same position until it is powered again and the handle bar and the wheels align. Arduino boards do not retain any data after they are powered off. This implies that if the handle bar is moved during a no-power period, there will be a misalignment between the wheels and the handle bar.

To avoid this problem, the absolute orientation of the IMU is used. By configuring the IMU properly, the orientation with respect to an inertial frame can be obtained. If the vehicle itself is rotated, there will again be a misalignment, so two identical IMUs were necessary. The relative angle between them gives the motor the initial angle of the wheels.

In Figure \ref{P1050723}, the position of the motor and the steering mechanism is shown. Some modifications were made to the aluminum frame to accommodate the motor. The output shaft goes through the frame, connected to another aluminum part.
This part (Figure \ref{IMG_20170403_011512}) has several holes to adapt to the rest of the attachments. The white case was 3D printed and contains the VESC controller.

\begin{figure}[h!]
	\includegraphics[width=0.7\linewidth]{figs/05/IMG_20170403_011512}
	\caption{Steering parts}
	\label{IMG_20170403_011512}
	\\[-5cm]
\end{figure}

\newpage

\subsection{Handle Bar}

Before approaching the design of the handle bar, it was necessary to build a column to support it. The frame did not have any prepared holes or location for the steering column, so everything had to be designed from scratch and adapted to ensure a good joint between the frame and the steering column.

\begin{figure}[h!]
	\includegraphics[width=1\linewidth]{figs/05/Render_Design_B_7}
	\caption{Handle bar and steering column render}
	\\[-1cm]
\end{figure}

The steer-by-wire system allowed a free design of the column, formed by two lateral aluminum sheets connected by thinner transversal sheets. These parts were water jetted, so a distinctive shape was selected for them. While the fabrication was really fast and easy, assembling all these parts required some time. The assembly was quite tedious, requiring some clamps to hold all the parts together (Figure \ref{IMG_20170207_154911}).

\begin{marginfigure}[-3cm]
	\includegraphics[width=1\linewidth]{figs/05/IMG_20170207_154911}
	\caption{Assembly of the handle bar column}
	\label{IMG_20170207_154911}
\end{marginfigure}

At the bottom of this column there was enough room for housing the wiring and the Arduino boards. The attachment to the frame was done with a bent aluminum part.

It is important to point out that it was decided to fabricate the PEV without using the milling machine. Apart from the time that the milling process takes by itself, training and preparation of the necessary files for its production would have been necessary.

\begin{marginfigure}[0cm]
	\includegraphics[width=1\linewidth]{figs/05/P1050738}
	\caption{Handle bar: motor joining}
	\label{P1050738}
\end{marginfigure}

On top of the column another NIDEC motor was assembled, with the handle bar connected to its shaft (Figure \ref{P1050738}). The reasons to introduce another motor in the handle bar are summarized as follows:

\begin{itemize}
\begin{itemize}
	\item \textbf{Haptic Feedback}: a fundamental part of the steer-by-wire system is to give feedback to the driver about the forces that the steering motor is withstanding. In this way, the driver will have a subconscious input about the forces required to move the wheels, and the user experience will be enhanced.
	\newpage
	This is also important in terms of safety. Moving the handle bar without any friction can be dangerous, since the driver can make sudden turns, leading to a crash or a fall.

	The implemented feedback did not realistically reproduce the forces acting on the steering motor. Instead, the force required to move the handle bar increased linearly with the steering angle and the vehicle velocity. There was also a limit on the maximum input angle, at which enough force was generated to block any motion. To do so, the motor variables read from the VESC controller were used, mainly the battery current, the motor current and the tachometer.

\begin{figure}[h!]
	\includegraphics[width=1\linewidth]{figs/05/IMG_20170123_121416}
	\caption{Test bench for the haptic feedback}
	\\[-1cm]
\end{figure}

	At low speeds, the angle range was quite wide --but limited-- and the forces required to move the handle bar were low.
	At higher speeds, the handle bar was limited to a narrow range of angles and a lot of force was required to move it. This helped to reduce the steering speed and prevented the user from applying an excessive steering input at high speeds.
	\item \textbf{Alert/notify user}: apart from controlling the steering input from the driver, a motorized handle bar can warn the driver about an obstacle in the road or any other issue with the vehicle or the road conditions. Vibrating the handle bar can be the fastest way to notify the user about any problem, reducing the reaction time and thus protecting the user.
	\item \textbf{Compass}: a possible scenario when riding the PEV is receiving an indication to turn right or left once a destination address has been given to the system. Sometimes, when riding a bike in a new city or in an unknown neighborhood, it is necessary to stop and check the map to get oriented and find the next spot.
\end{itemize}
\end{itemize}

\newpage

The handle bar contains a grip and a brake on each side and a potentiometer on the right side (Figure \ref{handle_bar}). The left and right brakes activate the brakes on the left and right front wheels respectively, and the potentiometer activates the power assist on the rear motor. A higher value of the potentiometer means a higher duty cycle in the rear motor, thus increasing the velocity of the vehicle.

\begin{marginfigure}
	\caption{Handle bar}
	\label{handle_bar}
\end{marginfigure}

\begin{figure}[h]
	\minipage{0.5\textwidth}
	\includegraphics[width=1.0\linewidth]{figs/05/P10507342}
	\captionof{a)}{ Left grip and brake}
	\endminipage\hfill
	\hspace{1pt}
	\minipage{0.5\textwidth}
	\includegraphics[width=1.0\linewidth]{figs/05/P10507332}
	\captionof{b)}{ Right grip, brake and throttle}
	\endminipage
	\\[0pt]
\end{figure}

\textbf{Driver ergonomics}

The height and the position of the handle bar were chosen to follow the indications in the book ``The Guide to Cycling Ergonomics'' by Ergotec\cite{ergonomics}. The angles of the driver's articulations have been included in the drawings appendix.

\begin{figure}[h!]
	\includegraphics[width=1\linewidth]{figs/05/driver2}
	\caption{Driver position on the CAD model}
	\\[0cm]
\end{figure}

\newpage
\subsection{Power Assist}

The PEV is power assisted by a brushless motor located in the rear wheel's hub. This motor is a 36V E-Bikeling 500W geared motor\cite{ebikeling}. It provides enough power to assist the propulsion of the PEV. The system comes prepared to attach a sprocket to the hub, so that the rear wheel can be moved with the pedals. All the drivetrain parts were mounted on the frame. The rear brushless motor, like every NIDEC motor (tilting, steering, handle bar), is controlled by a VESC. More detailed information is included in the next section --electronics--.

The PEV is intended to be a lightweight vehicle fully prepared to incorporate a modular autonomous package. This kit will transform a normal three wheeler vehicle into an autonomous urban vehicle. That is why a motor to propel the vehicle is necessary, as well as another motor to steer the front wheels.

\begin{marginfigure}[0cm]
	\includegraphics[width=1\linewidth]{figs/05/Sprocket}
	\caption{PEV sprocket}
\end{marginfigure}

The motor assists on the user's demand. On the right side of the handle bar there is a potentiometer that throttles the vehicle when turned. Regarding the signal flow, the potentiometer sends the command to the Arduino Mega, which is connected to the VESC controller through serial; a minimal sketch of this chain is given below.
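The fragment below illustrates one possible way of turning the throttle reading into a duty-cycle command. The pin number, the dead band and the duty-cycle limit are illustrative assumptions rather than the values used on the PEV, and \texttt{VescUartSetDuty} refers to the UART helper library described later in this section.

\begin{lstlisting}[language=C++]
// Hypothetical throttle handling: potentiometer -> duty cycle -> VESC (rear motor).
// Pin number, dead band and duty limit are assumptions, not the PEV's actual values.
#include "VescUart.h"                 // VescUartControl library (header name may vary)

const int   THROTTLE_PIN = A0;        // potentiometer on the right side of the handle bar
const float MAX_DUTY     = 0.6;       // safety limit for the rear motor duty cycle

void setup() {
  Serial1.begin(115200);              // UART link to the VESC (port 1 in the library calls)
}

void loop() {
  int raw = analogRead(THROTTLE_PIN);        // 0..1023
  float duty = (raw / 1023.0) * MAX_DUTY;    // linear mapping to 0..MAX_DUTY
  if (duty < 0.05) duty = 0.0;               // small dead band: motor off at rest
  VescUartSetDuty(duty, 1);                  // command the VESC connected to Serial1
  delay(20);                                 // roughly 50 Hz command rate
}
\end{lstlisting}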
The VESC finally controls the motor and moves the rear wheel.

\begin{figure}[h!]
	\includegraphics[width=1\linewidth]{figs/05/P1050729}
	\caption{Rear motor in the hub}
\end{figure}

\begin{marginfigure}[-6cm]
	\includegraphics[width=1\linewidth]{figs/05/P1050742}
	\caption{PEV Pedal system: chain, gears and pedals}
\end{marginfigure}

\newpage
\subsection{Renders and Pictures}

\begin{figure}[h!]
	\includegraphics[width=1.15\linewidth]{figs/05/Render_Design_B_3}
	\caption{Isometric render}
	\\[-1cm]
\end{figure}

\begin{figure}[h!]
	\includegraphics[width=1.1\linewidth]{figs/05/Render_Design_B_4}
	\caption{Lateral render}
	\\[-1cm]
\end{figure}

\begin{figure}[h!]
	\includegraphics[width=1.15\linewidth]{figs/05/Render_Design_B_6}
	\caption{Front render}
	\\[-1cm]
\end{figure}

\newpage

\begin{figure*}[h!]
	\includegraphics[width=0.9\linewidth]{figs/05/CreamBox0150}
	\caption{Exploded view of the PEV components}
\end{figure*}

$ $

\begin{figure*}[h!]
	\includegraphics[width=0.95\linewidth]{figs/05/P10507152}
	\caption{PEV}
\end{figure*}

%\begin{figure}[h!]
%	\includegraphics[width=1\linewidth]{figs/05/CreamBox0210}
%	\caption{Render}
%	\\[-1.5cm]
%\end{figure}
%\begin{figure}[h!]
%	\includegraphics[width=1\linewidth]{figs/05/QuietRoom5}
%	\caption{Render}
%	\\[-1.5cm]
%\end{figure}

\newpage
\section{Electronics}

Electronics lay the foundation of a mechatronic project. In this section the components used are presented and the process of implementing them correctly in the PEV is explained.

\subsection{PID Control}

Before using the VESC controller --an open source, highly modifiable electronic speed controller (ESC) by Benjamin Vedder--, a NIDEC driver was used to control the angular position of the tilting motor. This happened at an early stage of the project; the final version of the PEV was designed with VESC controllers. The driver had 4 pins to control the motor:

\begin{itemize}
\begin{itemize}
	\item \textbf{PWM}: pulse width modulation signal that determines the speed of the motor. It is given as an integer between 0 and 255; the higher the PWM, the faster the motor rotates. Connected to a PWM digital output pin of the Arduino.
	\item \textbf{CCW}: binary variable to select the direction of rotation (clockwise or counterclockwise). Connected to a digital pin.
	\item \textbf{FG}: frequency generator; it outputs a signal with a frequency proportional to the motor speed. The proportionality constant was unknown and had to be roughly estimated.
	\item \textbf{GND}: ground, connected to the GND pin of the Arduino.
\end{itemize}
\end{itemize}

With this driver, the angular position of the tilting motor was controlled as follows. The input to the motor is the PWM value, which makes it rotate at a certain speed. The speed is read from the pulses coming through the FG pin. Since there is no position reading, the position is calculated from the speed and the frequency of the control loop.

Before reviewing the control system in detail, it is important to make a distinction between the time and the Laplace domains. The control diagram represents the Laplace domain, where the time variable $t$ is replaced by the complex variable (frequency $s$) by applying the Laplace transform. A variable in lowercase ($n$) belongs to the time domain, whereas a variable in uppercase ($N$) belongs to the Laplace domain.

\newpage

\begin{figure*}[h!]
	\includegraphics[width=1\linewidth]{figs/05/own/model_own_1}
	\caption{PID control of the angular position}
	\label{model_own_1}
\end{figure*}

The control diagram in Figure \ref{model_own_1} represents the inputs and outputs of this simple position control. The angular reference $\phi_{ref}$ is given by the user --at first through a potentiometer-- and the error $e$ is calculated between the reference and the actual angle $\phi$. This error is used to determine the input signal $pwm$ using a PID control. Finally, the $pwm$ value sets a specific speed and therefore a position at that time step. Translating these words into equations, the system equation is expressed as
\[\phi_{i}=\phi_{i-1}+\frac{2\pi\,n}{60}(t_{i}-t_{i-1})\]
where $\phi_{i}$ is the angular position (in radians) at time $t_{i}$ and $n$ is the speed in rpm. The input variable $pwm$ is calculated from the error (defined as $E=\Phi_{ref}-\Phi$) by means of the PID control:
\[pwm=K_{P}(\phi_{ref}-\phi)+K_{I}\int_{t_{i-1}}^{t_{i}}(\phi_{ref}-\phi)\, dt + K_{D}\frac{d (\phi_{ref}-\phi)}{dt}\]
In the Laplace domain:
\[PWM=(K_{P}+\frac{K_{I}}{s}+K_{D}\,s)(\Phi_{ref}-\Phi)\]
The speed is the time derivative of the position:
\[n =\frac{d\phi}{dt}\,\frac{1\,rev}{2\pi\,rad}\,\frac{60\,s}{1\,min}\quad\rightarrow\quad N=s\,\Phi\, \frac{60}{2\pi}\]

\newpage

\begin{marginfigure}[0cm]
	\includegraphics[width=1.2\linewidth]{figs/05/own/polynomial}
	\caption{Angular speed N vs signal PWM: Polynomial fitting}
	\label{polynomial}
\end{marginfigure}

\begin{marginfigure}[0cm]
	\includegraphics[width=1.2\linewidth]{figs/05/own/linear}
	\caption{Angular speed N vs signal PWM: Saturation and linear fitting}
	\label{linear}
\end{marginfigure}

The relation between the input $pwm$ and the output speed $n$ is not known, but it can be estimated. Using the speed information from the FG pin, some measurements were made to characterize the speed of the motor as a function of the $pwm$ input. In Figure \ref{polynomial} the obtained data is represented and fitted with a 5th degree polynomial curve:
\[N=f(PWM)\]
In Figure \ref{linear}, on the other hand, the speed--pwm relation is simplified by a linear regression (the speed saturates at $pwm=57$):
\[N=K\,PWM\]
Therefore, the speed is expressed as a function of the error:
\[N=s\,\Phi\, \frac{60}{2\pi}=f(PWM)=f\big((K_{P}+\frac{K_{I}}{s}+K_{D}\,s)(\Phi_{ref}-\Phi)\big)\]
For the sake of simplicity, the transfer function is given for the linear relation $N=K\,PWM$:
\[\Phi=\Phi_{ref}\frac{K_{P}\,s+K_{I}+K_{D}\,s^2}{\frac{60s^2}{2\pi\,K}+K_{P}\,s+K_{I}+K_{D}\,s^2}\]

The control strategy was implemented in MATLAB and the PID tuner turned out to be really useful to determine the values of $K_{P}$, $K_{I}$ and $K_{D}$. Overall the quality of this \textbf{PID control was satisfactory}, but the VESC controller turned out to be easier to implement and it also provided more feedback from the motor (electrical and mechanical variables).

\begin{figure}[h!]
	\includegraphics[width=0.85\linewidth]{figs/05/own/IMG_20161231_132543}
	\caption{Schematic of the early motor controller: Arduino UNO, 9V battery, potentiometer and NIDEC driver}
	\\[-10cm]
\end{figure}
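For reference, a minimal Arduino-style sketch of this position loop is given below. The pin numbers, the gains and the FG pulses-per-revolution constant are illustrative assumptions (as noted above, the real FG constant was only estimated), so this is a sketch of the approach rather than the controller actually used on the PEV.

\begin{lstlisting}[language=C++]
// Minimal sketch of the PWM/FG position loop described above (illustrative values only, degrees).
const int PWM_PIN = 9, CCW_PIN = 8, FG_PIN = 2;   // assumed wiring on an Arduino UNO
const float FG_PULSES_PER_REV = 6.0;              // assumed FG constant (roughly estimated)
const float KP = 2.0, KI = 0.5, KD = 0.1;         // example gains (tuned in MATLAB originally)

volatile unsigned long fgCount = 0;
void fgIsr() { fgCount++; }                       // count one FG pulse

float phi = 0, integral = 0, prevError = 0;
bool ccw = false;

void setup() {
  pinMode(PWM_PIN, OUTPUT);
  pinMode(CCW_PIN, OUTPUT);
  attachInterrupt(digitalPinToInterrupt(FG_PIN), fgIsr, RISING);
}

void loop() {
  static unsigned long last = millis();
  float dt = (millis() - last) / 1000.0;
  last = millis();

  // integrate the angle travelled since the last cycle from the FG pulses
  noInterrupts(); unsigned long pulses = fgCount; fgCount = 0; interrupts();
  float dPhi = 360.0 * pulses / FG_PULSES_PER_REV;        // degrees travelled
  phi += ccw ? -dPhi : dPhi;                              // sign given by the commanded direction

  float phiRef = map(analogRead(A0), 0, 1023, -45, 45);   // reference from a potentiometer
  float error = phiRef - phi;
  integral += error * dt;
  float derivative = (dt > 0) ? (error - prevError) / dt : 0;
  prevError = error;

  float u = KP * error + KI * integral + KD * derivative; // PID output
  ccw = (u < 0);
  digitalWrite(CCW_PIN, ccw);                             // direction pin
  analogWrite(PWM_PIN, constrain((int)fabs(u), 0, 255));  // PWM magnitude
  delay(10);
}
\end{lstlisting}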
\newpage
\subsection{VESC}

The VESC is a fully customizable open source electronic speed controller by Benjamin Vedder, designed for lightweight and compact applications like skateboards or, in this case, a three wheeler vehicle.

\begin{figure}[h!]
	\includegraphics[width=1\linewidth]{figs/05/vesc}
	\caption{VESC Picture}
	\\[-1cm]
\end{figure}

The controller receives the power from the batteries and outputs the 3 phases $U$, $V$ and $W$ to the brushless motors. The firmware offers various ways to control the motor. Every motor in this PEV was configured to be controlled in FOC (Field Oriented Control) sensorless mode.

\textbf{FOC -- Field Oriented Control}

BLDC motors require a controller that converts the applied DC from the battery cells into AC to drive the motor. This task demands complex driving algorithms to commutate the coils in a sequence that achieves the desired directional rotation. A wide range of control algorithms is available:

\begin{itemize}
\begin{itemize}
	\item Trapezoidal control: for each of the 6 commutation steps, a pair of windings is powered, leaving the third one disconnected. This method generates high torque ripple, leading to vibration, noise, and poor performance.
	\item Sinusoidal control: it supplies sinusoidally varying currents to the 3 windings, thus reducing the torque ripple and offering a smooth rotation. However, these time-varying currents are controlled using basic PI regulators, which leads to poor performance at higher speeds.
	\item Field Oriented Control: the torque and the flux can be controlled independently, which provides a faster dynamic response. There is no torque ripple, and smooth, accurate motor control can be achieved at both low and high speeds.
\end{itemize}
\end{itemize}

\textbf{VESC Library}

\marginnote{
\begin{tabular}{c}
	\textbf{bldcMeasure struct} \\[1.5pt]
	\hline \\[-1.5pt]
	Average Motor Current \\[1.5pt]
	Average Input Current \\[1.5pt]
	Duty Cycle \\[1.5pt]
	Motor RPM \\[1.5pt]
	Input Voltage \\[1.5pt]
	Ampere Hours \\[1.5pt]
	Ampere Hours Charged \\[1.5pt]
	Tachometer \\[1.5pt]
	Tachometer Absolute \\[1.5pt]
	\hline
\end{tabular}
}

The VESC was connected to the Arduino MEGA board through a serial connection (UART). To facilitate the control of the motor, the VescUartControl library was used in Arduino to interface over UART with the VESC. The margin table summarizes the available data from the VESC. In addition, it was possible to set the motor current, the brake current, the angular position, the duty cycle and the RPM of the motor.
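As an illustration of how these calls are combined (their exact signatures are documented in the listing below), a hypothetical polling fragment could look like this; the use of \texttt{Serial1} as the VESC port and the struct field names are assumptions based on the library version in use.

\begin{lstlisting}[language=C++]
// Poll the VESC over UART and reuse the returned values (sketch, not the actual PEV code).
#include "VescUart.h"               // VescUartControl library (header name may vary)

struct bldcMeasure measured;         // struct defined by the library (see margin table)

void setup() {
  Serial1.begin(115200);             // UART link to the VESC (port 1 in the library calls)
}

void loop() {
  if (VescUartGetValue(measured, 1)) {            // request and read the VESC data
    float rpm     = measured.rpm;                 // motor RPM
    float current = measured.avgMotorCurrent;     // e.g. an input for the haptic feedback
    long  tach    = measured.tachometer;          // odometry information
    // ... feed these values into the control strategy ...
    (void)rpm; (void)current; (void)tach;         // placeholders in this sketch
  }
  delay(20);
}
\end{lstlisting}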
\hspace{1cm}
\begin{lstlisting}[style=codedef]
@@bool VescUartGetValue(struct bldcMeasure& values, int num);@@
%\textit{Sends a command to VESC and stores the returned data}%
@@values@@(struct bldcMeasure&) - bldcMeasure struct with received data
@@num@@(int) - the serial port in use (0=Serial; 1=Serial1; 2=Serial2; 3=Serial3;)
@@return@@(bool) - true if success

@@void VescUartSetCurrent(float current, int num);@@
%\textit{Sends a command to VESC to control the motor current}%
@@current@@(float) - the current for the motor
@@num@@(int) - the serial port in use (0=Serial; 1=Serial1; 2=Serial2; 3=Serial3;)

@@void VescUartSetCurrentBrake(float brakeCurrent, int num);@@
%\textit{Sends a command to VESC to control the motor brake}%
@@brakeCurrent@@(float) - the current for the brake
@@num@@(int) - the serial port in use (0=Serial; 1=Serial1; 2=Serial2; 3=Serial3;)

@@void VescUartSetPosition(float position, int num);@@
%\textit{Sends a command to VESC to control the motor position}%
@@position@@(float) - the position in degrees for the motor
@@num@@(int) - the serial port in use (0=Serial; 1=Serial1; 2=Serial2; 3=Serial3;)

@@void VescUartSetDuty(float duty, int num);@@
%\textit{Sends a command to VESC to control the motor duty cycle}%
@@duty@@(float) - the duty cycle for the motor
@@num@@(int) - the serial port in use (0=Serial; 1=Serial1; 2=Serial2; 3=Serial3;)

@@void VescUartSetRPM(float rpm, int num);@@
%\textit{Sends a command to VESC to control the motor rotational speed}%
@@rpm@@(float) - the revolutions per minute for the motor
@@num@@(int) - the serial port in use (0=Serial; 1=Serial1; 2=Serial2; 3=Serial3;)
\end{lstlisting}

\textbf{VESC Features}

\begin{marginfigure}
	\includegraphics[width=0.65\linewidth]{figs/05/BLDC_41}
	\caption{Front VESC Schematic}
\end{marginfigure}

\begin{marginfigure}
	\includegraphics[width=0.65\linewidth]{figs/05/BLDC_42}
	\caption{Back VESC Schematic}
\end{marginfigure}

\begin{itemize}
\begin{itemize}\itemsep -10pt
	\item Voltage $8V$ to $60V$
	\item Current up to 240A for a few seconds or 50A continuous
	\item 5V 1A output for external electronics (Arduino)
	\item Sensored and sensorless FOC, BLDC, and DC
	\item Current and voltage measurement on all phases
	\item Duty-cycle control, speed control or current control
	\item Interface to control the motor: PPM signal, analog, UART, I2C, USB or CAN-bus
	\item Regenerative braking
	\item Good start-up torque in the sensorless mode
	\item The motor is used as a tachometer, which is good for odometry
	\item Adjustable protection against low/high input voltage and high motor/input current
\end{itemize}
\end{itemize}

It is possible to plot the currents, voltages and the duty cycle in real time in the BLDC Tool, which is useful when debugging how everything behaves. A screenshot of the configuration GUI (BLDC Tool) is shown in the figure below.

\begin{figure}[h!]
	\includegraphics[width=1\linewidth]{figs/05/RT_Data}
	\caption{BLDC Tool}
\end{figure}

In order to protect the VESC from hazards and avoid any undesired contact, some cases were 3D printed. The first version was designed to house only the board, whereas the second version had enough space to also incorporate the VESC capacitors.
\begin{marginfigure}[-4cm]
	\includegraphics[width=0.9\linewidth]{figs/05/vesc_case_3}
	\caption{VESC case version 1}
\end{marginfigure}

\begin{marginfigure}
	\includegraphics[width=0.9\linewidth]{figs/05/vesc_case_6}
	\caption{VESC case version 2}
\end{marginfigure}

\newpage
\subsection{Rotary Encoder}

The longitudinal velocity of the PEV $V=V_{x}$ is obtained from a rotary encoder located at the rear wheel. This sensor is completely necessary, since the motor is not able to provide this information. The motor and the hub of the rear wheel are decoupled, meaning that the motor rotates freely inside the hub when the user pedals, for example. That is why the motor cannot provide feedback on the velocity of the wheel.

The rotary encoder or transmitter\cite{rotary} is an incremental optical encoder that converts the motion into an electrical signal indicating the position of the rear wheel. Transmitters must be used with a controller that has quadrature detection to get a 4X resolution increase; the sensor meets IP50 for protection from dust. The code disk inside a quadrature encoder contains two tracks, usually denoted Channel A and Channel B. These tracks or channels are coded ninety electrical degrees out of phase, and this is the key design element that gives the quadrature encoder its functionality. In applications where direction sensing is required, a controller can determine the direction of movement based on the phase relationship between Channels A and B. As illustrated in the figure below, when the quadrature encoder is rotating in a clockwise direction its signal will show Channel A leading Channel B, and the reverse will happen when the quadrature encoder rotates counterclockwise.

\begin{marginfigure}
	\includegraphics[width=1\linewidth]{figs/05/encoder}
	\caption{Quadrature Encoder}
\end{marginfigure}

The resolution of this particular encoder is 1000 counts per revolution. It has a polyurethane wheel of 1.91 in (48.5 mm) diameter attached to the shaft of the encoder. To get the speed of the PEV, a simple kinematic study was done, following the diagram in Figure \ref{encoder_2}:
\[\vec{v}_{0}=\vec{0}, \qquad \vec{v}_{A}=V\,\vec{i}=w_{1}\,R_{1}\,\vec{i}, \qquad \vec{v}_{P}=\vec{v}_{A}+w_{1}\,R_{1}\,\vec{i}\]
\[\vec{v}_{B}=V\,\vec{i}, \qquad \vec{v}_{P}=\vec{v}_{B}+w_{2}\,R_{2}\,\vec{i}\]
Equating the two expressions for the velocity of the contact point $P$:
\[\vec{v}_{A}+w_{1}\,R_{1}\,\vec{i}=\vec{v}_{B}+w_{2}\,R_{2}\,\vec{i}\]
Since the velocities of the centers of both solids are the same ($\vec{v}_{A}=\vec{v}_{B}$):
\[w_{1}\,R_{1}=w_{2}\,R_{2}\]
and therefore
\[V=w_{1}\,R_{1}=w_{2}\,R_{2}\]
To calculate the speed of the vehicle, only the angular velocity and the radius of the small wheel attached to the encoder are needed.

\begin{marginfigure}[-5cm]
	\includegraphics[width=1.15\linewidth]{figs/05/encoder_2}
	\caption{Wheel -- Encoder diagram}
\end{marginfigure}

The radius is known, and the angular velocity can be defined as:
\[w_{2}=\frac{\Delta\phi}{\Delta t}, \qquad \Delta\phi=N_{counts}\cdot\frac{1\,rev}{1000\,counts}\cdot\frac{2\pi\,rad}{1\,rev}\]
The speed is therefore calculated as:
\[V=N_{counts}\cdot\frac{2\pi R_{2}}{1000\,\Delta t}\]
Every time the encoder sends a pulse to the board, an interrupt routine runs to read that pulse and catch the rising or falling edge on the input pin. Therefore, if the Arduino is not fast enough, interrupting the microprocessor can cause other pulses to be missed and can considerably delay other operations. A minimal sketch of this interrupt-driven measurement is shown below.
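The fragment below counts the pulses in an interrupt routine and estimates the speed periodically, using the relation $V=N_{counts}\cdot 2\pi R/(1000\,\Delta t)$ derived above. The pin number and the 100 ms measurement window are illustrative assumptions.

\begin{lstlisting}[language=C++]
// Count the encoder pulses in an interrupt and estimate the speed periodically (sketch).
const int   ENC_A_PIN        = 18;        // external-interrupt pin on the Arduino Mega (assumed)
const float COUNTS_PER_REV   = 1000.0;    // encoder resolution
const float ENC_WHEEL_RADIUS = 0.02425;   // m, radius of the encoder wheel (48.5/2 mm)

volatile unsigned long counts = 0;
void encoderIsr() { counts++; }           // keep the interrupt routine as short as possible

void setup() {
  pinMode(ENC_A_PIN, INPUT_PULLUP);
  attachInterrupt(digitalPinToInterrupt(ENC_A_PIN), encoderIsr, RISING);
  Serial.begin(115200);
}

void loop() {
  static unsigned long lastMs = millis();
  if (millis() - lastMs >= 100) {                           // 100 ms measurement window
    float dt = (millis() - lastMs) / 1000.0;
    lastMs = millis();
    noInterrupts(); unsigned long n = counts; counts = 0; interrupts();
    float v = n * 2.0 * PI * ENC_WHEEL_RADIUS / (COUNTS_PER_REV * dt);
    Serial.println(v, 3);                                   // longitudinal speed in m/s
  }
}
\end{lstlisting}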
Taking into account that the resolution is really high (1000 counts/rev) and that the radius of the encoder wheel is $48.5/2=24.25$ mm, if the PEV is moving at $1\,m/s$, for example, the Arduino board needs to be able to interrupt its code more than 6500 times per second (6500 Hz). Increasing the speed of the vehicle increases this frequency as well. To get the number of counts in a given time interval, it was necessary to implement a very fast digital reader in the Arduino Mega, in order not to lose any count and to estimate the speed of the vehicle correctly.

In addition, this strategy works even better if a bit of hardware debouncing is applied to the rotary encoder. With two 0.1 uF capacitors soldered to the encoder pins, the number of calls to the interrupt routine is dramatically reduced. This is important because too many calls to the interrupt routine will rob computing cycles from the main routine, negating the effects of using interrupts to save computing power.

\begin{figure}[h!]
	\includegraphics[width=1\linewidth]{figs/05/P1050728}
	\caption{Rotary encoder mounted on the PEV}
	\\[-2cm]
\end{figure}

\newpage
\subsection{IMU}

\begin{marginfigure}[1cm]
	\includegraphics[width=1\linewidth]{figs/05/bno055}
	\caption{Bosch BNO055 9DOF IMU}
\end{marginfigure}

The selected inertial measurement unit was the Bosch BNO055, a 9-axis absolute orientation sensor with sensor fusion. The BNO055 integrates a triaxial 14-bit accelerometer, a triaxial 16-bit gyroscope with a range of 2000 degrees per second, a triaxial geomagnetic sensor and a 32-bit microcontroller. It is connected through I2C (clock and data pins) to the analog pins of the Arduino. The PEV is equipped with a pair of BNO055, both connected through I2C, with the difference that the second IMU's ADR pin is connected to the board as well. Setting the ADR pin high changes the I2C address from the default 0x28 to 0x29.

\marginnote{VIN: 3.3-5.0V power supply input \\ GND: common for power and logic \\ SCL: I2C clock pin \\ SDA: I2C data pin \\ ADR: to change the I2C address \\ }

Rather than spending a lot of time with algorithms of varying accuracy and complexity, the data can be extracted very easily thanks to the on-board sensor fusion. However, the device has to be configured properly when powered:

\begin{enumerate}\itemsep -10pt
	\item The operation mode has to be set to CONFIG MODE to be able to configure the device
	\item The accelerometer range is limited to 2G to get better accuracy
	\item The device is calibrated using offset values obtained previously
	\item The operation mode is set again to the NDOF fusion mode, from which the absolute orientation can be obtained.
\end{enumerate}

\textbf{Calibration}

To obtain correct values from the IMU, it has to be properly calibrated first. Once the device is calibrated, the calibration data will be kept until the BNO is powered off. The BNO does not contain any internal EEPROM, so either a new calibration or a manual restore of the calibration data is needed every time the device starts up. The BNO055 includes internal algorithms to constantly calibrate the gyroscope, accelerometer and magnetometer. The four calibration registers -- an overall system calibration status, as well as individual gyroscope, magnetometer and accelerometer values -- will return a value between '0' (uncalibrated data) and '3' (fully calibrated).
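A hypothetical start-up fragment, assuming the Adafruit BNO055 Arduino driver, illustrates the dual-sensor setup and the calibration check; method names may differ in other drivers, and restoring saved offsets is only indicated by a comment.

\begin{lstlisting}[language=C++]
// Configure the two BNO055 IMUs and poll their calibration status (sketch).
#include <Wire.h>
#include <Adafruit_Sensor.h>
#include <Adafruit_BNO055.h>

// Two IMUs on the same I2C bus: default address 0x28 and, with ADR high, 0x29.
Adafruit_BNO055 imuFrame(55, 0x28);
Adafruit_BNO055 imuBar(56, 0x29);

void setup() {
  Serial.begin(115200);
  imuFrame.begin();     // defaults to the NDOF fusion mode (absolute orientation)
  imuBar.begin();
  // Restoring previously saved offsets would go here (setSensorOffsets), since the
  // BNO055 has no internal EEPROM and loses its calibration at power-off.
}

void loop() {
  uint8_t sys, gyro, accel, mag;
  imuFrame.getCalibration(&sys, &gyro, &accel, &mag);   // each value goes from 0 to 3
  if (sys > 0) {
    // Orientation data can be trusted once the system status leaves 0 in NDOF mode.
  }
  delay(100);
}
\end{lstlisting}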
The sensors are trimmed to tight offsets, meaning valid data is obtained even before the calibration process is complete, but particularly in NDOF mode any data should be discarded as long as the system calibration status is 0. The reason is that a system calibration status of '0' in NDOF mode means that the device has not yet found magnetic north, and the orientation values will be off. The heading will jump to an absolute value once the BNO finds magnetic north. To generate valid calibration data, the following criteria should be met:

\begin{itemize}
\begin{itemize}\itemsep -10pt
	\item Gyroscope: the device must be standing still in any position
	\item Magnetometer: normal movement of the device is sufficient
	\item Accelerometer: the BNO055 must be placed in 6 standing positions for +X, -X, +Y, -Y, +Z and -Z.
\end{itemize}
\end{itemize}

\textbf{Position of the IMU}

\begin{marginfigure}[0cm]
	\includegraphics[width=1.2\linewidth]{figs/05/IMU_Overall}
	\caption{Location of IMU A and B}
\end{marginfigure}

The PEV is equipped with a pair of IMUs, one placed on the handle bar and another one fixed on the frame. The justification for this decision rests on the following arguments:

\begin{itemize}
\begin{itemize}
	\item The relative angle between the frame and the handle bar can be obtained, meaning that an encoder is not needed in the handle bar.
	\item The perceived lateral acceleration $a_{per}$ has to be obtained along the lateral axis $y'$, which should not be affected by the handle bar steering.
	\item The steering angle $\delta$ and its rate $\dot{\delta}$ can also be obtained from the gyroscope
\end{itemize}
\end{itemize}

\textbf{Orientation of the IMU}

\begin{marginfigure}[5cm]
	\includegraphics[width=1.1\linewidth]{figs/05/Euler}
	\caption{Quaternion vector diagram}
\end{marginfigure}

Since each IMU is oriented differently, the orientation data has to be processed differently in each case. The angular orientation from each IMU is obtained from the quaternion:
\[\vec{q}=\begin{bmatrix} q_{w} \\ q_{x} \\ q_{y} \\ q_{z} \end{bmatrix}=\begin{bmatrix} \cos(\alpha/2) \\ \sin(\alpha/2)\cos(\beta_{x}) \\ \sin(\alpha/2)\cos(\beta_{y}) \\ \sin(\alpha/2)\cos(\beta_{z}) \end{bmatrix}\]
where $\alpha$ is a simple rotation angle and $\cos(\beta_{x})$, $\cos(\beta_{y})$ and $\cos(\beta_{z})$ are the direction cosines locating the axis of rotation.

The quaternion is then transformed into the appropriate Euler angles: roll, pitch and yaw. Each of these angles is related to one of the axes X, Y and Z. Depending on the order of the rotations --XYZ and ZYX Euler angles are not the same-- the values of roll, pitch and yaw change. To transform the quaternion into the corresponding Euler angles, the motion of each IMU has to be taken into account, as well as their relative orientation.

\newpage

The handle bar and the frame IMUs were placed in different orientations (a short conversion sketch is given after the list):

\begin{itemize}
\begin{itemize}
	\item In the handle bar IMU the angle of interest is the steering angle $\delta$ around the Z axis. In order to obtain it independently from any other rotation, the quaternion is transformed into the YZX Euler angles
	\[\delta_{A}=\arctan \frac{-2(q_{y}\,q_{z}-q_{w}\,q_{x})}{q_{w}^2-q_{x}^2+q_{y}^2-q_{z}^2}\]
	\item In the frame IMU the angles of interest are the steering angle $\delta$ and the tilting angle $\theta$, around the Y and the X axis respectively. Therefore, the transformation is done to obtain the XZY angles
	\[\theta=\arcsin\big(-2(q_{x}\,q_{z}-q_{w}\,q_{y})\big)\]
	\[\delta_{B}=\arctan\frac{2(q_{x}\,q_{y}+q_{w}\,q_{z})}{q_{w}^2+q_{x}^2-q_{y}^2-q_{z}^2}\]
\end{itemize}
\end{itemize}
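A compact sketch of these two conversions is given below, assuming the Adafruit BNO055 driver's \texttt{imu::Quaternion} type (any quaternion container with the same components would do); \texttt{atan2} is used to evaluate the quotients.

\begin{lstlisting}[language=C++]
// Quaternion-to-angle conversions used for the two IMUs (sketch; math.h functions).
#include <Adafruit_BNO055.h>
#include <utility/imumaths.h>        // imu::Quaternion, shipped with the Adafruit driver

// Handle bar IMU (A): steering angle from the YZX sequence.
float steeringFromBarIMU(imu::Quaternion q) {
  return atan2(-2.0 * (q.y() * q.z() - q.w() * q.x()),
               q.w() * q.w() - q.x() * q.x() + q.y() * q.y() - q.z() * q.z());
}

// Frame IMU (B): tilting angle and steering angle from the XZY sequence.
float tiltFromFrameIMU(imu::Quaternion q) {
  return asin(-2.0 * (q.x() * q.z() - q.w() * q.y()));
}

float steeringFromFrameIMU(imu::Quaternion q) {
  return atan2(2.0 * (q.x() * q.y() + q.w() * q.z()),
               q.w() * q.w() + q.x() * q.x() - q.y() * q.y() - q.z() * q.z());
}

// The steering angle used by the controller is the relative value delta = delta_A - delta_B.
\end{lstlisting}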
%\begin{marginfigure}
%	\caption{IMU angles}
%\end{marginfigure}
%
%
%\begin{figure*}[h]
%	\minipage{0.5\textwidth}
%	\includegraphics[width=1.0\linewidth]{figs/05/IMU_A2}
%	\captionof{a)}{ Handle bar IMU (A)}
%	\endminipage\hfill
%	\hspace{0pt}
%	\minipage{0.5\textwidth}
%	\includegraphics[width=1.0\linewidth]{figs/05/IMU_B2}
%	\captionof{b)}{ Frame IMU (B)}
%	\endminipage
%	\\[0pt]
%\end{figure*}

\begin{figure}[h!]
	\centering
	\includegraphics[width=0.85\linewidth]{figs/05/IMU_A2}
	\caption{Handle bar IMU (A)}
	\\[1cm]
\end{figure}

\begin{figure}[h!]
	\centering
	\includegraphics[width=0.85\linewidth]{figs/05/IMU_B2}
	\caption{Frame IMU (B)}
	\\[-2cm]
\end{figure}

\newpage

\textbf{Extending angular range}

The $\arctan$ and $\arcsin$ functions implemented only produce results between $-\pi/2$ and $\pi/2$. This range problem is solved by extending the angular orientation to a continuous spectrum. Any angle is extended as
\[\varphi=\varphi+2\pi\,k\]
with $k=0$ in the initial range $[-\pi/2,\pi/2]$.

\begin{itemize}
\begin{itemize}
	\item If $\varphi$ exceeds a value of $(k+1)\pi/2$, then $k$ is incremented by one unit: $k=k+1$
	\item If $\varphi$ falls below a value of $-(k+1)\pi/2$, then $k$ is decreased by one unit: $k=k-1$
\end{itemize}
\end{itemize}

\begin{figure}[h!]
	\centering
	\includegraphics[width=0.9\linewidth]{figs/05/Angle_jump}
	\caption{Discontinuous vs continuous orientation}
	\\[-0cm]
\end{figure}

\textbf{Data obtained from the IMU}

The control strategy requires the following data to calculate the torque required from the tilting motor.

\begin{table}
\begin{tabular}{cl}
	& \textit{Handle Bar IMU} \\[2.5pt]
	\hline
	$\delta_{A}$ & Euler angle along vertical direction \\[2.5pt]
	$\dot{\delta}$ & Gyroscope along vertical direction \\[2.5pt]
	\hline \\
	& \textit{Frame IMU} \\[2.5pt]
	\hline
	$\delta_{B}$ & Euler angle along vertical direction \\[2.5pt]
	$\dot{\Phi}$ & Gyroscope along vertical direction \\[2.5pt]
	$\theta$ & Euler angle along longitudinal direction \\[2.5pt]
	$\dot{\theta}$ & Gyroscope along longitudinal direction \\[2.5pt]
	$a_{per}$ & Linear acceleration along lateral direction \\[2.5pt]
	\hline \\
\end{tabular}
\end{table}

The steering angle $\delta$ is calculated from the relative orientation $\delta=\delta_{A}-\delta_{B}$.

\newpage
\subsection{Communication: Bluetooth Modules}

The Arduino boards have two Bluetooth modules connected. The two modules, HC--05 and HC--06, are very similar. HC--05 is a more capable module that can be set to be either master or slave, while HC--06 is a slave-only device. These modules run on 3.3V power and have two modes of operation: in command mode, AT commands can be sent to the module, and in data mode it transmits and receives data to and from another Bluetooth module.

\begin{marginfigure}[5cm]
	\includegraphics[width=1.1\linewidth]{figs/05/android}
	\caption{Android app for remote control}
\end{marginfigure}

The HC--06 module was connected to an Android app in order to remotely control the PEV. The commands sent to this module controlled the rear motor, as well as the steering motor. The HC--05 module, on the other hand, was connected to a Processing script, in which data was saved in real time and exported into a .csv file.
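For reference, a hypothetical fragment of the streaming side simply prints comma-separated values to the serial port wired to the HC--05 (assumed here to be Serial2); the Processing sketch on the other end appends each received line to the .csv file.

\begin{lstlisting}[language=C++]
// Stream one CSV line with the current state over the HC-05 Bluetooth module (assumed on Serial2).
void streamTelemetry(float speed, float tilt, float steering) {
  Serial2.print(millis());   Serial2.print(',');
  Serial2.print(speed, 3);   Serial2.print(',');
  Serial2.print(tilt, 3);    Serial2.print(',');
  Serial2.println(steering, 3);
}
\end{lstlisting}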
In this way, it was possible to analyze the data coming from the sensors and verify the correct response of the control strategy.

One application of this data streaming setup was shown during the Media Lab's spring members week. The data from the PEV was streamed in real time and projected onto a table. Only basic data was presented during this event, for example the speed from the rotary encoder and IMU data. The script was also prepared to align the projection to the table, as can be seen in Figure \ref{Captura3}.

\begin{figure}[h!]
	\includegraphics[width=1\linewidth]{figs/05/Captura3}
	\caption{Live streamed data projected onto a table}
	\label{Captura3}
\end{figure}

\newpage
\subsection{Arduino}

\begin{marginfigure}
	\includegraphics[width=1\linewidth]{figs/05/mega}
	\caption{Arduino Mega}
\end{marginfigure}

The PEV uses two Arduino MEGA boards for the acquisition of the sensor data and for the control of the motors. The reason for using two boards lies in the VESC controllers. The most straightforward implementation of the VESC library for Arduino is to connect board and controller through a UART connection. The Arduino Mega is able to manage 4 serial connections simultaneously; since the library uses one of them for debugging, 3 serial connections remain. The PEV incorporates 4 motors (steer, handle bar, tilt, throttle), so two Arduino Megas are needed to control all of them. The two Arduinos are connected to each other through the I2C protocol. The code running on both boards is included in the appendices.

\subsection{Batteries}

\begin{marginfigure}[2cm]
	\includegraphics[width=1\linewidth]{figs/05/battery}
	\caption{HWT-1004-7AB battery}
\end{marginfigure}

The PEV is powered by two sets of DC batteries. The rear motor requires an input voltage of 36V, which is provided by the HWT-1004-7AB battery. This battery is a removable pack, designed as a 10S4P configuration of Li(NiCoMn)O2 cells, and its nominal capacity is 11.4 Ah. It also meets the IPX4 waterproof rating and provides two levels of protection. The first level of protection is implemented in software, which is typically slow to act, and the second level is implemented in hardware, which reacts very fast, on the order of microseconds or milliseconds. The HWT-1004-7AB also supplies one auxiliary power output (USB 5V), which can satisfy the demands of a variety of electronic gadgets such as smartphones, MP3 players and a head light.

On the other hand, the NIDEC motors need an input voltage of 24V. A pair of Power-Sonic PS-1290 batteries, each of 12V, was selected for this task. The Power-Sonic PS-1290 is a 12 Volt 9 Amp Hour rechargeable sealed lead acid battery. Finally, the Arduino boards are powered by the 5V output of the VESC controllers.

\subsection{Electronic Schematic}

The schematic of all the components and their connections to the two Arduino Mega boards is included on the next page:

\newpage

\begin{figure*}[h!]
	\includegraphics[width=1\linewidth]{figs/05/PEV_fritzing2}
	\caption{Electronic Schematic}
\end{figure*}

\newpage

%\section{Fabrication}
%\subsection{Laser Cut}
%\subsection{Soldering}
%\subsection{Sand Blaster}
%\subsection{Water Jet}
%\subsection{Sheet Metal Bending}
%\subsection{Tube Bending}
%\subsection{3D Printing}
%\subsection{Drill}
%\subsection{Tapping}
{ "alphanum_fraction": 0.7473368743, "avg_line_length": 64.8096153846, "ext": "tex", "hexsha": "b8ebb9ca59b0d75650f08aa34f9f9f0bf04ca9cb", "lang": "TeX", "max_forks_count": 2, "max_forks_repo_forks_event_max_datetime": "2018-11-16T07:29:54.000Z", "max_forks_repo_forks_event_min_datetime": "2018-10-31T00:54:01.000Z", "max_forks_repo_head_hexsha": "f547c9879ca2c2b12ee57ceff9d533061167b701", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "imartinezl/MIT-Media-Lab-latex-thesis", "max_forks_repo_path": "pages/05realScale.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "f547c9879ca2c2b12ee57ceff9d533061167b701", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "imartinezl/MIT-Media-Lab-latex-thesis", "max_issues_repo_path": "pages/05realScale.tex", "max_line_length": 688, "max_stars_count": 3, "max_stars_repo_head_hexsha": "f547c9879ca2c2b12ee57ceff9d533061167b701", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "imartinezl/MIT-Media-Lab-latex-thesis", "max_stars_repo_path": "pages/05realScale.tex", "max_stars_repo_stars_event_max_datetime": "2021-01-08T15:09:27.000Z", "max_stars_repo_stars_event_min_datetime": "2018-11-25T16:15:25.000Z", "num_tokens": 19074, "size": 67402 }
\chapter{The five-fold activities (\emph{pañcakṛtya}) and the \emph{Oṃkāra}-mantra} \emph{Summary}: The Evanescence of Śiva after expounding the five-fold duties (\tl{pañcakṛtya}) and the Oṃkāra mantra to Brahmā and Viṣṇu. Brahmā and Viṣṇu said: 1. O Lord, please tell us the characteristic feature of the five-fold duties beginning with creation. Śiva said: I shall tell you the great secret of the five-fold duties, out of compassion for you. 2. O Brahmā and Viṣṇu, the permanent cycle of the five-fold duties consists of creation, maintenance, annihilation, concealment, and blessing. 3. \tl{Sarga} is the creation of the world; \tl{sthiti} is its maintenance; \tl{saṃhāra} is the annihilation; \tl{tirobhāva} is the removal and concealment; 4. Liberation (from the cycle of birth and death) is blessing. These five are my activities but are carried on by others silently as in the case of the statue at the Portal. 5. The first four activities concern the evolution of the world and the fifth one is the cause of salvation. All these constitute my prerogatives. 6-8. These activities are observed in the five elements by devotees—\tl{sarga} (creation) in the Earth, \tl{sthiti} (maintenance) in the waters, \tl{saṃhāra} (annihilation) in the fire, \tl{tirobhāva} (concealment) in the wind and \tl{anugraha} (liberation, the blessed state) in the firmament. Everything is created by the Earth; everything flourishes by virtue of the waters; everything is urged by the fire, everything is removed by the wind and everything is blessed by the firmament. Thus intelligent men must know the same. 9. In order to look after these five-fold activities (\tl{pañcakṛtya}) I have five faces, four in the four quarters and the fifth in the middle. 10. O sons, in view of your austerities you two have received the first two activities:—creation and maintenance. You have gratified me and are blessed therefore. 11. Similarly, the other two activities (annihilation and concealment) have been assigned to Rudra and Maheśa. The fifth one of \tl{anugraha} (liberation) cannot be taken up by any other. 12. All this previous arrangement has been forgotten by both of you due to lapse of time, not so by Rudra and Maheśa. 13. I have assigned them my equality in form, dress, activity, vehicle, seat, weapons \etc 14. O dear sons, your delusion was the result of your not meditating upon me. If you had retained my knowledge you would not have embibed this false pride of being Maheśa yourselves. 15. Hence, hereafter, both of you shall start reciting the mantra \tl{Oṃkāra} to acquire knowledge of me. It shall quell your false pride as well. 16. I have taught this great auspicious mantra. \tl{Oṃkāra} came out of my mouth. Originally it indicated me. 17. It is the indicator and I am the indicated. This mantra is identical with me. The repetition of this mantra is verily my repeated remembrance. 18-19. The syllable “A” came first from northern face; the syllable “U” from the western; the syllable “M” from the southern and the Bindu (dot) from the eastern face. The Nāda (mystical sound) came from the middle face. Thus the complete set cropped up in five-fold form. Then all of them united in the syllable of “Om”. 20. The two sets of created beings—\tl{nāma} (name) and \tl{rūpa} (form) are pervaded by this mantra. It indicates Śiva and Śakti. 21. From this also is born the five-syllabled mantra (\tl{namaśśivāya}). It indicates all knowledge. The syllables “NA” \etc follow the order of the syllables “A” \etc 22. 
From the five-syllabled mantra the five mothers were born. The \tl{Śiromantra} is born of that. The three-footed \tl{Gāyatrī} also came out of the four faces. 23. The entire set of Vedas and crores of mantras were born of that. Different things are achieved through different mantras but everything is achieved through \tl{Oṃkāra} alone. 24. By this root-mantra, the very enjoyment as well as salvation is achieved. All the royal mantras are auspicious and directly accord enjoyment. Nandikeśvara said: 25. The lord in the company of his consort Ambikā, assumed the role of the preceptor for both of them. He screened them and placed his lotus-like hand on their heads as they faced the north and slowly taught them the great mantra. 26-27. The two disciples received the mantra by repeating it thrice, along with the requisite \tl{yantra} and \tl{tantra} duly expounded. By way of fees, the disciples dedicated themselves. Thereafter standing near him with hands clasped in reverence they addressed the lord, the preceptor of the universe. Brahmā and Viṣṇu said: \begin{shloka}\itshape namo niṣkalarūpāya namo niṣkalatejase\\ namaḥ sakalanāthāya namaste sakalātmane\\ namaḥ praṇavavācyāya namaḥ praṇavaliṃgine\\ namaḥ sṛṣṭyādikartre ca namaḥ paṃcamukhāyate\\ paṃcabrahmasvarūpāya paṃca kṛtyāyate namaḥ\\ ātmane brahmaṇe tubhyamanaṃtaguṇaśaktaye\\ sakalākalarūpāya śaṃbhave gurave namaḥ\\ iti stutvā guruṃ padyairbrahmā viṣṇuśca nematuḥ (28-31) \end{shloka} 28-31. (The prayer): Obeisance to Thee of the bodiless form. Obeisance to Thee of the formless lustre. Obeisance to Thee the lord of everything. Obeisance to Thee the soul of everything or of the embodied form. Obeisance to Thee stated by the \tl{Praṇava}. Obeisance to Thee having \tl{Praṇava} as Thy symbol. Obeisance to Thee the author of creation \etc Obeisance to Thee of five faces. Obeisance to Thee identical with Pañcabrahma form. Obeisance to Thee of five-fold functions. Obeisance to Thee the Ātman, the Brahman, of endless attributes and power. Obeisance to Śiva the preceptor, possessed of both embodied and bodiless forms.” After eulogising the preceptor in verses Brahmā and Viṣṇu bowed to him. Īśvara said: 32. O dear sons, the truthful extract of everything has been narrated to you with demonstration. You shall recite as directed by the Goddess this Om mantra which is identical with me. 33. Your knowledge shall be stabilised. Permanent fortune shall stand by you. On the \tl{caturdaśī} day and on the day with Ārdrā star, the recital of this mantra will give you everlasting efficacy. 34-35. The recital of this mantra at the time when the transit of the sun is in the Ārdrā star is million-fold efficacious. In the context of worship, \tl{homa} and \tl{tarpaṇa}, the last quarter of the star Mṛgaśiras and the first quarter of Punarvasu must always be considered on a par with Ārdrā. The Vision is to be had at early dawn and within three \tl{muhūrtas} (two hours twenty-four minutes) thereafter. 36. \tl{Caturdaśī} is to be taken when it continues up to midnight. If it is only upto the early part of the night and joined with another thereafter, it is also recommended. 37. Although I consider the phallic and the embodied form to be equal, the phallic form is excellent for those who worship. Hence for those who seek salvation the latter is preferable to the former. 38-39. The others too shall install the phallic form with \tl{Oṃkāra} mantra and the embodied form with the five-syllabled mantra, with excellent articles of worship and adore with due homage. 
It will be easy for them to attain my region. Having thus instructed His disciples Śiva vanished there itself.
{ "alphanum_fraction": 0.7850506171, "avg_line_length": 45.06875, "ext": "tex", "hexsha": "f10584d87a5ffe31f7bf0fb496cf38522b90c11b", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "535ad048995a35d3ebabd84842788e60f99158f2", "max_forks_repo_licenses": [ "CC0-1.0" ], "max_forks_repo_name": "elvendrim/shivamahapurana", "max_forks_repo_path": "samhita-vidyeshvara/vidyeshvara-10.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "535ad048995a35d3ebabd84842788e60f99158f2", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "CC0-1.0" ], "max_issues_repo_name": "elvendrim/shivamahapurana", "max_issues_repo_path": "samhita-vidyeshvara/vidyeshvara-10.tex", "max_line_length": 83, "max_stars_count": null, "max_stars_repo_head_hexsha": "535ad048995a35d3ebabd84842788e60f99158f2", "max_stars_repo_licenses": [ "CC0-1.0" ], "max_stars_repo_name": "elvendrim/shivamahapurana", "max_stars_repo_path": "samhita-vidyeshvara/vidyeshvara-10.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 2166, "size": 7211 }
\documentclass[10pt,a4paper]{scrartcl} \usepackage{lmodern} \title{\texttt{php-eps-macros}} \subtitle{A Set of Helper Macros in PHP to Write EPS Files} \author{Romaric Pujol\\\footnotesize{\texttt{[email protected]}}} \date{September 28, 2013} \usepackage{graphicx} \usepackage{listings} \usepackage{caption} \usepackage{amsmath} \newcommand\code[1]{\lstinline{#1}} \lstset{showspaces=false,showstringspaces=false} \lstset{basicstyle=\footnotesize\ttfamily} \newcommand\PS{PostScript} \newenvironment{note}{\par\leavevmode\medskip\textbf{Note.} }{\par\leavevmode\medskip} \usepackage{hyperref} \begin{document} \maketitle \section{Introduction} This document describes a set of PHP helper files and scripts to generate EPS (Encapsulated \PS) files. When writing documents in \LaTeX, the author might want to include figures in it. Figures can be generated in many ways by many existing softwares, but the author might want to make its own from scratch. If she likes programming, the PostScript language is a wonderful option to consider seriously. In this document, we assume that the reader is already familiar with EPS files and the \PS{} language. \section{Directory Content} \begin{itemize} \item \texttt{arrow-head.php}, see Section~\ref{ssec:arrow-head.php}, \item \texttt{coords.php}, see Section~\ref{ssec:coords.php}, \item \texttt{eps-header.php}, see Section~\ref{ssec:eps-header.php}, \item \texttt{fatal.php}, see Section~\ref{ssec:fatal.php}, \item \texttt{hyperbolic.php}, see Section~\ref{ssec:hyperbolic.php}, \item \texttt{newton-solver.php}, see Section~\ref{ssec:newton-solver.php}, \item \texttt{plotfunction-bezier-stupid-hom.php}, see Section~\ref{ssec:plotfunction-bezier-stupid-hom.php}, \item \texttt{plotimplicit-bezier-stupid-hom.php}, see Section~\ref{ssec:plotimplicit-bezier-stupid-hom.php}, \item \texttt{plotparametric-bezier-stupid-hom.php}, see Section~\ref{ssec:plotparametric-bezier-stupid-hom.php}, \item \texttt{SmartInsertFont.php}, see Section~\ref{ssec:SmartInsertFont.php}, \item \texttt{trigonometry.php}, see Section~\ref{ssec:trigonometry.php}, \item \texttt{utils.php}, see Section~\ref{ssec:utils.php}, \item \texttt{vector.php}, see Section~\ref{ssec:vector.php}. \end{itemize} \section{Structure of your PHP file} Your PHP file should start as follows: \lstset{language=PHP} \begin{lstlisting} <?php $lx=0; $ly=0; $ux=200; $uy=300; $author="Romaric Pujol"; $title="An EPS example file"; $date="2013/09/28"; require_once("eps-header.php"); ?> \end{lstlisting} Then, running \code{php file.php} will output the following EPS header: \begin{lstlisting} %!PS-Adobe-2.0 EPSF-3.0 %%BoundingBox: 0 0 200 300 %%Creator: Romaric Pujol %%CreationDate: 2013/09/28 %%Title: An EPS example file %%Pages: 1 %%EndComments \end{lstlisting} Afterwards, you're free to mix PHP and \PS. \section{Description of the PHP Files} \subsection{\texttt{arrow-head.php} --- Making Arrow heads} \label{ssec:arrow-head.php} This file implements one \PS{} function to draw an arrow head. A typical arrow head is represented on Figure~\ref{fig:arrow-head-explained}. 
\begin{figure}[ht!]% \centering \includegraphics{figs/arrow-head/arrow-head-measures}% \caption{Arrow head}% \label{fig:arrow-head-explained} \end{figure} The \code{require_once "arrow-head.php"} command will include the \PS{} function \texttt{ArrowHead} that draws arrow heads, used with the following syntax: \begin{center} \textit{angle}\quad$X$\quad$Y$\quad\texttt{ArrowHead} \end{center} where \textit{angle} is the angle (in degrees) from the horizontal, counterclockwise (Figure~\ref{fig:arrow-head-explained} shows an angle of $180$ degrees) and $(X,Y)$ is the coordinate of the vertex of the arrow. The \PS{} function uses the following variables for the dimensions: \begin{itemize} \item \texttt{/ArrowHeadAngle}: the angle $\theta$. It is set to $15$ by default, \item \texttt{/ArrowHeadProportion}: the ratio $OB/OA$. It is set to $1.67$ by default, \item \texttt{/ArrowHeadHorizontalSize}: the length $OB$. It is set to $3$ by default, \end{itemize} and these values can be modified. \subsection{\texttt{coords.php} --- Dealing with Coordinates} \label{ssec:coords.php} Even though the \PS{} language has these wonderful transformation matrices, it is not always recommended to use them for change of coordinates, since the transformation also changes the strokes (the lines that are drawn). This file will include the \texttt{Coords} \PS{} function, to be used with the following syntax: \begin{center} $x$\quad$y$\quad\texttt{Coords}\quad$X$\quad$Y$ \end{center} where the input is $(x,y)$, the coordinates in the relative frame, and the output $(X,Y)$ is the coordinates in the absolute frame. For this function to work, the information about the relative frame must have been given. It uses eight variables: \texttt{xmin}, \texttt{ymin}, \texttt{xmax}, \texttt{ymax}, \texttt{Xmin}, \texttt{Ymin}, \texttt{Xmax} and \texttt{Ymax}. Include this file (or files that use it) after having defined these eight variables! \subsection{\texttt{eps-header.php} --- Let's Start!} \label{ssec:eps-header.php} You will very likely include this before anything else. Make sure you have the variables \code{$lx}, \code{$ly}, \code{$ux}, \code{$uy} defined. Optionally you can define the variables \code{$author}, \code{$title} and \code{$date}. \subsection{\texttt{fatal.php} --- Die Hard!} \label{ssec:fatal.php} This file only contains the PHP function \texttt{fatal} that takes $1+1$ arguments. The first argument is the error string to be output on \textit{stderr} and the other optional argument is: \begin{itemize} \item either a numerical value that corresponds to the exit code, \item or another string that will be output to \textit{stderr} on a second line, in which case the exit code is $1$. \end{itemize} If this optional argument is not give, the exit code is $1$. \subsection{\texttt{hyperbolic.php} --- Exponential and hyperbolic functions } \label{ssec:hyperbolic.php} This \PS{} set of functions include: \begin{itemize} \item \code{EXP}: the exponential in base $\mathrm{e}$, \item \code{cosh}, \code{sinh}, \code{tanh}, \item \code{arccosh}, \code{arcsinh}, \code{arctanh}. \end{itemize} \subsection{\texttt{newton-solver.php} --- Equation solver} \label{ssec:newton-solver.php} This \PS{} function is to find approximations of roots of an equation of the form $f(x)=0$ using the Newton--Raphson method. 
The syntax is: \begin{center} \texttt{\{}$f$\texttt{\}}\quad\texttt{\{}$f'$\texttt{\}}\quad$x_0$\quad$\varepsilon$\quad\texttt{NewtonSolver}\quad$x^*$ \end{center} where $f$ is the function to find the root of, $f'$ is its derivative, $x_0$ is an approximation of a root and $\varepsilon$ is the wanted accuracy. The method will stop when either $\lvert x_{\text{next}}-x\rvert\leq\varepsilon$ or after \verb=@NewtonSolverMaxIterations= number of iterations. The \PS{} variable \verb=@NewtonSolverMaxIterations= is set to $20$ by default. The approximation of the root, $x^*$ is pushed onto the stack. \subsection{\texttt{plotfunction-bezier-stupid-hom.php} --- Plot Graphs of Functions} \label{ssec:plotfunction-bezier-stupid-hom.php} This \PS{} function is to plot graphs of functions using B\'ezier curves (and not line segments). The result is usually a beautiful smooth curve that scales very well. But there are a few limitations. Read on. Let me first explain the nomenclature: the part \emph{hom} in the name stands for homogeneous, that is the interval is divided into $N$ homogeneous subintervals ($N$ is given by the user). The part \emph{stupid} is because each subinterval of the subdivision is itself divided into three subintervals of equal lengths to determine the control points for the B\'ezier curve. This has some flaws if the function has, e.g., very steep slopes. To be able to use this \PS{} function to plot graphs, you need to know the derivative of your function. The syntax is as follows: \begin{center} \texttt{\{}$f$\texttt{\}}\quad\texttt{\{}$f'$\texttt{\}}\quad$x_{\text{min}}$\quad$x_{\text{max}}$\quad$N$\quad\texttt{PlotFunctionBezierStupidHom} \end{center} Figure~\ref{fig:plotfunction-bezier-stupid-hom-demo} shows the result of: \lstinputlisting[caption=plotfunction-bezier-stupid-hom-demo.php]{figs/plotfunction-bezier-stupid-hom/plotfunction-bezier-stupid-hom-demo.php} Notice the use of curly brackets to enclose $f$ and $f'$! Also notice that I defined functions \texttt{cosrad} and \texttt{sinrad} being the (regular) $\cos$ and $\sin$ functions with the angle given in radians. That's because the \PS{} \texttt{cos} and \texttt{sin} functions take an angle in degrees. \begin{figure}[ht!]% \centering \includegraphics{figs/plotfunction-bezier-stupid-hom/plotfunction-bezier-stupid-hom-demo}% \caption{Graph of $\cos$ using the \texttt{plotfunction-bezier-stupid-hom.php} file}% \label{fig:plotfunction-bezier-stupid-hom-demo}% \end{figure} Even though \texttt{PlotFunctionBezierStupidHom} is a stupid function with a naive algorithm, it still outputs beautiful curves in many cases! In fact, this PHP file include the file \texttt{plotparametric-bezier-stupid-hom.php}, and uses its PlotParametricBezierStupidHom function. The implementation is just: \begin{lstlisting} /PlotFunctionBezierStupidHom { { } { pop 1 } 7 2 roll PlotParametricBezierStupidHom \end{lstlisting} \subsection{\texttt{plotimplicit-bezier-stupid-hom.php} --- Plot Implicit Curve} \label{ssec:plotimplicit-bezier-stupid-hom.php} These \PS{} functions are to plot implicit curves using B\'ezier curves (and not line segments). The result is usually a beautiful smooth curve that scales very well. But there are a few limitations. Read on. Let me first explain the nomenclature: the part \emph{hom} in the name stands for homogeneous, that is the interval of parametrization is divided into $N$ homogeneous subintervals ($N$ is given by the user). 
The part \emph{stupid} is because each subinterval of the subdivision of the parametrization is itself divided into three subintervals of equal lengths to determine the control points for the B\'ezier curve. There are two functions here. Depending on how you want to determine the portion you want to plot, you'll use one or the other. %%TODO: Explain! \subsection{\texttt{plotparametric-bezier-stupid-hom.php} --- Plot Parametric Curves} \label{ssec:plotparametric-bezier-stupid-hom.php} This \PS{} function is to plot parametric curves using B\'ezier curves (and not line segments). The result is usually a beautiful smooth curve that scales very well. But there are a few limitations. Read on. Let me first explain the nomenclature: the part \emph{hom} in the name stands for homogeneous, that is the interval of parametrization is divided into $N$ homogeneous subintervals ($N$ is given by the user). The part \emph{stupid} is because each subinterval of the subdivision of the parametrization is itself divided into three subintervals of equal lengths to determine the control points for the B\'ezier curve. This has some flaws if the function has, e.g., very high velocities. To be able to use this \PS{} function to plot parametric curves, you need to know the derivative of your coordinate functions. The syntax is as follows: \begin{center} \texttt{\{}$x$\texttt{\}}\quad\texttt{\{}$x'$\texttt{\}}\quad\texttt{\{}$y$\texttt{\}}\quad\texttt{\{}$y'$\texttt{\}}\quad$t_{\text{min}}$\quad$t_{\text{max}}$\quad$N$\quad\texttt{PlotParametricBezierStupidHom} \end{center} Figure~\ref{fig:plotparametric-bezier-stupid-hom-demo} shows the result of: \lstinputlisting[caption=plotparametric-bezier-stupid-hom-demo.php]{figs/plotparametric-bezier-stupid-hom/plotparametric-bezier-stupid-hom-demo.php} It plots the parametric curve given by: \[\begin{cases} x'(t)=\dfrac t{2\pi}\cos t\\ y'(t)=\dfrac t{2\pi}\sin t, \end{cases}\qquad t\in[0,4\pi].\] Notice the use of curly brackets to enclose $x$, $x'$, $y$ and $y'$! Also notice that I defined functions \texttt{cosrad} and \texttt{sinrad} being the (regular) $\cos$ and $\sin$ functions with the angle given in radians. That's because the \PS{} \texttt{cos} and \texttt{sin} functions take an angle in degrees. \begin{figure}[ht!]% \centering \includegraphics{figs/plotparametric-bezier-stupid-hom/plotparametric-bezier-stupid-hom-demo}% \caption{Parametric curve using the \texttt{plotparametric-bezier-stupid-hom.php} file}% \label{fig:plotparametric-bezier-stupid-hom-demo}% \end{figure} Even though \texttt{PlotFunctionBezierStupidHom} is a stupid function with a naive algorithm, it still outputs beautiful curves in many cases! \subsection{\texttt{trigonometry.php} --- Elementary Trigonometry in Radians} \label{ssec:trigonometry.php} This just defines the \PS{} macro \verb=pi= for $\pi$ and the $\sin$, $\cos$ $\tan$ and $\operatorname{atan}$ functions in radians as the \PS{} macros \verb=sinr=, \verb=cosr=, \verb=tanr= and \verb=atanr= respectively. \subsection{\texttt{SmartInsertFont.php} --- Inserting Fonts} \label{ssec:SmartInsertFont.php} Text is one of the most difficult aspect of making an EPS file by hand. Putting just one letter here and there is (almost) fine, though. I'm planning to do some nice (and simple!) helpers to place them and maybe to compose simple text and maybe to drive a \TeX engine to typeset some for me\ldots but for now you only have the function PHP \texttt{SmartInsertFont} that will \texttt{locate} the fontname given. 
The syntax is: \begin{center} \texttt{SmartInsertFont\quad(}\quad\textit{fontname\quad[\quad,\quad path regex\quad]}\quad{)} \end{center} This will just dump the font in the document. \begin{figure}[ht!]% \centering \includegraphics{figs/SmartInsertFont/SmartInsertFont-demo}% \caption{\texttt{SmartInsertFont} helps inserting fonts in EPS document}% \label{fig:SmartInsertFont-demo.php}% \end{figure} Figure~\ref{fig:SmartInsertFont-demo.php} was obtained thus: \lstinputlisting[caption=SmartInsertFont-demo.php]{figs/SmartInsertFont/SmartInsertFont-demo.php} \begin{note} You need to have \texttt{t1ascii} installed to use this function. \end{note} \subsection{\texttt{utils.php} --- Miscellaneous Utilies} \label{ssec:utils.php} This file contains miscellaneous PostScript macros: \begin{center} $a$ $b$ \texttt{min}\\ $a$ $b$ \texttt{max} \end{center} ($\min$ and $\max$ operators). \subsection{\texttt{vector.php} --- Drawing Arrows} \label{ssec:vector.php} This file defines the \PS{} function \texttt{Vector} that draws a line segment with an arrow head. If the length of the line segment is less than the size of the arrow head, nothing is drawn (so as to not have an ugly arrow head on its own). The syntax is as follows: \begin{center} $X_a$\quad$Y_a$\quad$X_b$\quad$Y_b$\quad\texttt{Vector} \end{center} This will draw a vector from $(X_a,Y_a)$ to $(X_b,Y_b)$. This file implicitly calls \texttt{arrow-head.php} and uses the \PS{} function \texttt{ArrowHead} for the arrow head. Figure~\ref{fig:vector-demo} represents such a vector. \begin{figure}[ht!]% \centering \includegraphics{figs/vector/vector-demo}% \caption{Vectors drawn by the \PS{} function \texttt{Vector} from file \texttt{vector.php}}% \label{fig:vector-demo}% \end{figure} There's also the \texttt{DoubleVector} macro that draws an arrow head at both sides of the segment: \begin{center} $X_a$\quad$Y_a$\quad$X_b$\quad$Y_b$\quad\texttt{DoubleVector} \end{center} \section{Tips} In this section I briefly describe my workflow tools for creating \LaTeX{} documents with \PS{} illustrations. It works perfectly well for me. I hope it can be useful to some other people. \subsection{The Main Directory} Each document I created is fully contained in one directory. I use the \texttt{git} version control system and a private server for backups, but that's not mandatory. Let's say the directory corresponding to my document is named \texttt{mydoc}. If the document contains several chapters or parts, I put each chapter or part in a different file. The tree structure of \texttt{mydoc} is as follows: \begin{lstlisting} $ tree -a . |__ bin |__ some binarys (optional)... |__ document.tex |__ chapter1.tex |__ chapter2.tex ... |__ figs |__ figs_chapter1 |__ figure1.php |__ figure2.php |__ ... |__ Makefile -> ../Makefile-figs |__ figs_chapter2 |__ figure1.php |__ figure2.php |__ ... |__ Makefile -> ../Makefile-figs |__ ... |__ Makefile |__ Makefile-figs |__ .git |__ the git stuff ... |__ .gitignore |__ Makefile |__ php |__ the php files of this package \end{lstlisting} \subsection{The \texttt{Makefile}s} Im \texttt{mydoc/} I have a main \texttt{Makefile} with several rules. I use \texttt{latexmk} to generate the document. 
It typically looks like: \lstinputlisting[caption=\texttt{Makefile},language=make]{Makefile} Then, in the \texttt{figs/} directory, I have two \texttt{Makefile}s: \lstinputlisting[caption=\texttt{figs/Makefile},language=make]{figs/Makefile} and \lstinputlisting[caption=\texttt{figs/Makefile-figs},language=make]{figs/Makefile-figs} Then, in each \texttt{figs/chapter$x$} directory I soft link the \texttt{Makefile-figs} to a \texttt{Makefile} file using the command: \begin{center} \texttt{ln -s ../Makefile-figs ./Makefile} \end{center} Then I can add any directories in \texttt{figs/}, perform the previous command, and each PHP file in there will be converted to EPS and PDF automatically. \subsection{The Workflow} Generating my document is as easy as typing \lstinline{make} and adding new figures is just a matter of writing the PHP file in the corresponding \texttt{figs/chapter$x$} directory. The rule \texttt{make clean} or \texttt{make C} will clean up everything. \end{document}
{ "alphanum_fraction": 0.7544718191, "avg_line_length": 43.7881773399, "ext": "tex", "hexsha": "eaeb6898cf0e07fbe61c48534ca654a41cdc9892", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "2c3ac300b4e1c55a4c4efc39cd6dad40c6f0bde3", "max_forks_repo_licenses": [ "WTFPL" ], "max_forks_repo_name": "rpujol/php-eps-macros", "max_forks_repo_path": "doc/php-eps-macros-doc.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "2c3ac300b4e1c55a4c4efc39cd6dad40c6f0bde3", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "WTFPL" ], "max_issues_repo_name": "rpujol/php-eps-macros", "max_issues_repo_path": "doc/php-eps-macros-doc.tex", "max_line_length": 210, "max_stars_count": null, "max_stars_repo_head_hexsha": "2c3ac300b4e1c55a4c4efc39cd6dad40c6f0bde3", "max_stars_repo_licenses": [ "WTFPL" ], "max_stars_repo_name": "rpujol/php-eps-macros", "max_stars_repo_path": "doc/php-eps-macros-doc.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 5146, "size": 17778 }
\section{Temporal difference learning for Markov Decision Processes}
{ "alphanum_fraction": 0.8309859155, "avg_line_length": 17.75, "ext": "tex", "hexsha": "575339d4796b9375225403256539713243e7dfc2", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "adamdboult/nodeHomePage", "max_forks_repo_path": "src/pug/theory/ai/reinforcement/02-00-temporal.tex", "max_issues_count": 6, "max_issues_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93", "max_issues_repo_issues_event_max_datetime": "2022-01-01T22:16:09.000Z", "max_issues_repo_issues_event_min_datetime": "2021-03-03T12:36:56.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "adamdboult/nodeHomePage", "max_issues_repo_path": "src/pug/theory/ai/reinforcement/02-00-temporal.tex", "max_line_length": 68, "max_stars_count": null, "max_stars_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "adamdboult/nodeHomePage", "max_stars_repo_path": "src/pug/theory/ai/reinforcement/02-00-temporal.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 13, "size": 71 }
\section{Pretty Printer}~\label{sec:pretty}
\todo{Discuss Pretty printer Implementation}

There are several prettyPrinters.
\begin{itemize}
\item One in copilot-sbv that prettyprints an expression to an ACSL contract.
\item One in copilot-core that prettyprints an expression to a Dot graph. This prettyPrinter has two entry points: prettyPrintDot, which prints the whole spec, and prettyExprDot, which takes a boolean parameter telling whether the prettyPrinter has to go through the external functions (print parameters recursively, ..., True for yes, False for no). When generating the Dot source for each C source file, we do not go through the external functions, because SBV does not do so. But if you want a more global view, you should go through them. In this case, it is better to use the prettyPrintDot function, which sets this parameter to True directly.
\item One in copilot-core that prettyprints an expression to plain text (and hence it is a text version of the Dot one).
\end{itemize}

The prettyPrinting technology, specially developed for Copilot by Interns, Supervisors and Associates Inc.$\circledR$, consists of an induction on the syntax of the language, that is, an induction on the AST. If the node of the AST is an addition, it prints ``+'', if it is a multiplication, it prints ``*'', and the same for all kinds of nodes. This unique technology allows us to prettyPrint efficiently an expression originally in the form of (sin a + cos b * ln 3) into (sin a + cos b * ln 3).

A more evolved version of it adds a unique tag to all nodes, in order to print a Dot graph of it. This could have been written using a monad, but why use monads when gorier ways exist for doing this (cf. prettyDot, which is not so pretty). Actually, we take one parameter, which is an integer (the current tag), and another integer parameter, which is the parent node to which we should create an edge. Whenever we create a new node, we make an edge to the father and increment the tag. Now the new node is a father for all of its sons. Colors for each node are added, according to the type of the node (unary or binary operation, label, function call, ...), and those colors were chosen randomly by a Drunken Sailor.
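As a rough illustration of the tag-and-parent bookkeeping described above, here is a small sketch in Haskell. It uses a simplified, hypothetical expression type, not Copilot's actual \texttt{Expr} type or the real prettyDot code, and it shows how the current tag and the parent identifier can be threaded explicitly through the induction:

\begin{verbatim}
-- Hypothetical, simplified expression type for illustration only.
data Expr = Const Double
          | Var String
          | Add Expr Expr
          | Mul Expr Expr

-- prettyDot e parent tag returns the Dot source for e together with
-- the next free tag.  Each node prints itself, draws an edge to its
-- parent, and becomes the parent of its own children.
prettyDot :: Expr -> Int -> Int -> (String, Int)
prettyDot e parent tag = case e of
  Const c   -> (node (show c) ++ edge, tag + 1)
  Var x     -> (node x ++ edge, tag + 1)
  Add e1 e2 -> binary "+" e1 e2
  Mul e1 e2 -> binary "*" e1 e2
  where
    node lbl = show tag ++ " [label=\"" ++ lbl ++ "\"];\n"
    edge     = show parent ++ " -> " ++ show tag ++ ";\n"
    binary op e1 e2 =
      let (s1, t1) = prettyDot e1 tag (tag + 1)
          (s2, t2) = prettyDot e2 tag t1
      in  (node op ++ edge ++ s1 ++ s2, t2)
\end{verbatim}

Coloring nodes by their kind then amounts to choosing extra attributes in \texttt{node}; the same threading could of course also be hidden in a State monad.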
{ "alphanum_fraction": 0.7746220797, "avg_line_length": 145.5333333333, "ext": "tex", "hexsha": "83e5dcd4b2c3439273a5c48e5f7df8cd54b2ba21", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "caccad918b23dae991095344a845827ddccd6047", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "Copilot-Language/copilot-discussion", "max_forks_repo_path": "TutorialAndDevGuide/DevGuide/PrettyPrinter.tex", "max_issues_count": 30, "max_issues_repo_head_hexsha": "caccad918b23dae991095344a845827ddccd6047", "max_issues_repo_issues_event_max_datetime": "2021-09-07T22:34:17.000Z", "max_issues_repo_issues_event_min_datetime": "2019-04-01T20:24:19.000Z", "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "Copilot-Language/copilot-discussion", "max_issues_repo_path": "TutorialAndDevGuide/DevGuide/PrettyPrinter.tex", "max_line_length": 718, "max_stars_count": 6, "max_stars_repo_head_hexsha": "caccad918b23dae991095344a845827ddccd6047", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "Copilot-Language/copilot-discussion", "max_stars_repo_path": "TutorialAndDevGuide/DevGuide/PrettyPrinter.tex", "max_stars_repo_stars_event_max_datetime": "2021-05-17T13:20:09.000Z", "max_stars_repo_stars_event_min_datetime": "2015-06-10T00:44:21.000Z", "num_tokens": 505, "size": 2183 }
\documentclass{homework} \course{Math 5522H} \author{Jim Fowler} \input{preamble} \DeclareMathOperator{\Res}{Res} \begin{document} \maketitle \begin{inspiration} Luck is the residue of design \byline{Branch Rickey} \end{inspiration} \section{Terminology} \begin{problem} Define the \textbf{residue} of $f$ at the point $z$, which we write $\Res(f,z)$. \end{problem} \section{Numericals} \begin{problem} Evaluate $\Res(f,z)$ for the function $f(z) = \displaystyle\frac{e^z}{z^2-1}$. \end{problem} \begin{problem}\label{residues-all-one}Evaluate $\Res(f,z)$ for the function $f(z) = \pi \cot (\pi z)$. \end{problem} \begin{problem}\label{residue-coth}Compute $\Res(f,\pm bi)$ for the function \[ f(z) = \frac{\pi \cot(\pi z)}{z^2 + b^2}. \] \end{problem} \begin{problem} Evaluate the integrals \[ \int_{-\infty}^\infty \frac{\sin x}{1+x^2} \, dx \mbox{ and } \int_{-\infty}^\infty \frac{\cos x}{1+x^2} \, dx. \] \end{problem} \begin{problem}\label{integral-for-euler-reflection}For a real number $\lambda \in (0,1)$, evaluate \[ \int_{-\infty}^\infty \frac{e^{\lambda x}}{1 + e^x} \, dx. \] \end{problem} \begin{problem} Evaluate the integral $\displaystyle\int_{0}^{2\pi} \frac{1}{3 + \sin^2 x} \, dx$. \end{problem} \begin{problem} Evaluate the integral $\displaystyle \int_0^\pi \log \sin x \, dx$. \end{problem} \begin{problem} Evaluate the integral $\displaystyle\int_{-\infty}^{\infty} \frac{1-x}{1-x^7} \, dx$. \end{problem} \begin{problem} Show that $f(z) = z^5 + 15z - 1$ has five zeros in $B_2(0)$, and one zero in $B_{1/15}(0)$. \end{problem} \begin{problem}\label{form-at-infinity}Set $w = 1/z$ and compute $f(w) \, dw$ in terms of $f(z) \, dz$. \end{problem} \section{Exploration} \begin{problem} Fix $w \in \R$ with $w>0$. By the intermediate value theorem, the polynomial $f(z) = z^4 + 4w^3 z - 1$ has a real root in the interval $(-\infty,0)$ and another in $(0,1)$, along with two complex roots $a \pm bi$. Use Gauss-Lucas (\ref{gauss-lucas}) and Rouch\'e's (\ref{rouches-theorem}) theorem to describe a subset of $\C$ containing $a\pm bi$. \end{problem} \begin{problem} What is the correct definition of $\Res(f,\infty)$? Generally, we would shift the viewport by replacing $f$ with the function $g(z) = f(1/z)$ and then we study $g$ near $z = 0$ in order to investigate ``$f$ near $\infty$.'' Does \ref{form-at-infinity} help here? \end{problem} \begin{problem}\label{rouches-theorem}Suppose $U$ is an open set containing the closed disk $D_r(z_0)$ and $f, g : U \to \C$ are holomorphic functions satisfying \[ \abs{f(z)} > \abs{g(z)} \] for $z \in \partial D_r(z_0)$. Prove \textbf{Rouch\'e's theorem} that $f$ and $f+g$ have the same number of zeros, counted with multiplicity, in $B_r(z_0)$. (This is sometimes called the dog leash theorem---can you see why?) \end{problem} \begin{problem} Use \ref{rouches-theorem} to give another proof of the Fundamental Theorem of Algebra. \end{problem} \begin{problem} It is important to recognize patterns in how residues equip us to evaluate integrals, so that, when presented with a fresh integration problem, we have some ideas about the best tool for the task. Develop a tool for evaluating \[ \int_0^{2\pi} f(\cos \theta,\sin \theta) \, d\theta \] via residues. Here $f$ is a rational function, explain how to make the substitution $z = e^{i\theta}$ to rewrite $f(\cos \theta,\sin \theta)$ in terms of $z$, and similarly rewrite $d\theta$ in terms of $dz$ to reduce the given integral to a certain contour integral. 
\end{problem} \begin{problem}\label{summation-theorem}Sometimes the residue calculus permits us to sum series. Suppose $f$ is meromorphic with poles $z_1,\ldots,z_N$ with $z_j \not\in \Z$. Our goal is a formula \[ \sum_{n=-\infty}^{\infty} f(n) = - \sum_{j=1}^N \Res(g, z_j) \] where $g(z) = \pi \cot(\pi z) f(z)$. \textit{Hint:} Consider a square contour $R$ centered at the origin with side-length $2N+1$. You may assume that $\pi \cot(\pi z)$ is bounded on $R$ independent of $N$. You will then need to impose some bound on $f$ on the contour $R$. \end{problem} \begin{problem} Apply \ref{residue-coth} and \ref{summation-theorem} to evaluate \[ \sum_{n=-\infty}^\infty \frac{1}{n^2 + b^2}. \] \end{problem} \section{Prove or Disprove and Salvage if Possible} \begin{problem}\label{zero-residue-not-enough}Suppose $f : D_r(z_0) \to \C$ is holomorphic. The residue $\Res(f,z_0)$ vanishes if and only if the singularity $z_0$ is removable. \end{problem} \begin{problem}\label{open-mapping-theorem}If $U \subset \C$ is open and $f : U \to \C$ is holomorphic, then $f(U)$ is open. % missing nonconstant, can be proved from rouche \end{problem} \end{document}
{ "alphanum_fraction": 0.6667357513, "avg_line_length": 32.6013513514, "ext": "tex", "hexsha": "b4fc6f58a74a4faafcaacedbd744f45382c4f8f6", "lang": "TeX", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2021-01-11T18:43:51.000Z", "max_forks_repo_forks_event_min_datetime": "2021-01-11T18:43:51.000Z", "max_forks_repo_head_hexsha": "c9fc5eb915c6d29d91a864dfe066878b75305c42", "max_forks_repo_licenses": [ "CC-BY-4.0" ], "max_forks_repo_name": "kisonecat/math5522h", "max_forks_repo_path": "problem-sets/set09.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "c9fc5eb915c6d29d91a864dfe066878b75305c42", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "CC-BY-4.0" ], "max_issues_repo_name": "kisonecat/math5522h", "max_issues_repo_path": "problem-sets/set09.tex", "max_line_length": 277, "max_stars_count": 1, "max_stars_repo_head_hexsha": "c9fc5eb915c6d29d91a864dfe066878b75305c42", "max_stars_repo_licenses": [ "CC-BY-4.0" ], "max_stars_repo_name": "kisonecat/math5522h", "max_stars_repo_path": "problem-sets/set09.tex", "max_stars_repo_stars_event_max_datetime": "2021-01-13T03:38:29.000Z", "max_stars_repo_stars_event_min_datetime": "2021-01-13T03:38:29.000Z", "num_tokens": 1678, "size": 4825 }
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% CS240: Programming in C
% Copyright 2016 Pejman Ghorbanzade <[email protected]>
% Creative Commons Attribution-ShareAlike 4.0 International License
% https://github.com/ghorbanzade/UMB-CS240-2016S/blob/master/LICENSE
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\def \topDirectory {.}
\def \resDirectory {\topDirectory/src/c/main/ls08}
\def \texDirectory {\topDirectory/src/tex}
\def \styDirectory {\texDirectory/sty}
\def \cfgDirectory {\texDirectory/cfg}
\def \imgDirectory {\texDirectory/img}
\documentclass[compress]{beamer}
%\mode<presentation>
%\usetheme{default}
\usepackage{\styDirectory/directives}
\input{\cfgDirectory/config}
\usepackage{\styDirectory/beamerthemePejman}
\doc{number}{8}
%\setbeamertemplate{footline}[text line]{}
\begin{document}
\prepareCover
\section{Recursion}
\begin{slide}
\begin{block}{Definition}
A recursive function is a function whose block statements include a call to itself.
\end{block}
\end{slide}
\begin{slide}
\begin{block}{Example}
\inputminted[
fontsize=\scriptsize,
firstline=10,
linenos
]{c}{\resDirectory/hellos.c}
\end{block}
\end{slide}
\begin{slide}
\begin{block}{Conditions}
\begin{itemize}
\item[] The recursive call chain must terminate at some point.
\item[] A recursive call must be computationally less complicated.
\item[] No two recursive calls should overlap.
\end{itemize}
\end{block}
\end{slide}
\begin{slide}
\begin{block}{Caution}
\begin{itemize}
\item[] Numerous recursive calls will cause prohibitive memory allocation, resulting in a stack overflow.
\item[] Recursion simplifies implementation yet increases space complexity.
\end{itemize}
\end{block}
\end{slide}
\section{Macros}
\begin{slide}
\begin{block}{Definition}
A macro is a fragment of code which has been given a name. Whenever the name is used, it is replaced by the content of the macro.
\begin{terminal}
#define @*\textit{macro\_name}*@ @*\textit{token\_sequence}*@
\end{terminal}
\end{block}
\end{slide}
\begin{slide}
\begin{block}{Types}
\begin{description}
\item[Object-like Macros] Macros that resemble data objects
\item[Function-like Macros] Macros that resemble function calls
\end{description}
\end{block}
\end{slide}
\begin{slide}
\begin{block}{Object-like Macros}
An object-like macro is a simple identifier to be replaced by a code fragment. It is most commonly used to give symbolic names to constants.
\begin{terminal}
#define BUFFER_SIZE 256
\end{terminal}
\end{block}
\end{slide}
\begin{slide}
\begin{block}{Function-like Macros}
A function-like macro is a macro whose identifier is followed by a list of parameters. When a macro is \textit{expanded}, each use of a parameter in its body is replaced by the tokens of the corresponding argument.
\begin{terminal}
#define max(x, y) ((x > y) ? x : y)
\end{terminal}
\end{block}
\end{slide}
\begin{slide}
\begin{block}{Coding Style Convention}
\begin{itemize}
\item[] To easily distinguish them from variables, macro names are written in upper case.
\item[] To avoid common pitfalls, using function-like macros is often discouraged.
\end{itemize}
\end{block}
\end{slide}
\section{Registers}
\begin{slide}
\begin{block}{Definition}
Computer processors provide a very limited number of temporary data storage units, called registers. Retrieving data for processing from registers is much faster than from memory. Hence, storing a heavily used variable in a register allows efficient access to and manipulation of its value.
\end{block} \end{slide} \begin{slide} \begin{block}{Usage} C provides the \alert{\texttt{register}} keyword to \emph{ask} for a variable to be stored in a register. The compiler will fulfill your request, if a free register is available and it agrees that the specified variable should be stored in a register. \begin{terminal} register int i = 1; \end{terminal} \end{block} \end{slide} \begin{slide} \begin{block}{Drawback} Once petitioned to be stored in a register, you may not access the address of the variable. \end{block} \end{slide} \begin{slide} \begin{block}{Note} Modern compilers will automatically store heavily used variables in registers. \end{block} \end{slide} \end{document}
{ "alphanum_fraction": 0.7224517906, "avg_line_length": 24.2, "ext": "tex", "hexsha": "31a295ba28db1aeed95eea5dd91b3495724bf024", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "c32c866cbe5f7d7044f51f2bcd689b33bda61980", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "ghorbanzade/UMB-CS240-2016S", "max_forks_repo_path": "src/tex/main/ls08/ls08.tex", "max_issues_count": 6, "max_issues_repo_head_hexsha": "c32c866cbe5f7d7044f51f2bcd689b33bda61980", "max_issues_repo_issues_event_max_datetime": "2016-06-20T03:04:35.000Z", "max_issues_repo_issues_event_min_datetime": "2016-05-16T23:55:39.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "ghorbanzade/UMB-CS240-2016S", "max_issues_repo_path": "src/tex/main/ls08/ls08.tex", "max_line_length": 146, "max_stars_count": 1, "max_stars_repo_head_hexsha": "c32c866cbe5f7d7044f51f2bcd689b33bda61980", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "ghorbanzade/UMB-CS240-2016S", "max_stars_repo_path": "src/tex/main/ls08/ls08.tex", "max_stars_repo_stars_event_max_datetime": "2020-05-03T18:41:24.000Z", "max_stars_repo_stars_event_min_datetime": "2020-05-03T18:41:24.000Z", "num_tokens": 1158, "size": 4356 }
\section{$n$-step Bootstrapping}

\subsection{$n$-step TD Prediction}
$n$-step TD prediction is still a TD method because it updates earlier estimates. It does not update anything during the first $n-1$ steps. If $t + n \geq T$, the missing terms are treated as $0$. It is defined as:
\begin{equation}
G_{t:t+n} = R_{t+1} + \gamma R_{t+2} + \dots + \gamma^{n-1} R_{t+n} + \gamma^n V_{t+n-1}(S_{t+n})
\end{equation}
Algorithm (\ref{algo:nsteptdpredition}) contains the details.

\begin{algorithm}
\caption{$n$-step TD prediction, estimate $v_\pi$}\label{algo:nsteptdpredition}
\begin{algorithmic}[1]
\State $ \alpha \in (0,1]$
\State $V(s) \gets$ random
\State $t \gets 0$
\Statex
\Loop
\State choose $S_0$
\State $T \gets \infty$
\While{$\tau < T - 1$}
\If{$t < T$}
\State take action according to $\pi(\cdot|S_t)$
\State store $R_{t+1}$ and $S_{t+1}$
\If{$S_{t+1}$ is terminal}
\State $T \gets t+1$
\EndIf
\EndIf
\State $\tau \gets t - n + 1$ \Comment $\tau$ is the pivot of the update
\If{$\tau \geq 0$}
\State $G \gets \sum\limits_{i=\tau+1}^{\min (\tau+n,T)} \gamma^{i-\tau-1} R_i$ \Comment $G_{\tau:\tau+n}$
\If{$\tau + n < T$}
\State $G \gets G + \gamma^n V(S_{\tau + n})$
\EndIf
\State $V(S_\tau) \gets V(S_\tau) + \alpha \Big(G - V(S_\tau)\Big)$
\EndIf
\State $t \gets t+1$
\EndWhile
\EndLoop
\end{algorithmic}
\end{algorithm}

\subsection{$n$-step Sarsa}
It is the same as $n$-step TD prediction, but with action values $q$ and an $\varepsilon$-greedy policy.
\begin{equation}
G_{t:t+n} = R_{t+1} + \gamma R_{t+2} + \dots + \gamma^{n-1} R_{t+n} + \gamma^n Q_{t+n-1}(S_{t+n},A_{t+n})
\end{equation}
Algorithm (\ref{algo:nstepsarsa}) contains the details.

\begin{algorithm}
\caption{$n$-step Sarsa, estimate $q_\pi$ or $q_*$}\label{algo:nstepsarsa}
\begin{algorithmic}[1]
\State $ \alpha \in (0,1]$
\State $Q(s,a) \gets$ random
\State $\pi \gets$ random $\varepsilon$-greedy policy or a given fixed policy
\State $t \gets 0$
\Statex
\Loop
\State choose $S_0$
\State choose action $A_0 \sim \pi (\cdot | S_0)$
\State $T \gets \infty$
\While{$\tau < T - 1$}
\If{$t < T$}
\State take action $A_t$ and store $R_{t+1}$ and $S_{t+1}$
\If{$S_{t+1}$ is terminal}
\State $T \gets t+1$
\Else
\State choose $A_{t+1} \sim \pi(\cdot|S_{t+1})$
\EndIf
\EndIf
\State $\tau \gets t - n + 1$ \Comment $\tau$ is the pivot of the update
\If{$\tau \geq 0$}
\State $G \gets \sum\limits_{i=\tau+1}^{\min (\tau+n,T)} \gamma^{i-\tau-1} R_i$
\If{$\tau + n < T$}
\State $G \gets G + \gamma^n Q(S_{\tau + n}, A_{\tau + n})$ \Comment $G_{\tau:\tau+n}$
\EndIf
\State $Q(S_{\tau}, A_{\tau}) \gets Q(S_{\tau}, A_{\tau}) + \alpha \Big(G - Q(S_{\tau}, A_{\tau})\Big)$
\State update $\pi_*$ \Comment update as an $\varepsilon$-greedy policy if calculating $q_*$
\EndIf
\State $t \gets t+1$
\EndWhile
\EndLoop
\end{algorithmic}
\end{algorithm}

\subsection{$n$-step Expected Sarsa}
It is the same as $n$-step Sarsa except that it uses an expectation at the last step:
\begin{equation}
G_{t:t+n} = R_{t+1} + \gamma R_{t+2} + \dots + \gamma^{n-1} R_{t+n} + \gamma^n \sum_a \pi(a|S_{t+n}) Q_{t+n-1}(S_{t+n},a)
\end{equation}

\subsection{$n$-step Off-policy Learning}
Note: $V_{t+n}$ and $Q_{t+n}$ denote the results of the ($t+n$)th iteration.
\subsubsection{$n$-step Off-policy TD}
For $0 \leq t < T$, the update formula is:
\begin{equation}
V_{t+n}(S_t)=V_{t+n-1}(S_t)+\alpha \prod_{k=t}^{\min (h,T-1)} \frac{\pi(A_k|S_k)}{b(A_k|S_k)} [G_{t:t+n} - V_{t+n-1}(S_t)]
\end{equation}

\subsubsection{$n$-step Off-policy Sarsa}
For $0 \leq t < T$, the update formula is:
\begin{equation}
\begin{split}
Q_{t+n}(S_t,A_t)=&Q_{t+n-1}(S_t,A_t) \\
&+\alpha \prod_{k=t}^{\min (h,T-1)} \frac{\pi(A_k|S_k)}{b(A_k|S_k)} [G_{t:t+n} - Q_{t+n-1}(S_t,A_t)]
\end{split}
\end{equation}
See Algorithm (\ref{algo:nstepoffsarsa}) for details.

\begin{algorithm}
\caption{Off-policy $n$-step Sarsa, estimate $q_\pi$ or $q_*$}\label{algo:nstepoffsarsa}
\begin{algorithmic}[1]
\State $ \alpha \in (0,1]$
\State $Q(s,a) \gets$ random
\State $\pi \gets$ random $\varepsilon$-greedy policy
\State $t \gets 0$
\Statex
\Loop
\State choose $S_0$
\State choose action $A_0 \sim \pi (\cdot | S_0)$
\State $T \gets \infty$
\While{$\tau < T - 1$}
\If{$t < T$}
\State take action $A_t$ and store $R_{t+1}$ and $S_{t+1}$
\If{$S_{t+1}$ is terminal}
\State $T \gets t+1$
\Else
\State choose $A_{t+1} \sim \pi(\cdot|S_{t+1})$
\EndIf
\EndIf
\State $\tau \gets t - n + 1$ \Comment $\tau$ is the pivot of the update
\If{$\tau \geq 0$}
\State $\rho \gets \prod\limits_{i=\tau+1}^{\min (\tau+n-1,T-1)} \frac{\pi(A_i|S_i)}{b(A_i|S_i)}$
\State $G \gets \sum\limits_{i=\tau+1}^{\min (\tau+n,T)} \gamma^{i-\tau-1} R_i$
\If{$\tau + n < T$}
\State $G \gets G + \gamma^n Q(S_{\tau + n}, A_{\tau + n})$ \Comment $G_{\tau:\tau+n}$
\EndIf
\State $Q(S_{\tau}, A_{\tau}) \gets Q(S_{\tau}, A_{\tau}) + \alpha \rho \Big(G - Q(S_{\tau}, A_{\tau})\Big)$
\State update $\pi_*$ \Comment update as an $\varepsilon$-greedy policy if calculating $q_*$
\EndIf
\State $t \gets t+1$
\EndWhile
\EndLoop
\end{algorithmic}
\end{algorithm}

\subsection{$n$-step Tree Backup Algorithm}
This is an \cindex{off-policy} learning algorithm that works without \cindex{importance sampling}. At each step along the sampled trajectory, the non-visited nodes (the actions not taken) contribute their probability-weighted estimates according to the policy, while the visited node contributes the updated bootstrapping result.
\begin{equation}
\begin{split}
G_{t:t+n}&=R_{t+1}\\
&+\gamma \sum_{a\neq A_{t+1}} \pi(a|S_{t+1}) Q_{t+n-1}(S_{t+1},a) \text{ \# other branches} \\
&+ \gamma \pi(A_{t+1}|S_{t+1})G_{t+1:t+n} \text{ \# main sample path}
\end{split}
\end{equation}
See Algorithm (\ref{algo:nsteptreebackup}) for details.
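As an illustration only (this is not part of the pseudocode), the backward computation of the tree-backup target in Algorithm (\ref{algo:nsteptreebackup}) can be sketched in Python as follows. The names are made up for the example: $Q$ is assumed to be a dictionary keyed by (state, action), \texttt{pi(a, s)} returns $\pi(a|s)$, and \texttt{S}, \texttt{A}, \texttt{R} hold the stored states, actions and rewards of the current episode (with rewards indexed up to $T$):

\begin{verbatim}
# Sketch: compute the n-step tree-backup target G for pivot tau,
# given the stored transitions S[k], A[k], R[k] of one episode.
def tree_backup_target(Q, pi, actions, S, A, R, tau, t, T, gamma):
    if t + 1 >= T:
        G = R[T]                      # the episode ended inside the window
        last = T - 1
    else:
        G = R[t + 1] + gamma * sum(pi(a, S[t + 1]) * Q[S[t + 1], a]
                                   for a in actions)
        last = t
    # Fold the target backward toward tau, as in the equation above.
    for k in range(last, tau, -1):
        expected_others = sum(pi(a, S[k]) * Q[S[k], a]
                              for a in actions if a != A[k])
        G = R[k] + gamma * expected_others + gamma * pi(A[k], S[k]) * G
    return G
\end{verbatim}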
\begin{algorithm}
\caption{$n$-step tree backup, estimate $q_\pi$ or $q_*$}\label{algo:nsteptreebackup}
\begin{algorithmic}[1]
\State $ \alpha \in (0,1]$
\State $Q(s,a) \gets$ random
\State $\pi \gets$ random $\varepsilon$-greedy policy
\Statex
\Loop
\State choose $S_0$
\State choose action $A_0 \sim \pi (\cdot | S_0)$
\State $T \gets \infty$
\State $t \gets 0$
\While{$\tau < T - 1$}
\If{$t < T$}
\State take action $A_t$ and store $R_{t+1}$ and $S_{t+1}$
\If{$S_{t+1}$ is terminal}
\State $T \gets t+1$
\Else
\State choose $A_{t+1} \sim \pi(\cdot|S_{t+1})$
\EndIf
\EndIf
\State $\tau \gets t - n + 1$ \Comment $\tau$ is the pivot of the update
\If{$\tau \geq 0$}
\If{$t+1 \geq T$}
\State $G \gets R_T$
\Else
\State $G \gets R_{t+1} + \gamma \sum\limits_a \pi(a|S_{t+1}) Q(S_{t+1},a)$
\EndIf
\State \Comment update $G$ backward using the tree-backup method
\For{$k \gets [\min (t,T-1),\dots,\tau+1]$}
\State $G \gets R_k + \gamma \sum\limits_{a\neq A_k} \pi(a|S_k)Q(S_k,a) + \gamma \pi (A_k|S_k)G$
\EndFor
\State $Q(S_{\tau}, A_{\tau}) \gets Q(S_{\tau}, A_{\tau}) + \alpha \Big(G - Q(S_{\tau}, A_{\tau})\Big)$
\State update $\pi_*$ \Comment update as an $\varepsilon$-greedy policy if calculating $q_*$
\EndIf
\State $t \gets t+1$
\EndWhile
\EndLoop
\end{algorithmic}
\end{algorithm}

\subsection{$n$-step off-policy $Q(\sigma)$}
Let the random variable $\sigma_t \in \{0,1\}$ (e.g., Bernoulli-distributed) indicate the degree of sampling on step $t$, with $\sigma = 1$ meaning full sampling and $\sigma = 0$ meaning pure expectation. The formula is:
\begin{equation}
\begin{split}
G_{t:h} =& R_{t+1}+\gamma \sum_{a\neq A_{t+1}} \pi(a|S_{t+1}) Q_{h-1}(S_{t+1},a) + \gamma \pi(A_{t+1}|S_{t+1})G_{t+1:h} \\
=& R_{t+1} + \left (\gamma \sum_a \pi(a|S_{t+1})Q_{h-1}(S_{t+1},a) - \gamma \pi(A_{t+1}|S_{t+1})Q_{h-1}(S_{t+1},A_{t+1}) \right)\\
&+ \gamma \pi(A_{t+1}|S_{t+1})G_{t+1:h} \\
=& R_{t+1} + \gamma \sum_a \pi(a|S_{t+1})Q(S_{t+1},a) \\
&+ \gamma \pi(A_{t+1}|S_{t+1})\Big (G_{t+1:h} -Q_{h-1}(S_{t+1},A_{t+1}) \Big)
\end{split}
\end{equation}
Replacing $\pi(A_{t+1}|S_{t+1})$ by $\Big(\sigma_{t+1} \rho_{t+1} + (1-\sigma_{t+1})\pi(A_{t+1}|S_{t+1}) \Big)$ ($\rho$ is the importance sampling ratio defined in formula (\ref{importancsamplingratio})), we have:
\begin{equation}
\begin{split}
G_{t:h} =& R_{t+1} + \gamma \sum_a \pi(a|S_{t+1})Q(S_{t+1},a) \\
&+ \gamma \Big ( \sigma_{t+1} \rho_{t+1} + (1-\sigma_{t+1})\pi(A_{t+1}|S_{t+1}) \Big) \Big(G_{t+1:h} -Q_{h-1}(S_{t+1},A_{t+1}) \Big)
\end{split}
\end{equation}
$\sum\limits_a \pi(a|S_{t})Q(S_{t},a)$ is called the \cindex{expected approximate value} of state $S_t$. See Algorithm (\ref{algo:nstepoffrho}) for details.
\begin{algorithm}
\caption{Off-policy $n$-step $Q(\sigma)$, estimate $q_\pi$ or $q_*$}\label{algo:nstepoffrho}
\begin{algorithmic}[1]
\State $ \alpha \in (0,1]$
\State $Q(s,a) \gets$ random
\State $\pi \gets$ random $\varepsilon$-greedy policy
\State random policy $b$ such that $\forall a\in \mathcal{A}, s\in \mathcal{S}, b(a|s) > 0$
\State $t \gets 0$
\Statex
\Loop
\State choose $S_0$
\State choose action $A_0 \sim b (\cdot | S_0)$
\State $T \gets \infty$
\While{$\tau < T - 1$}
\If{$t < T$}
\State take action $A_t$ and store $R_{t+1}$ and $S_{t+1}$
\If{$S_{t+1}$ is terminal}
\State $T \gets t+1$
\Else
\State choose $A_{t+1} \sim b(\cdot|S_{t+1})$
\State choose $\sigma_{t+1} \in \{ 0, 1\}$ \Comment $\sigma$ is either $0$ or $1$
\State $\rho_{t+1} \gets \frac{\pi(A_{t+1}|S_{t+1})}{b(A_{t+1}|S_{t+1})}$
\EndIf
\EndIf
\State $\tau \gets t - n + 1$ \Comment $\tau$ is the pivot of the update
\If{$\tau \geq 0$}
\For{$k \gets \Big [\min (t+1,T),\dots,\tau+1 \Big ]$}
\If{$k=T$}
\State $G \gets R_T$
\Else
\State $\overline{V} \gets \sum\limits_a \pi(a|S_{k})Q(S_{k},a)$
\State $G \gets R_{k} + \gamma \overline{V} + \gamma \Big ( \sigma_{k} \rho_{k} + (1-\sigma_{k})\pi(A_{k}|S_{k}) \Big) \Big(G -Q(S_{k},A_{k}) \Big )$
\EndIf
\EndFor
\State $Q(S_\tau,A_\tau) \gets Q(S_\tau,A_\tau) + \alpha \Big(G - Q(S_\tau,A_\tau)\Big)$
\State update $\pi_*$ \Comment update as an $\varepsilon$-greedy policy if calculating $q_*$
\EndIf
\State $t \gets t+1$
\EndWhile
\EndLoop
\end{algorithmic}
\end{algorithm}
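All of the algorithms in this section share the same sliding-window bookkeeping: the last $n$ transitions are stored, and at time $t$ the pivot $\tau = t-n+1$ selects the state (or state--action pair) whose estimate gets updated. As an illustration only (not part of the pseudocode above), here is a minimal Python sketch of this bookkeeping for the simplest case, $n$-step TD prediction with a fixed policy. The environment interface (\texttt{env.reset()}, \texttt{env.step()}) and the \texttt{policy} function are assumptions made for the example and are not defined here:

\begin{verbatim}
# Sketch: tabular n-step TD prediction for a fixed policy.
# Assumes env.reset() -> state, env.step(action) -> (next_state,
# reward, done), and policy(state) -> action; states must be hashable.
from collections import defaultdict

def n_step_td(env, policy, n, alpha, gamma, episodes):
    V = defaultdict(float)
    for _ in range(episodes):
        S = {0: env.reset()}          # stored states of this episode
        R = {}                        # stored rewards of this episode
        T = float('inf')
        t = 0
        while True:
            if t < T:
                a = policy(S[t])
                S[t + 1], R[t + 1], done = env.step(a)
                if done:
                    T = t + 1
            tau = t - n + 1           # the state whose value is updated
            if tau >= 0:
                G = sum(gamma ** (i - tau - 1) * R[i]
                        for i in range(tau + 1, min(tau + n, T) + 1))
                if tau + n < T:
                    G += gamma ** n * V[S[tau + n]]
                V[S[tau]] += alpha * (G - V[S[tau]])
            if tau == T - 1:
                break
            t += 1
    return V
\end{verbatim}

The other variants above differ only in how the target $G$ (and, for the off-policy versions, the importance-sampling weight) is computed inside the \texttt{tau >= 0} branch.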
{ "alphanum_fraction": 0.5755314124, "avg_line_length": 30.860058309, "ext": "tex", "hexsha": "155e919f61c2050e437d3a0595964d25569cb83e", "lang": "TeX", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2020-10-01T23:34:47.000Z", "max_forks_repo_forks_event_min_datetime": "2020-10-01T23:34:47.000Z", "max_forks_repo_head_hexsha": "d12ac08d30be4341776714ad895116a243ec026f", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "elvisren/machine-learning-notes", "max_forks_repo_path": "src/reinforcement_learning/rl.6.nstep_sarsa.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "d12ac08d30be4341776714ad895116a243ec026f", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "elvisren/machine-learning-notes", "max_issues_repo_path": "src/reinforcement_learning/rl.6.nstep_sarsa.tex", "max_line_length": 267, "max_stars_count": 2, "max_stars_repo_head_hexsha": "d12ac08d30be4341776714ad895116a243ec026f", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "elvisren/machine-learning-notes", "max_stars_repo_path": "src/reinforcement_learning/rl.6.nstep_sarsa.tex", "max_stars_repo_stars_event_max_datetime": "2020-12-04T17:28:22.000Z", "max_stars_repo_stars_event_min_datetime": "2020-05-07T03:05:08.000Z", "num_tokens": 4487, "size": 10585 }
\documentclass[10pt,a4paper,twoside]{report}
\input{preamble}
\begin{document}

\newpage
\section*{Acknowledgments}
These last years spent in Nantes as a bachelor, master, and PhD student have been a real pleasure and will leave me full of good memories.

First, I would like to thank the directors of this thesis, Laurent Stainier and Thomas Heuz{\'e}, for trusting me with this research. They both guided me very patiently throughout this work, especially during the reviewing of manuscripts. In addition, I must emphasize the very good musical tastes of Laurent Stainier and the gastronomic tastes of Thomas Heuz{\'e}, which have been the subject of interesting discussions. Owing to their moral values and scientific skills, I have been very happy to work with them.

Second, I am grateful to several faculty members I met when I was a master's student at Nantes University, who then became colleagues: Jean-Christophe Thomas, Mathilde Chevreuil, Marc Fran{\c c}ois, Anh Le van, Rabah Bouzidi and Laurent Gornet. Their courses showed me how interesting, though challenging, computational mechanics can be.

Third, I also want to thank my ``building F colleagues'': Baptiste and Erwan (along with the other CBEM organizers), Pierre, Rohit, Jorge, Beno{\^i}t, Romain, Simon, George, Tauno, Abdullah, Zolt{\'a}n, C{\'e}cile, Daria, Quentin, Steven, Fabien, Hong-Duc, Elie, Rhapha{\"e}l, Adrien and Ataollah, as well as the ``building T people'': Julie, Marie and Nicolas. They all contributed to an atmosphere which helps during difficult times. This reminded me of the good old days at the university with Jan, Cocaud, Laporte, Le Mouel, Mac{\'e}, ``Le Commodore'' Varin and Claude.

Fourth, I am thankful to Cynthia and Marine, who managed the administrative difficulties that I encountered. Moreover, I thank Anthony, who saved me by lending me his laptop on the day of my defense.

Fifth, I would like to thank Morgane for her considerable support and for putting up with my mood swings. I also must thank Doroth{\'e}e, Hana{\"e} and Ga{\"e}l for lending me their house so that I could write peacefully.

Lastly, I am grateful to my friends and family for coming to my defense and to the celebration of my birthday the next day. They all supported me during these three years, especially my sister Emmanuelle at the end of her pregnancy.
\tableofcontents{}

%% Chapter 1: Introduction
\chapter{Introduction}
\input{chapter1/mainChapter1}
% Discuss the choice of explicit methods (which follow the characteristics) versus implicit ones

%% Chapter 2: Hyperbolic partial differential equations
\chapter{Hyperbolic partial differential equations for solid dynamics}
\label{chap:chap2}
\input{chapter2/mainChapter2}

%% Chapter 3: The Discontinuous Galerkin Material Point Method
\chapter{The Discontinuous Galerkin Material Point Method}
\label{chap:chap3}
\input{chapter3/mainChapter3}

%% Chapter 4: Numerical Results
\chapter{Numerical Results}
\label{chap:chap4}
\input{chapter4/mainChapter4}

%% Chapter 5: Contribution to the solution of two-dimensional elastoplastic problems
\chapter{Contribution to the solution of elastic-plastic hyperbolic problems in two space dimensions}
\label{chap:chap5}
\input{chapter5/mainChapter5}

%% Chapter 6: Conclusion
\chapter{Conclusion and future works}
\input{chapter6/mainChapter6}

\addcontentsline{toc}{chapter}{References}
\bibliographystyle{ieeetr}
\bibliography{Biblio}

% \newpage
% \AddToShipoutPicture*{\BackgroundBPage}
% \thispagestyle{empty}
% ~~\\
% \ClearShipoutPicture

\end{document}

%%% Local Variables:
%%% mode: latex
%%% ispell-local-dictionary: "american"
%%% TeX-master: t
%%% End:
{ "alphanum_fraction": 0.7815934066, "avg_line_length": 40.8988764045, "ext": "tex", "hexsha": "1d33855d1b823e7a04ee02ad77be8f4597b1c9a1", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "2f0062a1800d7a17577bbfc2393b084253d567f4", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "adRenaud/research", "max_forks_repo_path": "manuscript/mainManuscript.tex", "max_issues_count": 1, "max_issues_repo_head_hexsha": "2f0062a1800d7a17577bbfc2393b084253d567f4", "max_issues_repo_issues_event_max_datetime": "2019-01-07T13:11:11.000Z", "max_issues_repo_issues_event_min_datetime": "2019-01-07T13:11:11.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "adRenaud/research", "max_issues_repo_path": "manuscript/mainManuscript.tex", "max_line_length": 357, "max_stars_count": 1, "max_stars_repo_head_hexsha": "2f0062a1800d7a17577bbfc2393b084253d567f4", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "adRenaud/research", "max_stars_repo_path": "manuscript/mainManuscript.tex", "max_stars_repo_stars_event_max_datetime": "2021-06-18T14:52:03.000Z", "max_stars_repo_stars_event_min_datetime": "2021-06-18T14:52:03.000Z", "num_tokens": 912, "size": 3640 }
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % Beamer Presentation % LaTeX Template % Version 1.0 (10/11/12) % % This template has been downloaded from: % http://www.LaTeXTemplates.com % % License: % CC BY-NC-SA 3.0 (http://creativecommons.org/licenses/by-nc-sa/3.0/) % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %---------------------------------------------------------------------------------------- % PACKAGES AND THEMES %---------------------------------------------------------------------------------------- \documentclass{beamer} \mode<presentation> { % The Beamer class comes with a number of default slide themes % which change the colors and layouts of slides. Below this is a list % of all the themes, uncomment each in turn to see what they look like. %\usetheme{default} %\usetheme{AnnArbor} %\usetheme{Antibes} %\usetheme{Bergen} %\usetheme{Berkeley} %\usetheme{Berlin} %%\usetheme{Boadilla} %\usetheme{CambridgeUS} %\usetheme{Copenhagen} %\usetheme{Darmstadt} %%\usetheme{Dresden} %\usetheme{Frankfurt} %\usetheme{Goettingen} %\usetheme{Hannover} %\usetheme{Ilmenau} %\usetheme{JuanLesPins} %\usetheme{Luebeck} \usetheme{Madrid} %\usetheme{Malmoe} %\usetheme{Marburg} %\usetheme{Montpellier} %\usetheme{PaloAlto} %\usetheme{Pittsburgh} %\usetheme{Rochester} %\usetheme{Singapore} %\usetheme{Szeged} %\usetheme{Warsaw} % As well as themes, the Beamer class has a number of color themes % for any slide theme. Uncomment each of these in turn to see how it % changes the colors of your current slide theme. %\usecolortheme{albatross} \usecolortheme{beaver} %\usecolortheme{beetle} %\usecolortheme{crane} %\usecolortheme{dolphin} %%\usecolortheme{dove} %\usecolortheme{fly} %\usecolortheme{lily} %\usecolortheme{orchid} %\usecolortheme{rose} %\usecolortheme{seagull} %\usecolortheme{seahorse} %\usecolortheme{whale} %\usecolortheme{wolverine} %\setbeamertemplate{footline} % To remove the footer line in all slides uncomment this line %\setbeamertemplate{footline}[page number] % To replace the footer line in all slides with a simple slide count uncomment this line %\setbeamertemplate{navigation symbols}{} % To remove the navigation symbols from the bottom of all slides uncomment this line } \usepackage{graphicx} % Allows including images \usepackage{booktabs} % Allows the use of \toprule, \midrule and \bottomrule in tables \usepackage{lmodern} \usepackage{amsfonts} \usepackage{amsmath} \usepackage{mathtools} \usepackage{listings} % C++ code \lstset{language=C++, basicstyle=\footnotesize\ttfamily, keywordstyle=\footnotesize\color{blue}\ttfamily, } %---------------------------------------------------------------------------------------- % TITLE PAGE %---------------------------------------------------------------------------------------- \title[Sorting Algorithms]{Sorting Algorithms Analysis} % The short title appears at the bottom of every slide, the full title is only on the title page \author{Ulises M\'endez Mart\'{i}nez} % Your name \institute[UTM] % Your institution as it will appear on the bottom of every slide, may be shorthand to save space { Design and Analysis of Algorithms 2016-01 \\ % Your institution for the title page \medskip \textit{[email protected]} % Your email address } \date{\today} % Date, can be changed to a custom date \begin{document} \begin{frame} \titlepage % Print the title page as the first slide \end{frame} %--------------------------------------------- %\begin{frame} %\frametitle{Overview} % Table of contents slide, comment this block out to remove it %\onslide<2-> %\begin{figure} 
%\includegraphics[width=0.45\linewidth]{z_image.png}
%\end{figure}
%\end{frame}
%--------------------------------------------
\begin{frame}
\frametitle{Overview} % Table of contents slide, comment this block out to remove it
\tableofcontents % Throughout your presentation, if you choose to use
\end{frame}
%----------------------------------------------------------------------------------------
%	PRESENTATION SLIDES
%----------------------------------------------------------------------------------------

%------------------------------------------------
\section{Merge Sort}
\begin{frame}[fragile]
\frametitle{Merge Sort}
Merge sort is an efficient, general-purpose, comparison-based sorting algorithm. It produces a stable sort, which means that it preserves the input order of equal elements in the sorted output. Merge sort is a divide-and-conquer algorithm that was invented by John von Neumann in 1945.
\subsection{Implementation}
\begin{block}{Function call}
\begin{lstlisting}
i64 merge_sort(int data[], int size)
{
    for(int i=0; i<size; i++) // Initialize
    {
        m_array[i] = m_aux[i] = data[i];
    }
    i64 movs = merge(m_array,m_aux,0,size-1);
    return movs;
}
\end{lstlisting}
\end{block}
\end{frame}

\begin{frame}[fragile]
\frametitle{ Implementation }
%\begin{example}[ C++ Implementation ]
\begin{lstlisting}
i64 merge(int v[], int va[], int L, int R)
{
    i64 cnt = 0LL;
    if(L<R)
    {
        int mid = (L+R)/2;
        cnt += merge(va, v, L, mid);
        cnt += merge(va, v, mid+1, R);
        int i=L, j=mid+1, k=L;
        while( i<=mid && j<=R )
        {
            if(va[i]<=va[j]) v[k++]=va[i++];
            else
            {
                v[k++]=va[j++];
                cnt+=(mid+1) - i;
            }
        }
        while(i<=mid) v[k++]=va[i++];
        while(j<= R ) v[k++]=va[j++];
    }
    return cnt;
}
\end{lstlisting}
%\end{example}
\end{frame}

%------------------------------------------------
\section{Heap Sort}
\begin{frame}[fragile]
\frametitle{Heap Sort}
Heap sort is a comparison-based sorting algorithm. It divides its input into a sorted and an unsorted region, and it iteratively shrinks the unsorted region by extracting the largest element and moving that to the sorted region. The improvement over a plain selection sort consists of the use of a heap data structure rather than a linear-time search to find the maximum.
\subsection{Implementation}
\begin{block}{Function call}
\begin{lstlisting}
void heap_sort(int data[], int size)
{
    heap_init();
    for(int i=1; i<=size; i++){
        heap_insert(h_aux, data[i-1]);
    }
    for(int i = 0; i < size; i++){
        heap_delete(h_aux, h_array[i]);
    }
}
\end{lstlisting}
\end{block}
\end{frame}

\begin{frame}[fragile]
\frametitle{Insertion}
\begin{block}{Insert function}
\begin{lstlisting}
void heap_insert(int heap[], int val)
{
    int parent=0, node=++h_size;
    heap[node] = val;
    while(!is_root(node))
    {
        parent = get_parent(node);
        if(heap[node] >= heap[parent]) break;
        swap(heap[node], heap[parent]);
        node = parent;
    }
}
\end{lstlisting}
\end{block}
\end{frame}

\begin{frame}[fragile]
\frametitle{Deletion}
\begin{block}{Delete function}
\begin{lstlisting}
void heap_delete(int heap[], int &val)
{
    val = heap[h_root];
    h_size--;
    if(h_size >= h_root)
    {
        // Set the last element
        heap[h_root] = heap[h_size+1];
        int node, small=h_root;
        do{
            node = small;
            int left = node << 1;
            int right = left + 1;
            if(left<=h_size && heap[left]<heap[small])
                small = left;
            if(right<=h_size && heap[right]<heap[small])
                small = right;
            swap(heap[node],heap[small]);
        }while(small != node);
    }
}
\end{lstlisting}
\end{block}
\end{frame}

%------------------------------------------------
\section{Quick Sort}
\begin{frame}[fragile]
\frametitle{Quick Sort}
Quicksort is an efficient sorting algorithm, developed by Tony Hoare in 1959.
When implemented well, it can be about two or three times faster than its main competitors, merge sort and heapsort.

Quicksort is a comparison sort, meaning that it can sort items of any type for which a ``less-than'' relation is defined. In efficient implementations it is not a stable sort, meaning that the relative order of equal sort items is not preserved. Quicksort can operate in-place on an array, requiring small additional amounts of memory to perform the sorting.
\subsection{Implementation}
\begin{block}{Function call}
\begin{lstlisting}
void q_sort(int A[], int p, int r)
{
    if(p < r)
    {
        int q = partition(A,p,r);
        q_sort(A, p, q-1);
        q_sort(A, q+1, r);
    }
}
\end{lstlisting}
\end{block}
\end{frame}

%------------------------------------------------
%------------------------------------------------
\section{Comparison}
\begin{frame}
\frametitle{Input-based Comparison}
\begin{table}
\small
\begin{tabular}{| c | c | c | c | c |}
\toprule
Input & Ordered & Ordered Inverse & Almost ordered & Random \\
%\hline
\midrule
Merge & 7547 & 7578 & 9305 & 17387 \\
Heap & 30941 & 45921 & 28126 & 33257 \\
QS Fixed & 37705066 & 25637125 & 2545834 & 19879 \\
QS Random & 12464 & 13249 & 13927 & 22118 \\
\bottomrule
\end{tabular}
\caption{Time spent in $\mu$s}
\end{table}
\end{frame}

\section{Comparison}
\begin{frame}
\frametitle{Conclusion}
\begin{block}{Conclusion}
Based on the data obtained, we can conclude that the algorithm that best fits our purpose in terms of time complexity is merge sort. We could also notice an improvement in quicksort when, instead of taking the last element as the pivot in the partition step, we take a random pivot.
\end{block}
\end{frame}

\end{document}
{ "alphanum_fraction": 0.6127892031, "avg_line_length": 29.8274760383, "ext": "tex", "hexsha": "69e27f9ce7387791fa992f7e5b213e9b43df2546", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "b67fe8b72e6c4ddc422204b8b25a8baa0f9e4829", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "Gansito144/CSM", "max_forks_repo_path": "subjects/Analysis-of-Algorithms/task1/analysis.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "b67fe8b72e6c4ddc422204b8b25a8baa0f9e4829", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "Gansito144/CSM", "max_issues_repo_path": "subjects/Analysis-of-Algorithms/task1/analysis.tex", "max_line_length": 356, "max_stars_count": null, "max_stars_repo_head_hexsha": "b67fe8b72e6c4ddc422204b8b25a8baa0f9e4829", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "Gansito144/CSM", "max_stars_repo_path": "subjects/Analysis-of-Algorithms/task1/analysis.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 2467, "size": 9336 }
\section{Aim and Scope}\label{scope}

One of the main areas of development for the Australian METS sector is WiFi-enabled digital-electronic sensing systems, commonly known as Internet-of-Things (IoT) applications. In 2018 METS Ignited issued a call for interested parties to develop real or near real-world testing environments. Within the two-year Living Lab project launched that year, the project was able to gather and create documentation on procedures for Derisking Mine Site Trials and Accelerated Life Testing. This documentation needed a platform through which it could be shared with the public and which could also showcase the services and capability of the Living Lab.

This project is focused on developing a web platform to document the procedures created by the Living Lab as well as to engage resource companies to get involved with the Living Lab. The documentation that will be showcased is:
\begin{itemize}
    \item Common testing procedures in the industry for MEMS-IoT devices
    \item Equipment and Limitations of the Living Lab
    \item Procedures and Standards for ALT testing of MEMS-IoT devices
    \item Administrative, legal, safety, and technical procedures to support university trials and derisk mine site trials
    \item A set of checklists, co-developed with mining and METS companies, for mine site trials
\end{itemize}

To engage resource companies and promote the Living Lab brand, the following needs to be implemented:
\begin{itemize}
    \item Branding, color, and styling consistent with the existing Living Lab branding
    \item Contact details for the Living Lab Team
    \item Case Studies - previous work of the Living Lab
\end{itemize}
{ "alphanum_fraction": 0.7800466744, "avg_line_length": 85.7, "ext": "tex", "hexsha": "6724601a85eb0e445ff031b2986066addcfae30d", "lang": "TeX", "max_forks_count": 2, "max_forks_repo_forks_event_max_datetime": "2020-09-05T02:14:52.000Z", "max_forks_repo_forks_event_min_datetime": "2020-07-08T03:47:02.000Z", "max_forks_repo_head_hexsha": "e253d6e30498edc2e665bd51c43fbe5ec55fd5d1", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "eddie-atkinson/livinglab-site-v3", "max_forks_repo_path": "requirements-document-may2020/AimAndScope/index.tex", "max_issues_count": 123, "max_issues_repo_head_hexsha": "02e50fbdfa51f26d61ac9178f1bcadfed4668fe5", "max_issues_repo_issues_event_max_datetime": "2022-02-13T17:40:58.000Z", "max_issues_repo_issues_event_min_datetime": "2020-07-09T02:13:10.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "uwasystemhealth/livinglab-site-v3", "max_issues_repo_path": "requirements-document-may2020/AimAndScope/index.tex", "max_line_length": 659, "max_stars_count": null, "max_stars_repo_head_hexsha": "02e50fbdfa51f26d61ac9178f1bcadfed4668fe5", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "uwasystemhealth/livinglab-site-v3", "max_stars_repo_path": "requirements-document-may2020/AimAndScope/index.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 390, "size": 1714 }
%% For double-blind review submission, w/o CCS and ACM Reference (max submission space) % \documentclass[sigplan,10pt,review]{acmart} % \settopmatter{printfolios=true,printccs=false,printacmref=false} %% For double-blind review submission, w/ CCS and ACM Reference %\documentclass[sigplan,review,anonymous]{acmart}\settopmatter{printfolios=true} %% For single-blind review submission, w/o CCS and ACM Reference (max submission space) %\documentclass[sigplan,review]{acmart}\settopmatter{printfolios=true,printccs=false,printacmref=false} %% For single-blind review submission, w/ CCS and ACM Reference %\documentclass[sigplan,review]{acmart}\settopmatter{printfolios=true} %% For final camera-ready submission, w/ required CCS and ACM Reference \documentclass[sigplan,screen]{acmart}\settopmatter{} %% Conference information %% Supplied to authors by publisher for camera-ready submission; %% use defaults for review submission. % \startPage{1} %% Copyright information %% Supplied to authors (based on authors' rights management selection; %% see authors.acm.org) by publisher for camera-ready submission; %% use 'none' for review submission. %% Bibliography style \bibliographystyle{ACM-Reference-Format} %% Citation style % \citestyle{acmauthoryear} %% For author/year citations \citestyle{acmnumeric} %% For numeric citations %\setcitestyle{nosort} %% With 'acmnumeric', to disable automatic %% sorting of references within a single citation; %% e.g., \cite{Smith99,Carpenter05,Baker12} %% rendered as [14,5,2] rather than [2,5,14]. %\setcitesyle{nocompress} %% With 'acmnumeric', to disable automatic %% compression of sequential references within a %% single citation; %% e.g., \cite{Baker12,Baker14,Baker16} %% rendered as [2,3,4] rather than [2-4]. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %% Note: Authors migrating a paper from traditional SIGPLAN %% proceedings format to PACMPL format must update the %% '\documentclass' and topmatter commands above; see %% 'acmart-pacmpl-template.tex'. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %% Some recommended packages. \usepackage{booktabs} %% For formal tables: %% http://ctan.org/pkg/booktabs \usepackage{subcaption} %% For complex figures with subfigures/subcaptions %% http://ctan.org/pkg/subcaption \begin{document} %% Title information \title[POPLMark Reloaded]{POPLMark Reloaded} %% [Short Title] is optional; %% when present, will be used in %% header instead of Full Title. % \titlenote{with title note} %% \titlenote is optional; %% can be repeated if necessary; %% contents suppressed with 'anonymous' \subtitle{Mechanizing Logical Relations Proofs \\ (Invited Talk) } %% \subtitle is optional % \subtitlenote{Invited talk} %% \subtitlenote is optional; %% can be repeated if necessary; %% contents suppressed with 'anonymous' %% Author information %% Contents and number of authors suppressed with 'anonymous'. %% Each author should be introduced by \author, followed by %% \authornote (optional), \orcid (optional), \affiliation, and %% \email. %% An author may have multiple affiliations and/or emails; repeat the %% appropriate command. %% Many elements are not rendered, but should be provided for metadata %% extraction tools. %% Author with single affiliation. 
\author{Brigitte Pientka}
% \authornote{with author1 note}   %% \authornote is optional;
                                   %% can be repeated if necessary
\orcid{nnnn-nnnn-nnnn-nnnn}        %% \orcid is optional
\affiliation{
  % \position{Position1}
  % \department{School of Computer Science}  %% \department is recommended
  \institution{McGill University}  %% \institution is required
  % \streetaddress{Street1 Address1}
  \city{Montreal}
  \state{QC}
  \country{Canada}                 %% \country is recommended
}
\email{[email protected]}         %% \email is recommended

%% Abstract
% Note: \begin{abstract}...\end{abstract} environment must come
%% before \maketitle command
\begin{abstract}
Mechanizing formal systems, given via axioms and inference rules, together with proofs about them plays an important role in establishing trust in formal developments. Over the past decade, the POPLMark challenge \citep{Aydemir05TPHOLs} popularized the use of proof assistants in mechanizing the metatheory of programming languages. Focusing on the meta-theory of $\mathtt{F_{<:}}$, it allowed the programming languages community to survey existing techniques to represent and reason about syntactic structures with binders and promote the use of proof assistants. Today, mechanizing proofs is a stable fixture in the daily life of programming languages researchers.

As a follow-up to the POPLMark Challenge, we propose a new collection of benchmarks that use proofs by logical relations. Such proofs are now used to attack problems in the theory of complex language models, with applications to issues in equivalence of programs, {compiler correctness}, representation independence and even more intensional properties such as non-interference, differential privacy and secure multi-language inter-operability (see for example~\citep{Ahmed15,BowmanA15,NeisHKMDV15}). Yet, they remain challenging to mechanize.
% The goal of these benchmarks is to better
% understand how to factor out a generic infrastructure for
% common and recurring issues beyond representing variables and achieve
% mechanized proofs that are more readable and easier to maintain. We see
% this not merely as an engineering challenge, but hope these
% benchmarks highlight the difficulties in mechanizing such proofs and also
% lead to cross-fertilization between general proof environments such as
% Coq, Agda or Isabelle on the one hand and specialized frameworks such
% as Abella and Beluga that target representing and reasoning about
% structures with binders on the other hand.
% This eliminates the potential for mistakes in building up
% the primitive infrastructure and more importantly it lies the ground for
% representing proofs compactly and automating proofs efficiently, since
% the search space concentrates on essential parts and does not get
% bogged in the quagmire of bureaucratic details.

In this talk, we focus on one particular challenge problem, namely strong normalization of a simply-typed lambda-calculus with a proof by Kripke-style logical relations. We will advocate a modern view of this well-understood problem by formulating our logical relation on well-typed terms.
% This
% focus on reasoning about well-typed terms necessitates reasoning about
% Kripke-style context extensions.
Using this case study, we share some of the lessons learned tackling this challenge problem in Beluga \cite{PientkaC15}, a proof environment that supports higher-order abstract syntax encodings, first-class context and first-class substitutions.
{We also discuss and highlight similarities, strategies, and limitations % \newline in other proof assistants when tackling this challenge problem.} % % % \newline\indent We hope others will be motivated to submit solutions! The goal of this talk is to engage the community in discussions on what support in proof environments is needed to truly bring mechanized metatheory to the masses. \end{abstract} %%% The following is specific to CPP'18 and the paper %%% 'POPLMark Reloaded: Mechanizing Logical Relations Proofs (Invited Talk)' %%% by Brigitte Pientka. %%% \setcopyright{rightsretained} \acmPrice{15.00} \acmDOI{10.1145/3167102} \acmYear{2018} \copyrightyear{2018} \acmISBN{978-1-4503-5586-5/18/01} \acmConference[CPP'18]{7th ACM SIGPLAN International Conference on Certified Programs and Proofs}{January 8--9, 2018}{Los Angeles, CA, USA} %% 2012 ACM Computing Classification System (CSS) concepts %% Generate at 'http://dl.acm.org/ccs/ccs.cfm'. \begin{CCSXML} <ccs2012> <concept> <concept_id>10003752.10003790.10002990</concept_id> <concept_desc>Theory of computation~Logic and verification</concept_desc> <concept_significance>500</concept_significance> </concept> <concept> <concept_id>10003752.10003790.10011740</concept_id> <concept_desc>Theory of computation~Type theory</concept_desc> <concept_significance>500</concept_significance> </concept> <concept> <concept_id>10003752.10003790.10003794</concept_id> <concept_desc>Theory of computation~Automated reasoning</concept_desc> <concept_significance>300</concept_significance> </concept> <concept> <concept_id>10011007.10011006.10011008.10011024.10011038</concept_id> <concept_desc>Software and its engineering~Frameworks</concept_desc> <concept_significance>300</concept_significance> </concept> <concept> <concept_id>10011007.10011006.10011039.10011040</concept_id> <concept_desc>Software and its engineering~Syntax</concept_desc> <concept_significance>300</concept_significance> </concept> <concept> <concept_id>10011007.10011006.10011039.10011311</concept_id> <concept_desc>Software and its engineering~Semantics</concept_desc> <concept_significance>300</concept_significance> </concept> </ccs2012> \end{CCSXML} \ccsdesc[500]{Theory of computation~Logic and verification} \ccsdesc[500]{Theory of computation~Type theory} \ccsdesc[300]{Theory of computation~Automated reasoning} \ccsdesc[300]{Software and its engineering~Frameworks} \ccsdesc[300]{Software and its engineering~Syntax} \ccsdesc[300]{Software and its engineering~Semantics} %% End of generated code %% Keywords %% comma separated list \keywords{Mechanizing Metatheory, Logical Frameworks, Proof Assistants} %% \keywords are mandatory in final camera-ready submission \maketitle %% Note: \maketitle command must come after title commands, author %% commands, abstract environment, Computing Classification System %% environment and commands, and keywords command. %% Acknowledgments \begin{acks} This is joint work with Andreas Abel, Aliya Hameer, Alberto Momigliano, Kathrin Stark, and Steven Schaefer. %% acks environment is optional %% contents suppressed with 'anonymous' %% Commands \grantsponsor{<sponsorID>}{<name>}{<url>} and %% \grantnum[<url>]{<sponsorID>}{<number>} should be used to %% acknowledge financial support and will be used by metadata %% extraction tools. This material is based upon work supported by the \grantsponsor{GS100000001}{Natural Sciences and Engineering Research Council (NSERC) of Canada}{http://dx.doi.org/10.13039/100000001}. % under Grant % No.~\grantnum{206263}{206263}. 
% Any opinions, findings, and % conclusions or recommendations expressed in this material are those % of the author and do not necessarily reflect the views of NSERC. \end{acks} %% Bibliography \bibliography{cpp} %% Appendix % \appendix % \section{Appendix} % Text of appendix \ldots \end{document} %%% Local Variables: %%% mode: latex %%% TeX-master: t %%% End:
{ "alphanum_fraction": 0.7238916471, "avg_line_length": 41.7490909091, "ext": "tex", "hexsha": "f063fc4e7e36a5e5d690fe4af804a848768f5c15", "lang": "TeX", "max_forks_count": 4, "max_forks_repo_forks_event_max_datetime": "2018-02-23T18:22:17.000Z", "max_forks_repo_forks_event_min_datetime": "2017-11-10T16:44:52.000Z", "max_forks_repo_head_hexsha": "79d97481f3312c2d30a823c3b1bcb8ae871c2fe2", "max_forks_repo_licenses": [ "Unlicense" ], "max_forks_repo_name": "ryanakca/strong-normalization", "max_forks_repo_path": "cpp18-invited/cpp18-invited.tex", "max_issues_count": 2, "max_issues_repo_head_hexsha": "79d97481f3312c2d30a823c3b1bcb8ae871c2fe2", "max_issues_repo_issues_event_max_datetime": "2018-02-20T14:54:18.000Z", "max_issues_repo_issues_event_min_datetime": "2018-02-14T16:42:36.000Z", "max_issues_repo_licenses": [ "Unlicense" ], "max_issues_repo_name": "ryanakca/strong-normalization", "max_issues_repo_path": "cpp18-invited/cpp18-invited.tex", "max_line_length": 139, "max_stars_count": 32, "max_stars_repo_head_hexsha": "79d97481f3312c2d30a823c3b1bcb8ae871c2fe2", "max_stars_repo_licenses": [ "Unlicense" ], "max_stars_repo_name": "ryanakca/strong-normalization", "max_stars_repo_path": "cpp18-invited/cpp18-invited.tex", "max_stars_repo_stars_event_max_datetime": "2021-03-05T12:12:03.000Z", "max_stars_repo_stars_event_min_datetime": "2017-05-22T14:33:27.000Z", "num_tokens": 2714, "size": 11481 }
% -----------------------------------------------
% chktex-file 44
\documentclass[../index.tex]{subfiles}
% -----------------------------------------------
\begin{document}
% -----------------------------------------------
\renewcommand{\sectiontitle}{More advanced tooling}
\section{\sectiontitle}

% ---------------------------
\renewcommand{\currenttitle}{Using external libraries}
\begin{frame}[fragile]{\currenttitle}
  You may want to pull external libraries into your extension. \\[1em]

  In the JS world, you have many (too many) options:
  \begin{itemize}
    \item Store a copy of the library in your extension
    \item Use a CDN
    \item Use a package manager to download the library
    \item Use a package manager + bundler
  \end{itemize}
\end{frame}

% ---------------------------
\renewcommand{\currenttitle}{Storing a copy of the library}
\begin{frame}[fragile]{\currenttitle}
  Let's say we want to use the jQuery library:
  \begin{enumerate}
    \item Download it\footnote{https://code.jquery.com/jquery-3.6.0.min.js} into your project
    \item Load the file in the necessary places
  \end{enumerate}
\end{frame}

% ---------------------------
\begin{frame}[fragile]{\currenttitle}
  To use it in content scripts, we just have to add it to our \texttt{manifest.json}:

  \begin{lstlisting}[language=ES6]
{
  ...
  "content_scripts": [
    {
      "matches": [...],
      "js": ["jquery-3.6.0.min.js", ...]
    }
  ],
  ...
}
  \end{lstlisting}

  The jQuery global (\texttt{\$}) is now available in all content scripts.
\end{frame}

% ---------------------------
\begin{frame}[fragile]{\currenttitle}
  To use it in a background script, we can do the same thing:

  \begin{lstlisting}[language=ES6]
{
  ...
  "background": {
    "scripts": ["jquery-3.6.0.min.js", ...]
  },
  ...
}
  \end{lstlisting}
\end{frame}

% ---------------------------
\renewcommand{\currenttitle}{Using a package manager}
\begin{frame}[fragile]{\currenttitle}
  We can use the Node Package Manager (npm) that comes with Node.js to install jQuery:

  \begin{lstlisting}[language=Bash,basicstyle=\ttfamily\small]
npm init           # initialize package.json
npm install jquery # add jquery as a dependency
  \end{lstlisting}
  \vspace*{1em}

  This will create a \texttt{node\_modules} directory in your project.
\end{frame}

% ---------------------------
\begin{frame}[fragile]{\currenttitle}
  We can now load jQuery from inside \texttt{/node\_modules}:

  \begin{lstlisting}[language=json]
{
  ...
  "content_scripts": [
    {
      "matches": [...],
      "js": ["node_modules/jquery/dist/jquery.js", ...]
    }
  ],
  ...
}
  \end{lstlisting}
\end{frame}

% ---------------------------
\begin{frame}[fragile]{\currenttitle}
  The npm registry (\url{npmjs.com}) has \textit{millions} of packages/libraries available for use. \\[2em]

  \textbf{Beware: Using npm is no different from downloading random crap off the internet.}

  \textbf{The JS package ecosystem is rife with security issues.}
\end{frame}

% ---------------------------
\renewcommand{\currenttitle}{Using a package manager + bundler}
\begin{frame}[fragile]{\currenttitle}
  Modern web practices typically involve using a \textbf{bundler} such as Webpack or Parcel to combine all your assets into one or more JS files. \\[1em]

  This means that there will be an extra compilation step between writing code and testing your extension. \\[3em]

  Let me know if you're interested in exploring this.
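
  If you do want to try it, a minimal webpack-based setup (one possible bundler; the package names and the default \texttt{src}/\texttt{dist} paths shown here are just illustrative) boils down to:

  \begin{lstlisting}[language=Bash,basicstyle=\ttfamily\small]
npm install --save-dev webpack webpack-cli  # add the bundler
npx webpack --mode production               # bundle src/ into dist/
  \end{lstlisting}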
\end{frame}

% ---------------------------
\renewcommand{\currenttitle}{TypeScript}
\begin{frame}[fragile]{\currenttitle}
  You will have a much better software engineering experience with
  \textbf{TypeScript}:

  \begin{itemize}
    \item Static type declarations and checking
      \begin{itemize}
        \item Eliminate a whole class of errors
        \item Better code completion
        \item Easier to test
        \item More readable
      \end{itemize}
    \item First-class support for new language features
    \item Stricter lints
  \end{itemize}
\end{frame}

% -----------------------------------------------
\end{document}
{ "alphanum_fraction": 0.6127937969, "avg_line_length": 26.4551282051, "ext": "tex", "hexsha": "bfd6e5eb380ed70c32a4d265bc8bb000c15aae57", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "f9789e4b8339be8656a2d1630b5c16fdc6c692ef", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "Dophin2009/webext101", "max_forks_repo_path": "sections/04-advanced.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "f9789e4b8339be8656a2d1630b5c16fdc6c692ef", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "Dophin2009/webext101", "max_issues_repo_path": "sections/04-advanced.tex", "max_line_length": 78, "max_stars_count": null, "max_stars_repo_head_hexsha": "f9789e4b8339be8656a2d1630b5c16fdc6c692ef", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "Dophin2009/webext101", "max_stars_repo_path": "sections/04-advanced.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1060, "size": 4127 }
\documentclass{bioinfo} \copyrightyear{2008} \pubyear{2008} %\newif\ifnonbi %\nonbifalse %\newif\ifbi %\bitrue \begin{document} \firstpage{1} \begin{application} \title[Infernal 1.0]{Infernal 1.0: inference of RNA alignments} \author[E. Nawrocki, D. Kolbe and S. Eddy]{Eric P. Nawrocki,\,$^1$ Diana L. Kolbe\,$^1$ and Sean R. Eddy\,$^1$\footnote{to whom correspondence should be addressed}} \address{$^{1}$HHMI Janelia Farm Research Campus, Ashburn VA 20147, USA\\} \history{Received on XXXXX; revised on XXXXX; accepted on XXXXX} \editor{Associate Editor: XXXXXXX} \maketitle %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \begin{abstract} \section{Summary:} \textsc{infernal} builds consensus RNA secondary structure profiles called covariance models (CMs), and uses them to search nucleic acid sequence databases for homologous RNAs, or to create new sequence- and structure-based multiple sequence alignments. \section{Availability:} Source code, documentation, and benchmark downloadable from http://infernal.janelia.org. \textsc{infernal} is freely licensed under the GNU GPLv3 and should be portable to any POSIX-compliant operating system, including Linux and Mac OS/X. \section{Contact:} \{nawrockie,kolbed,eddys\}@janelia.hhmi.org \end{abstract} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Introduction} When searching for homologous structural RNAs in sequence databases, it is desirable to score both primary sequence and secondary structure conservation. The most generally useful tools that integrate sequence and structure take as input any RNA (or RNA multiple alignment), and automatically construct an appropriate statistical scoring system that allows quantitative ranking of putative homologs in a sequence database \citep{Gautheret01,ZhangBafna05,Huang08}. Stochastic context-free grammars (SCFGs) provide a natural statistical framework for combining sequence and (non-pseudoknotted) secondary structure conservation information in a single consistent scoring system \citep{Sakakibara94c,Eddy94,Brown00,Durbin98}. Here, we announce the 1.0 release of \textsc{infernal}, an implementation of a general SCFG-based approach for RNA database searches and multiple alignment. \textsc{infernal} builds consensus RNA profiles called \emph{covariance models} (CMs), a special case of SCFGs designed for modeling RNA consensus sequence and structure. It uses CMs to search nucleic acid sequence databases for homologous RNAs, or to create new sequence- and structure-based multiple sequence alignments. One use of \textsc{infernal} is to annotate RNAs in genomes in conjunction with the \textsc{Rfam} database \citep{Gardner09}, which contains hundreds of RNA families. \textsc{Rfam} follows a seed profile strategy, in which a well-annotated ``seed'' alignment of each family is curated, and a CM built from that seed alignment is used to identify and align additional members of the family. \textsc{infernal} has been in use since 2002, but 1.0 is the first version that we consider to be a reasonably complete production tool. It now includes E-value estimates for the statistical significance of database hits, and heuristic acceleration algorithms for both database searches and multiple alignment that allow \textsc{infernal} to be deployed in a variety of real RNA analysis tasks with manageable (albeit high) computational requirements. 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Usage} A CM is built from a Stockholm format multiple sequence alignment (or single RNA sequence) with consensus secondary structure annotation marking which positions of the alignment are single stranded and which are base paired \citep{infguide03}. CMs assign position specific scores for the four possible residues at single stranded positions, the sixteen possible base pairs at paired positions, and for insertions and deletions. These scores are log-odds scores derived from the observed counts of residues, base pairs, insertions and deletions in the input alignment, combined with prior information derived from structural ribosomal RNA alignments. CM parameterization has been described in more detail elsewhere \citep{Eddy94,Eddy02b,KleinEddy03,infguide03,NawrockiEddy07}. \textsc{infernal} is composed of several programs that are used in combination by following four basic steps: \begin{enumerate} \item Build a CM from a structural alignment with \emph{cmbuild}. \item Calibrate a CM for homology search with \emph{cmcalibrate}. \item Search databases for putative homologs with \emph{cmsearch}. \item Align putative homologs to a CM with \emph{cmalign}. \end{enumerate} The calibration step is optional and computationally expensive (4 hours on a 3.0 GHz Intel Xeon for a CM of a typical RNA family of length 100 nt), but is required to obtain E-values that estimate the statistical significance of hits in a database search. \emph{cmcalibrate} will also determine appropriate HMM filter thresholds for accelerating searches without an appreciable loss of sensitivity. Each model only needs to be calibrated once. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Performance} A published benchmark (independent of our lab) \citep{Freyhult07} and our own internal benchmark used during development \citep{NawrockiEddy07} both find that \textsc{infernal} and other CM based methods are the most sensitive and specific tools for structural RNA homology search among those tested. Figure~1 shows updated results of our internal benchmark comparing \textsc{infernal} 1.0 to the previous version (0.72) that was benchmarked in \citet{Freyhult07}, and also to family-pairwise-search with BLASTN \citep{Altschul97, Grundy98b}. \textsc{infernal}'s sensitivity and specificity have greatly improved, due mainly to three relevant improvements in the implementation \citep{infguide03}: a biased composition correction to the raw log-odds scores, the use of Inside log likelihood scores (the summed score of all possible alignments of the target sequence) in place of CYK scores (the single maximum likelihood alignment score), and the introduction of approximate E-value estimates for the scores. The benchmark dataset used in Figure~1 includes query alignments and test sequences from 51 \textsc{Rfam} (release 7) families (details in \citep{NawrockiEddy07}). No query sequence is more than 60\% identical to a test sequence. The 450 total test sequences were embedded at random positions in a 10 Mb ``pseudogenome''. Previously we generated the pseudogenome sequence from a uniform residue frequency distribution \citep{NawrockiEddy07}. 
Because base composition biases in the target sequence database cause
the most serious problems in separating significant CM hits from
noise, we improved the realism of the benchmark by generating the
pseudogenome sequence from a 15-state fully connected hidden Markov
model (HMM) trained by Baum-Welch expectation maximization
\citep{Durbin98} on genome sequence data from a wide variety of
species. Each of the 51 query alignments was used to build a CM and
search the pseudogenome, a single list of all hits for all families
was collected and ranked, and true and false hits were defined (as
described in \citet{NawrockiEddy07}), producing the ROC curves in
Figure~1.

\textsc{infernal} searches require a large amount of compute time
(our 10 Mb benchmark search takes about 30 hours per model on average
(Figure~1)). To alleviate this, \textsc{infernal} 1.0 implements two
rounds of filtering. When appropriate, the HMM filtering technique
described by \citet{WeinbergRuzzo06} is applied first with filter
thresholds configured by \emph{cmcalibrate} (occasionally a model
with little primary sequence conservation cannot be usefully
accelerated by a primary sequence-based filter as explained in
\citep{infguide03}). The query-dependent banded (QDB) CYK maximum
likelihood search algorithm is used as a second filter with
relatively tight bands ($\beta = 10^{-7}$, the $\beta$ parameter is
the subtree length probability mass excluded by imposing the bands as
explained in \citep{NawrockiEddy07}). Any sequence fragments that
survive the filters are searched a final time with the Inside
algorithm (again using QDB, but with looser bands
($\beta = 10^{-15}$)). In our benchmark, the default filters
accelerate similarity search by about 30-fold overall, while
sacrificing a small amount of sensitivity (Figure~1). This makes
version 1.0 substantially faster than 0.72. \textsc{BLAST} is still
orders of magnitude faster, but significantly less sensitive than
\textsc{infernal}. Further acceleration remains a major goal of
\textsc{infernal} development.

The computational cost of CM alignment with \emph{cmalign} has been a
limitation of previous versions of \textsc{infernal}. Version 1.0 now
uses a constrained dynamic programming approach first developed by
\citet{Brown00} that uses sequence-specific bands derived from a
first-pass HMM alignment. This technique offers a dramatic speedup
relative to unconstrained alignment, especially for large RNAs such
as small and large subunit (SSU and LSU) ribosomal RNAs, which can
now be aligned in roughly 1 and 3 seconds per sequence, respectively,
as opposed to 12 minutes and 3 hours in previous versions. This
acceleration has facilitated the adoption of \textsc{infernal} by
RDP, one of the main ribosomal RNA databases \citep{Cole09}.

\textsc{infernal} is now a faster and more sensitive tool for RNA
sequence analysis. Version 1.0's heuristic acceleration techniques
make some important applications possible on a single desktop
computer in less than an hour, such as searching a prokaryotic genome
for a particular RNA family, or aligning a few thousand SSU rRNA
sequences. Nonetheless, \textsc{infernal} remains computationally
expensive, and many problems of interest require the use of a
cluster. The most expensive programs (\emph{cmcalibrate},
\emph{cmsearch}, and \emph{cmalign}) are implemented in
coarse-grained parallel MPI versions which divide the workload into
independent units, each of which is run on a separate processor.
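
As a concrete illustration of the four-step workflow described in the
Usage section, a typical serial run of the four programs looks like
the following (the file names are hypothetical, and each program
accepts additional options described in the user's guide
\citep{infguide03}):

\begin{verbatim}
  cmbuild my.cm seed.sto      # build a CM from a seed alignment
  cmcalibrate my.cm           # calibrate E-values, filter thresholds
  cmsearch my.cm genome.fa    # search a sequence database for hits
  cmalign my.cm homologs.fa   # align putative homologs to the CM
\end{verbatim}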
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section*{Acknowledgement} We thank Goran Ceric for his peerless skill in managing Janelia Farm's high performance computing resources. \paragraph*{Funding\textcolon} \textsc{Infernal} development is supported by the Howard Hughes Medical Institute. It has been supported in the past by an NIH NHGRI training grant (T32-HG000045) to EPN, an NSF Graduate Fellowship to DLK, NIH R01-HG01363, and a generous endowment from Alvin Goldfarb. \begin{figure}[h] \centerline{\includegraphics{roc-short}} \caption{\textbf{ROC curves for the benchmark.} Plots are shown for the new \textsc{infernal} 1.0 with and without filters, for the old \textsc{infernal} 0.72, and for family-pairwise-searches (FPS) with \textsc{blastn}. CPU times are total times for all 51 family searches measured for single execution threads on 3.0 GHz Intel Xeon processors. The \textsc{infernal} 1.0 times do not include time required for model calibration.} \end{figure} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % please remove the " % " symbol from \centerline{\includegraphics{fig01.eps}} % as it may ignore the figures. % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %\bibliographystyle{natbib} %\bibliography{master,books,lab,new} \begin{thebibliography}{} \bibitem[Altschul {\em et~al.}(1997)Altschul, Madden, Schaffer, Zhang, Zhang, Miller, and Lipman]{Altschul97} Altschul, S.~F., Madden, T.~L., Schaffer, A.~A., Zhang, J., Zhang, Z., Miller, W., and Lipman, D.~J. (1997). \newblock Gapped {BLAST} and {PSI-BLAST}: A new generation of protein database search programs. \newblock {\em Nucl. Acids Res.}, {\bf 25}, 3389--3402. \bibitem[Brown(2000)Brown]{Brown00} Brown, M.~P. (2000). \newblock Small subunit ribosomal {RNA} modeling using stochastic context-free grammars. \newblock {\em Proc. Int. Conf. Intell. Syst. Mol. Biol.}, {\bf 8}, 57--66. \bibitem[Cole {\em et~al.}(2009)Cole, Wang, Cardenas, Fish, Chai, Farris, Kulam-Syed-Mohideen, McGarrell, Marsh, Garrity, and Tiedje]{Cole09} Cole, J.~R., Wang, Q., Cardenas, E., Fish, J., Chai, B., Farris, R.~J., Kulam-Syed-Mohideen, A.~S., McGarrell, D.~M., Marsh, T., Garrity, G.~M., and Tiedje, J.~M. (2009). \newblock The {R}ibosomal {D}atabase {P}roject: Improved alignments and new tools for {rRNA} analysis. \newblock in press. \bibitem[Durbin {\em et~al.}(1998)Durbin, Eddy, Krogh, and Mitchison]{Durbin98} Durbin, R., Eddy, S.~R., Krogh, A., and Mitchison, G.~J. (1998). \newblock {\em Biological Sequence Analysis: Probabilistic Models of Proteins and Nucleic Acids\/}. \newblock Cambridge University Press, Cambridge UK. \bibitem[Eddy(2002)Eddy]{Eddy02b} Eddy, S.~R. (2002). \newblock A memory-efficient dynamic programming algorithm for optimal alignment of a sequence to an {RNA} secondary structure. \newblock {\em BMC Bioinformatics\/}, {\bf 3}, 18. \bibitem[Eddy(2003)Eddy]{infguide03} Eddy, S.~R. (2003). \newblock The {I}nfernal user's guide. \newblock [http://infernal.janelia.org/]. \bibitem[Eddy and Durbin(1994)Eddy and Durbin]{Eddy94} Eddy, S.~R. and Durbin, R. (1994). \newblock {RNA} sequence analysis using covariance models. \newblock {\em Nucl. Acids Res.}, {\bf 22}, 2079--2088. \bibitem[Freyhult {\em et~al.}(2007)Freyhult, Bollback, and Gardner]{Freyhult07} Freyhult, E.~K., Bollback, J.~P., and Gardner, P.~P. (2007). 
\newblock Exploring genomic dark matter: A critical assessment of the performance of homology search methods on noncoding {RNA}. \newblock {\em Genome Res.}, {\bf 17}, 117--125. \bibitem[Gardner {\em et~al.}(2009)Gardner, Daub, Tate, Nawrocki, Kolbe, Lindgreen, Wilkinson, Finn, Griffiths-Jones, Eddy, and Bateman]{Gardner09} Gardner, P.~P., Daub, J., Tate, J.~G., Nawrocki, E.~P., Kolbe, D.~L., Lindgreen, S., Wilkinson, A.~C., Finn, R.~D., Griffiths-Jones, S., Eddy, S.~R., and Bateman, A. (2009). \newblock Rfam: Updates to the {RNA} families database. \newblock NAR, in press. \bibitem[Gautheret and Lambert(2001)Gautheret and Lambert]{Gautheret01} Gautheret, D. and Lambert, A. (2001). \newblock Direct {RNA} motif definition and identification from multiple sequence alignments using secondary structure profiles. \newblock {\em J. Mol. Biol.}, {\bf 313}, 1003--1011. \bibitem[Grundy(1998)Grundy]{Grundy98b} Grundy, W.~N. (1998). \newblock Homology detection via family pairwise search. \newblock {\em J. Comput. Biol.}, {\bf 5}, 479--491. \bibitem[Huang {\em et~al.}(2008)Huang, Wu, Robertson, Feng, Malmberg, and Cai]{Huang08} Huang, Z., Wu, Y., Robertson, J., Feng, L., Malmberg, R., and Cai, L. (2008). \newblock Fast and accurate search for non-coding rna pseudoknot structures in genomes. \newblock {\em Bioinformatics\/}, {\bf 24}, 2281--2287. \bibitem[Klein and Eddy(2003)Klein and Eddy]{KleinEddy03} Klein, R.~J. and Eddy, S.~R. (2003). \newblock {RSEARCH:} finding homologs of single structured {RNA} sequences. \newblock {\em BMC Bioinformatics\/}, {\bf 4}, 44. \bibitem[Nawrocki and Eddy(2007)Nawrocki and Eddy]{NawrockiEddy07} Nawrocki, E.~P. and Eddy, S.~R. (2007). \newblock Query-dependent banding ({QDB}) for faster {RNA} similarity searches. \newblock {\em PLoS Comput. Biol.}, {\bf 3}, e56. \bibitem[Sakakibara {\em et~al.}(1994)Sakakibara, Brown, Hughey, Mian, Sj{\"{o}}lander, Underwood, and Haussler]{Sakakibara94c} Sakakibara, Y., Brown, M., Hughey, R., Mian, I.~S., Sj{\"{o}}lander, K., Underwood, R.~C., and Haussler, D. (1994). \newblock Stochastic context-free grammars for {tRNA} modeling. \newblock {\em Nucl. Acids Res.}, {\bf 22}, 5112--5120. \bibitem[Weinberg and Ruzzo(2006)Weinberg and Ruzzo]{WeinbergRuzzo06} Weinberg, Z. and Ruzzo, W.~L. (2006). \newblock Sequence-based heuristics for faster annotation of non-coding {RNA} families. \newblock {\em Bioinformatics\/}, {\bf 22}, 35--39. \bibitem[Zhang {\em et~al.}(2005)Zhang, Haas, Eskin, and Bafna]{ZhangBafna05} Zhang, S., Haas, B., Eskin, E., and Bafna, V. (2005). \newblock Searching genomes for noncoding {RNA} using {FastR}. \newblock {\em IEEE/ACM Trans. Comput. Biol. Bioinform.}, {\bf 2}, 366--379. \end{thebibliography} \end{application} \end{document}
{ "alphanum_fraction": 0.743616058, "avg_line_length": 47.6005747126, "ext": "tex", "hexsha": "bae508a2ec0a3ea835bab90ea882b6943cbc2edc", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "a358a4984a90efd8177a82440f7576204735ae5c", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "lamby/infernal", "max_forks_repo_path": "Manuscripts/inf-1-appnote/manuscript.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "a358a4984a90efd8177a82440f7576204735ae5c", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "lamby/infernal", "max_issues_repo_path": "Manuscripts/inf-1-appnote/manuscript.tex", "max_line_length": 164, "max_stars_count": null, "max_stars_repo_head_hexsha": "a358a4984a90efd8177a82440f7576204735ae5c", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "lamby/infernal", "max_stars_repo_path": "Manuscripts/inf-1-appnote/manuscript.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 4625, "size": 16565 }
\section{Future Work}

There are significant possibilities for expanding this work.
%The initial intent was to develop a Pluggable Transport, and this is the most important piece of future enhancement. This would involve either modifying obfsproxy or manually integrating with with Tor.

The biggest improvement could be made in the framing module. This module could vary the encoding scheme and encoding amount such that the same style of data is not seen consistently. This can be done by buffering data up to a randomly chosen amount before sending, with a timer bounding how long data may be held. If that amount accumulates before the timer expires, the data is sent, and a new random time and amount are chosen. This should make inter-arrival times more random than they normally are for Tor. Which encoding scheme is chosen depends on whether HTPT is acting in client or server mode, and on the data rate. The data rate is measured based on the previous transmission. That is, if 1000 bytes was received in the previous 10 milliseconds, the data rate can be calculated as 100,000 bytes per second. This assumes that the data rate stays constant, which is why the timer and buffer sizes are used in conjunction with each other. Integrating this into the obfsproxy framework and bundling it with the Tor Browser Bundle would also be good to do in the future.

Another enhancement is to strengthen authentication. The current authentication is HTTP's basic access authentication. This is very simplistic, and any information is sent in plaintext. Clearly, that is a security problem, so a more secure authentication scheme is justified. Replay attacks need to be looked at in greater detail to determine if further mitigation is needed.

The performance of this tool could also be improved by implementing HTTP pipelining and reusing TCP connections. By sending multiple requests without waiting for a response, HTTP pipelining makes more effective use of the available bandwidth. TCP connection reuse also improves performance by avoiding an extra RTT each time data is sent and by allowing the sender to take advantage of congestion control and use more bandwidth.

%There is the possibility of the fingerprinting the header that all packets have. It would be hard to do, and would require stateful DPI. To mitigate this, encrypting the header or the entire packet again would be mitigate this. Encrypting the header is less costly computationally, and would be more than sufficient. Implementing some sort of key exchange algorithm (e.g., Diffie-Hellman) would be necessary as well.
{ "alphanum_fraction": 0.8118577075, "avg_line_length": 140.5555555556, "ext": "tex", "hexsha": "634b691cc17b92295f08735ad07461a6bcdfb3d3", "lang": "TeX", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2019-10-17T16:57:23.000Z", "max_forks_repo_forks_event_min_datetime": "2019-10-17T16:57:23.000Z", "max_forks_repo_head_hexsha": "eb9004fb3e40e760bb9add772340a5d5805a7558", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "ben-jones/facade", "max_forks_repo_path": "docs/FinalReport/FutureWork.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "eb9004fb3e40e760bb9add772340a5d5805a7558", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "ben-jones/facade", "max_issues_repo_path": "docs/FinalReport/FutureWork.tex", "max_line_length": 466, "max_stars_count": null, "max_stars_repo_head_hexsha": "eb9004fb3e40e760bb9add772340a5d5805a7558", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "ben-jones/facade", "max_stars_repo_path": "docs/FinalReport/FutureWork.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 503, "size": 2530 }
\makeatletter
\@ifundefined{rootpath}{\input{../../setup/preamble.tex}}\makeatother

\worksheetstart{Introduction}{1}{September 5, 2014}{Kasper}{../../}

In \bsref{sec:motivation} we describe the motivation of the work behind this report. We account for the related work in \bsref{sec:relatedwork}. We conduct a preliminary investigation of concurrency models in \bsref{sec:prelim}. Finally, we describe the problem statement in \bsref{sec:problemstatement}.

\label{chap:intro}

\section{Motivation}
\label{sec:motivation}
% Moore's law, scale with more cores, not a higher CPU clock
% Requirements on applications demand more performance
% Focus on scaling to multiple computers i.e. parallel computing, not just more cores
% Need for an evolution in approaches to writing parallel programs
% We will look at concurrency within the confines of a single machine

Moore's law\cite{moore1965cramming} is the empirical observation that the number of transistors per area on an integrated circuit approximately doubled every 18 months\cite[p. 203]{mack2011fifty}. As a result, processing speed kept increasing as more transistors were added. This increase has however stagnated, and processing power has mainly increased in the form of additional processing cores, as opposed to the speed of each of these cores\cite[p. 22]{sevenModels}. That is, computers have moved from having a \ac{CPU} consisting of a single core to one having multiple cores. This tendency is displayed in \bsref{fig:moores_in_reality}.

\begin{figure}[htbp]
\centering
\includegraphics[width=0.9\textwidth]{\rootpath/worksheets/introduction/figures/moores_core_vs_frequency}
\caption{A comparison of the development of Transistors, Frequency, and Cores\cite{isca2009}. A change in the tendency occurred around 2005. The overall number of transistors has kept increasing, but the increase in MHz has been replaced by an increase in the number of cores.}
\label{fig:moores_in_reality}
\end{figure}

Programming languages developed while it was believed that single core performance would keep increasing, such as assembly and C, were closely related to the von Neumann architecture. As a consequence, the programming model was designed for synchronous execution. This introduces a challenge in how programs should be written to execute efficiently on a multi-core processor.

A lot of effort has been invested into identifying suitable ways of programming for such architectures: With inspiration from the database domain, transactions have been integrated into software, known as \ac{STM}\cite{shavit1997software}\cite{scherer2005advanced}. \ac{FP} is having a renaissance, and is now being used outside the world of academia, for instance in the financial sector\cite{minsky2008caml}. Another proposal is the Actor model\cite{hewitt1973universal}, which, with the rise of multi-core processors, has gained a lot of popularity in different languages \cite{haller2007actors}\cite{hewitt2014actor}.

The purpose of this report is to compare different ways of concurrent programming with a focus on their runtime performance and characteristics. The focus will be on concurrency models that have seen widespread use, as well as models that are currently receiving academic and industrial interest. The result will assist programmers in selecting a concurrency model for their project based on the project's requirements.

\section{Related Work}\label{sec:relatedwork}
% Purpose of the preliminary analysis
This section contains an analysis of work related to concurrency models.
Relevant papers, articles, and other research material have been read in order to discover the state of the art and open problems within the area of concurrency models. The purpose of the analysis is to establish an overview which will be used to choose the further path of our investigation.

% Threads and locks won't do
% What is needed
% What else exists that fulfills the need

In ``The Free Lunch is Over''\cite{sutter2005free}, it is claimed that the era of gaining performance increases without changing a program is over:

\bsqoute{[...] if you want your application to benefit from the continued exponential throughput advances in new processors, it will need to be a well-written concurrent (usually multithreaded) application. And that’s easier said than done, because not all problems are inherently parallelizable and because concurrent programming is hard.}{Herb Sutter}

In other words, hardware limitations in processor development are having an impact on the way software should be developed to utilize the full potential of multi-core processors.

The key to taking advantage of the raw performance of future multi-core processors is to enable the programmer to harvest that performance\cite[p. 31]{asanovic2006landscape}. In ``The Landscape of Parallel Computing Research''\cite{asanovic2006landscape}, the authors propose that a programming model needs to enable both productivity and implementation efficiency\cite[p. 31]{asanovic2006landscape}. Productivity is defined as the speed at which a programmer can develop a program, and implementation efficiency is defined as the runtime performance of the program.

In recent years, research has gone into designing languages and language constructs that allow the programmer to easily scale an application over multiple cores or even machines. One of the languages designed is X10\cite{charles2005x10}. X10 includes a language construct that allows the programmer to specify where a statement should be executed, using the notion of places\cite[p. 54]{tardieu2014x10}. Combined with specification of either synchronous or asynchronous execution, this construct allows the programmer to spawn tasks to execute on other machines. Spawning asynchronous tasks on the same machine is also possible\cite[p. 55]{tardieu2014x10}. X10 has been shown to achieve between 41\% and 87\% of the efficiency of the highly optimized IBM HPC class 1 benchmark C implementation\cite[p. 62]{tardieu2014x10}.

Existing languages have also gained constructs for scaling applications over multiple cores and machines. As an example, Cloud Haskell extends Haskell with constructs for asynchronous message passing, as inspired by Erlang\cite[p. 119]{epstein2011towards}. It further extends this idea with communication over statically typed channels\cite[p. 122]{epstein2011towards}. Cloud Haskell has been shown to scale better than the Hadoop MapReduce framework\cite[p. 128]{epstein2011towards}.

The productivity and implementation efficiency of a concurrency model should both be as high as possible, since both are desirable properties. While the wish to enable both productivity and implementation efficiency seems reasonable, the two goals are often conflicting.
An abstraction over concurrency mechanisms frees the programmer from low-level details, and increases productivity. However, by using the abstraction, the programmer loses the ability to fine-tune the underlying concurrency implementation for performance. A concurrency model must therefore seek the right level of abstraction, one that gives the programmer enough abstraction for a productivity increase without losing too much implementation efficiency.

With these findings taken into account, a concurrency model with abstraction over low-level details and solid runtime performance is of special interest for our investigation.

\section{Preliminary Investigation}\label{sec:prelim}
The purpose of this section is to document our preliminary concurrency model analysis. The analysis is conducted in order to establish a foundation on which to choose the models to analyse further. For each model, a reason for looking into it will be given, followed by a short description, which will provide an overview of the model.

\subsection{\acl{TL}}
\label{subsec:tl}
The \ac{TL} model has been chosen based on its historical significance and widespread commercial use\cite[p. 58]{sutter2005software}. The traditional \ac{TL} approach is based on shared memory and uses locks to limit access to critical regions in order to ensure correct execution\cite[p. 1]{saha2006mcrt}. The use of threads and locks leads to a number of issues, including deadlocks, difficult fine-grained synchronization, and no support for error recovery\cite[p. 187]{saha2006mcrt}. The \ac{TL} model has been implemented in many languages, including C and Java.

A problem of particular importance in modern software development is composition of code segments. Many programs rely on libraries to handle part of their operations. Using the \ac{TL} approach, it is however not guaranteed that combining two lock-based code segments will result in a correct program\cite[p. 56]{sutter2005software}. Using the \ac{TL} approach, it is left up to the programmer to identify correct lock placements as well as to balance lock granularity against performance\cite[p. 49]{harris2005composable}. \ac{TL} concurrency is generally believed to be hard to get right\cite[p. 92]{herlihy2003software}.

Arguments have been made that the \ac{TL} approach is insufficient for today's concurrency needs and that new models which put less strain on the programmer are needed\cite[p. 3]{jones2007beautiful}\cite[p. 48]{harris2005composable}.

\subsection{Actor model}
%Formalism-centric programming models, such as Actors [Hewitt et al 1973], try to reduce the chance of programmer making mistakes by having clean semantics and offer the chance to remove bugs by verifying correctness of portions of the code. "A view from berkeley, p. 33" - Toby: Maybe use this in the conclusion of which model to choose

The actor model was developed by Carl Hewitt and his team at MIT in 1973\cite{hewitt1973universal}. The goal was to simplify building concurrent systems and to simplify reasoning about concurrent programs\cite[p. 14]{haller2012actors}. Since the actor model seeks to solve well-known problems of concurrency, which we describe further in \bsref{sec:tl_ci}, the model is a candidate of interest for further investigation.

The actor model is a general model for concurrency that can be used with almost any language, and it is often known as the concurrency model of Erlang\footnote{\url{http://erlang.org}}. The actor model is directly embedded within Erlang\cite[p. 115]{sevenModels}.
Other languages that implement the actor model, either directly or indirectly, include Scala, Smalltalk, Java, and C++\footnote{http://c2.com/cgi/wiki?ActorLanguages}.
%In addition to the goal of the actor model, the popularity and widespread use of the actor model also contributes to our interest to the model\andreas{Source?}.

The idea of the actor model is to use actors as the fundamental unit of computation. An actor has the following essential elements of computation\cite{actorLangNextVideo}: processing, storage, and communication. The actor encapsulates state and communicates with other actors through asynchronous message passing. In response to a message an actor can\cite{hewitt2014actor}:

\begin{itemize}
\item Create new actors
\item Send messages to actors it knows
\item Modify internal behavior (how the next message it receives should be handled)
\end{itemize}

An actor has a mailbox in which messages sent to the actor arrive. The actor dequeues a message from the mailbox and processes that message. Messages are processed one at a time. Messages sent can take an arbitrarily long time to arrive, and if sent concurrently can arrive in a mailbox in any order\cite{hewitt2014actor}.

By avoiding shared state the actor model removes the issue of race conditions\citep[Chap. 32]{odersky2011programming}. Instead, the actor model allows isolated mutable state on actors and relies solely on asynchronous message passing between actors.

\subsection{\acl{Rx}}
At Microsoft Research, Erik Meijer and his team have developed \ac{Rx}, which is a library for composing asynchronous and event-based programs. The approach has gained a lot of attention from big players in the industry. As a result, the approach has spread to a number of platforms. For instance, Netflix has ported it to Java\cite{RxJava}, and Facebook ported it to JavaScript\cite{react}. Google is also inspired by the idea in its new language Dart\cite{dart}.

\ac{Rx} gives reactive capabilities to mainstream imperative languages, such as Java and C\#\cite{csharp}. The idea is to abstract over the complexity that asynchronous computations introduce by providing a way to orchestrate asynchronous data streams in a uniform way, regardless of the underlying concurrency model.

\ac{Rx} provides a way to deal with asynchronous data streams in the same way programmers deal with synchronous data streams. In Java, a synchronous data stream is a data structure which implements the \bscode{Iterable} interface\footnote{\url{http://docs.oracle.com/javase/7/docs/api/java/lang/Iterable.html}}. However, while iterating over an \bscode{Iterable} blocks between each iteration, \ac{Rx} does not block between each push of the next element. This is a key difference, since not blocking while doing concurrent computations is essential for performance.

It is based on the idea of the Observer Pattern\cite{gamma1994design}. That is, whenever the subject of observation, called an observable, is changed, the observers are notified and can react to the change. This is said to be a push-based approach, since changes are pushed out to the observers. This is true for both the Observer Pattern and \ac{Rx}. Since \ac{Rx} is intended for asynchronous tasks that can fail, for example network calls, it extends the Observer Pattern and provides a way to notify its observers whenever an error occurs or the stream ends.

\ac{Rx} can hardly be called a concurrency model itself, since it cannot be used to spawn concurrent tasks.
However, it provides a uniform way to structure asynchronous tasks independently of the underlying concurrency model. The underlying model could be \ac{TL}, actors, or a third alternative. It does not matter, since this is abstracted away from the client, which treats all interaction with Observables as asynchronous.

\subsection{\acl{STM}}
\label{sec:prelim_stm}
\ac{STM} has been viewed by many people as a promising direction for solving concurrency issues\cite{sutter2005software}. \ac{STM} has received a lot of attention due to its promise of simplifying shared memory concurrency and is still an active area of research. Due to this, it is a natural candidate of interest.

\ac{STM} takes an approach to concurrency that is based on transactions as known from database theory\cite[p. 1]{shavit1997software}. \ac{STM} transactions are:
\begin{inparaenum}[(1)]
\item Atomic: all writes in a given transaction are committed, or none of them are.
\item Isolated: transactions do not interfere with one another\cite{herlihy2011tm}.
\end{inparaenum}
These properties are similar to the \ac{ACID} properties known from databases\cite[p. 754]{elmasri2011fundamentals}. \ac{STM} is, either directly or using a library, implemented in a number of languages, including Clojure\cite[p. 101]{sevenModels}, Concurrent Haskell\cite{harris2005composable} and Scala\cite{goodman2011muts}.

Generally, the idea is that programmers specify regions which are to be executed as a transaction. The compiler or library then takes care of ensuring that the \ac{STM} transaction principles are maintained and that the transactions are eventually committed\cite[p. 1]{saha2006mcrt}. A transaction will be retried if any conflicts occur during its execution, a conflict being a violation of the isolation principle described above. It is retried until it is successful or some upper limit is reached.

The strength of \ac{STM} lies in the avoidance of many of the issues mentioned in \bsref{subsec:tl} that plague the traditional \ac{TL} approach. \ac{STM} avoids deadlocks and priority inversion, and eliminates the issue of balancing lock granularity versus performance\cite[p. 1]{harris2005composable}.

\subsection{Communicating Sequential Processes}
The \ac{CSP} model was invented by C. A. R. Hoare\cite{hoare1978communicating}, and the paper is widely regarded as one of the most influential papers in computer science\cite{abdallah2005communicating}. \ac{CSP} has influenced the design of numerous languages, and it is implemented either directly in the language or indirectly in the form of a library. This includes programming languages such as Ada, occam and Concurrent ML\cite{abdallah2005communicating}. The model still receives a lot of attention, and the recent popularity is especially due to the Go\footnote{https://golang.org/doc/faq\#csp} programming language developed by Google, as Go builds its concurrency model on the ideas of \ac{CSP}\cite[Chap. 6]{sevenModels}. \ac{CSP} is therefore seen as an important and viable candidate for consideration.

A process in \ac{CSP} is a basic construct that operates independently, and communication between such processes enables concurrency\cite{ibmCSP}. There is no shared state between processes, and communication is accomplished solely through message passing to and from channels. A channel in \ac{CSP} is a first-class queue where messages are added at one end and removed at the other\cite[Chap. 6]{sevenModels}. It is possible to have arbitrarily many reader/writer processes on a channel.
The strength of \ac{CSP} lies in avoiding many of the issues that are related to shared state by modelling computations as independent, isolated processes which communicate through channels.

% mention that it originally is a mathematical model?
% something about CSP having changed over time? Actor has always stayed the same

% On the choice of models: CSP and Actor are very similar, so we choose to look only at Actor (but there are differences! possibly point them out, or write a small section about it in the Actor section)
% Possibly justify deselecting CSP because it is more mathematically oriented, which is not attractive for
% CSP focuses on channels (communication between entities), whereas Actor focuses on actors (entities)

\section{Scope}
\label{sec:intro_scope}
Since it is not feasible to investigate every concurrency model, we will limit the scope of this report to the concurrency models that, with our current knowledge, have the most potential to solve known problems of concurrency. In our preliminary analysis, we briefly covered the traditional \ac{TL} model, as well as the actor model, \ac{Rx}, \ac{STM} and \ac{CSP} approaches.

From these models we have chosen to continue our investigation into \ac{TL}, Actors, and \ac{STM}. \ac{TL} has been selected based on its widespread use and historical significance. Actors and \ac{STM} have been chosen based on their up-and-coming usage and ongoing research efforts. Each of these models also has a vastly different approach to handling concurrency, and will provide varying perspectives. We choose not to investigate \ac{Rx} as it is not a concurrency model in itself, since it cannot spawn concurrent tasks, and it therefore does not fit into our investigation.

The actor model and CSP are similar models, as they both consist of independent, concurrently executing entities (processes in CSP and actors in the actor model) that only communicate by sending messages. The important difference between the models is that the actor model focuses on the entities (actors), whereas CSP focuses on the channels used to communicate between entities (processes)\cite[p. 153]{sevenModels}. Due to the similarity of the models we have chosen to look only at one of them. The actor model has been chosen as it has gained traction through implementations in languages such as Erlang and frameworks such as Akka.

In order to analyse the chosen concurrency models, we will produce an implementation with each model. To test the runtime performance of the models, and not of the language that contains the model, we need to either find a language that contains all of the chosen models, or a common foundation for the languages chosen. To the best of our knowledge, it is not possible to find a single language that contains native implementations of all the selected concurrency models. This is in spite of extensive research into existing languages. Although libraries implementing the models do exist for a variety of languages, they do not yield the same optimization opportunities as languages with a native implementation, which can perform compile-time optimizations.

Due to this, multiple languages will be employed. One language will be chosen for each of the selected models, and this language will be used to exemplify the model as well as to implement the performance test case described in \bsref{chap:performance}. To ensure that our performance test produces comparable results across these languages, a common foundation is needed.
Such a foundation is to ensure that the runtime environments of the languages are similar enough to produce comparable results, e.g. not comparing the performance of a natively compiled language, such as C++, to an interpreted language, such as Python.

We have chosen to limit the languages employed to those running on the \ac{JVM}. The \ac{JVM} was explicitly designed for the Java language, but a large number of languages, of varying paradigms, target the \ac{JVM} \cite[p. 1]{singer2003jvm}. This provides a large basis for selection of languages, all executing on the same runtime. Sharing the runtime will enable better comparability of performance.

The \ac{CLR} was also considered as a common basis. While it is designed for being the target of multiple languages, it does not have the platform independence that the \ac{JVM} offers. An open source implementation, running on Linux, is available through the Mono project\footnote{\url{http://www.mono-project.com/}}. This has however been shown to have significantly worse performance than its Windows counterpart\cite[p. 59]{totoo2012haskell}.

\section{Problem Statement}\label{sec:problemstatement}
In order to structure our investigation into concurrency models, we have extracted a number of problem statement questions. The questions and selected models are based on findings presented in our preliminary analysis, described previously. These questions will serve as a guideline for our further investigation, and will be used to conclude upon the project in \bsref{chap:conclusion}. The questions we seek to answer are:

\begin{enumerate}
\item Which issues exist with the traditional \ac{TL} concurrency model?
\item What are the characteristics of the selected models, including their strengths and weaknesses?
\item How do the selected models handle concurrency issues known from the traditional \ac{TL} approach?
\item What is the runtime performance of the selected models?
\end{enumerate}

Using the knowledge obtained from answering these questions, we will present an overview of the selected concurrency models. Specifically, we will present an overview of their characteristics and runtime performance, along with a set of suggestions for when to apply each model.

\subsection{Learning Goals}
Along with the definition of a problem statement, a set of learning goals has been defined. While answering the questions presented in the problem statement is the main focus, the learning goals can be viewed as a set of sub-goals that are aimed at stimulating our learning. The learning goals are defined as:

\begin{enumerate}
\item Learn what concurrency models exist and in what settings they are best applied.
\item Produce a sample implementation using each of the selected models in order to gain hands-on experience.
\item Get hands-on experience with the Scala\footnote{\url{http://www.scala-lang.org/}} programming language, as it is gaining traction as a productive multiparadigm language used by big companies such as Twitter, LinkedIn, Foursquare, and SoundCloud.
\end{enumerate}

\worksheetend
{ "alphanum_fraction": 0.8084079376, "avg_line_length": 156.3806451613, "ext": "tex", "hexsha": "3a975ab09f4a8653de12e14b5401759c0825811f", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "72c03fa65582c0c55b28344c68d5416b083f2369", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "Felorati/scalability", "max_forks_repo_path": "Report/worksheets/introduction/introduction.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "72c03fa65582c0c55b28344c68d5416b083f2369", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "Felorati/scalability", "max_issues_repo_path": "Report/worksheets/introduction/introduction.tex", "max_line_length": 1353, "max_stars_count": null, "max_stars_repo_head_hexsha": "72c03fa65582c0c55b28344c68d5416b083f2369", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "Felorati/scalability", "max_stars_repo_path": "Report/worksheets/introduction/introduction.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 5297, "size": 24239 }
\documentclass[a4paper, 12pt]{article} \usepackage{amsmath} \usepackage{amssymb} \usepackage{dsfont} \usepackage[left=1.5cm, right=1.5cm, bottom=2cm, top=2cm]{geometry} \usepackage{graphicx} \usepackage{hyperref} \usepackage[utf8]{inputenc} \usepackage{microtype} \usepackage{natbib} \newcommand{\btheta}{\boldsymbol{\theta}} \title{} \author{Brendon J. Brewer} \date{} \begin{document} \maketitle \abstract{} % Need this after the abstract \setlength{\parindent}{0pt} \setlength{\parskip}{8pt} \section{Introduction} \bibliographystyle{chicago} \bibliography{references.bib/references} \end{document}
{ "alphanum_fraction": 0.7655737705, "avg_line_length": 18.4848484848, "ext": "tex", "hexsha": "1a87e9fe56c5a93b2ec421024262c5c1be4628d9", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "c823510fd533289e92c856fadee3b2b332c2c935", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "eggplantbren/Templates", "max_forks_repo_path": "ms.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "c823510fd533289e92c856fadee3b2b332c2c935", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "eggplantbren/Templates", "max_issues_repo_path": "ms.tex", "max_line_length": 67, "max_stars_count": null, "max_stars_repo_head_hexsha": "c823510fd533289e92c856fadee3b2b332c2c935", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "eggplantbren/Templates", "max_stars_repo_path": "ms.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 201, "size": 610 }
\subsection{Camera and viewing parameters}
\index{camera}

One of the most important parts of any scene is the camera position
and orientation.  Having a good angle on a scene can make the
difference between an average-looking scene and a strikingly
interesting one.  There may be multiple camera definitions in a scene
file, but the last camera definition overrides all previous
definitions.  There are several parameters that control the camera in
\RAY: {\bf PROJECTION}, {\bf ZOOM}, {\bf ASPECTRATIO},
{\bf ANTIALIASING}, {\bf CENTER}, {\bf RAYDEPTH}, {\bf VIEWDIR}, and
{\bf UPDIR}.  The first and last keywords required in the definition
of a camera are the {\bf CAMERA} and {\bf END\_CAMERA} keywords.  The
{\bf PROJECTION} keyword is optional; the remaining camera keywords
are required, and must be written in the sequence they are listed in
the examples in this section.

\subsubsection{Camera projection modes}
\index{camera!projection}
The {\bf PROJECTION} keyword must be followed by one of the supported
camera projection mode identifiers {\bf PERSPECTIVE},
{\bf PERSPECTIVE\_DOF}, {\bf ORTHOGRAPHIC}, or {\bf FISHEYE}.  The
{\bf PERSPECTIVE\_DOF} projection mode requires two extra parameters,
{\bf FOCALLENGTH} and {\bf APERTURE}, which precede the regular camera
options.

\begin{verbatim}
Camera
  projection perspective_dof
  focallength 0.75
  aperture 0.02
  Zoom 0.666667
  Aspectratio 1.000000
  Antialiasing 128
  Raydepth 30
  Center   0.000000 0.000000 -2.000000
  Viewdir -0.000000 -0.000000  2.000000
  Updir    0.000000 1.000000 -0.000000
End_Camera
\end{verbatim}
Once you have determined where the camera will be placed in the scene, you
need to tell \RAY\ what the camera should be looking at. The {\bf VIEWDIR}
parameter is a vector indicating the direction the camera is facing. It may
be useful for me to add a ``Look At'' type keyword in the future to make
camera aiming easier. If people want or need the ``Look At'' style camera,
let me know. The last parameter needed to completely define a camera is the
``up'' direction. The {\bf UPDIR} parameter is a vector which points in the
direction of the ``sky''. I wrote the camera so that {\bf VIEWDIR} and
{\bf UPDIR} don't have to be perpendicular, and there shouldn't be a need for
a ``right'' vector, although some other ray tracers require it. Here's a
snippet of a camera definition:

\begin{verbatim}
CAMERA
  ZOOM 1.0
  ASPECTRATIO 1.0
  ANTIALIASING 0
  RAYDEPTH 12
  CENTER 0.0 0.0 2.0
  VIEWDIR 0 0 -1
  UPDIR 0 1 0
END_CAMERA
\end{verbatim}

\subsubsection{Viewing frustum}
\index{camera!viewing frustum}
An optional {\bf FRUSTUM} parameter provides a means for rendering sub-images
in a larger frame, and for rendering correct stereoscopic images. The
{\bf FRUSTUM} keyword must be followed by four floating point parameters,
which indicate the top, bottom, left and right coordinates of the image plane
in eye coordinates. When the projection mode is set to {\bf FISHEYE}, the
frustum parameters correspond to spherical coordinates specified in radians.

\begin{verbatim}
CAMERA
  ZOOM 1.0
  ASPECTRATIO 1.0
  ANTIALIASING 0
  RAYDEPTH 4
  CENTER 0.0 0.0 -6.0
  VIEWDIR 0.0 0.0 1.0
  UPDIR 0.0 1.0 0.0
  FRUSTUM -0.5 0.5 -0.5 0.5
END_CAMERA
\end{verbatim}
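As an illustration of the sub-image use, the following sketch reuses the
camera above but narrows the frustum to the left half of the default image
plane, assuming the top, bottom, left, right ordering described above.
Rendering this camera at half the horizontal resolution of the full frame
should produce the left tile of the complete image; this is a sketch of the
idea rather than a tested configuration.

\begin{verbatim}
CAMERA
  ZOOM 1.0
  ASPECTRATIO 1.0
  ANTIALIASING 0
  RAYDEPTH 4
  CENTER 0.0 0.0 -6.0
  VIEWDIR 0.0 0.0 1.0
  UPDIR 0.0 1.0 0.0
  FRUSTUM -0.5 0.5 -0.5 0.0
END_CAMERA
\end{verbatim}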
{ "alphanum_fraction": 0.7633455386, "avg_line_length": 41.8467741935, "ext": "tex", "hexsha": "29ca37b6cb70172d3cdfe09092f99fb51fa0a12b", "lang": "TeX", "max_forks_count": 98, "max_forks_repo_forks_event_max_datetime": "2020-11-21T18:22:13.000Z", "max_forks_repo_forks_event_min_datetime": "2019-08-30T14:29:16.000Z", "max_forks_repo_head_hexsha": "9f55e03c9d80c024a75029d0e842cc5c92f31c82", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "rhudson2802/sunflower-simulator", "max_forks_repo_path": "benchmarks/source/superh/ALPBench/Ray_Trace/tachyon/docs/camera.tex", "max_issues_count": 80, "max_issues_repo_head_hexsha": "9f55e03c9d80c024a75029d0e842cc5c92f31c82", "max_issues_repo_issues_event_max_datetime": "2020-12-16T11:56:19.000Z", "max_issues_repo_issues_event_min_datetime": "2019-08-27T14:43:46.000Z", "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "rhudson2802/sunflower-simulator", "max_issues_repo_path": "benchmarks/source/superh/ALPBench/Ray_Trace/tachyon/docs/camera.tex", "max_line_length": 77, "max_stars_count": 7, "max_stars_repo_head_hexsha": "9f55e03c9d80c024a75029d0e842cc5c92f31c82", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "rhudson2802/sunflower-simulator", "max_stars_repo_path": "benchmarks/source/superh/ALPBench/Ray_Trace/tachyon/docs/camera.tex", "max_stars_repo_stars_event_max_datetime": "2019-07-08T03:42:24.000Z", "max_stars_repo_stars_event_min_datetime": "2016-05-07T13:38:33.000Z", "num_tokens": 1420, "size": 5189 }
\PassOptionsToPackage{unicode=true}{hyperref} % options for packages loaded elsewhere \PassOptionsToPackage{hyphens}{url} % \documentclass[]{book} \usepackage{lmodern} \usepackage{amssymb,amsmath} \usepackage{ifxetex,ifluatex} \usepackage{fixltx2e} % provides \textsubscript \ifnum 0\ifxetex 1\fi\ifluatex 1\fi=0 % if pdftex \usepackage[T1]{fontenc} \usepackage[utf8]{inputenc} \usepackage{textcomp} % provides euro and other symbols \else % if luatex or xelatex \usepackage{unicode-math} \defaultfontfeatures{Ligatures=TeX,Scale=MatchLowercase} \fi % use upquote if available, for straight quotes in verbatim environments \IfFileExists{upquote.sty}{\usepackage{upquote}}{} % use microtype if available \IfFileExists{microtype.sty}{% \usepackage[]{microtype} \UseMicrotypeSet[protrusion]{basicmath} % disable protrusion for tt fonts }{} \IfFileExists{parskip.sty}{% \usepackage{parskip} }{% else \setlength{\parindent}{0pt} \setlength{\parskip}{6pt plus 2pt minus 1pt} } \usepackage{hyperref} \hypersetup{ pdftitle={Survey in Ecological Data Collection Tools \& Products}, pdfauthor={Dr.~Katharyn Duffy, Dr.~Ben Ruddell}, pdfborder={0 0 0}, breaklinks=true} \urlstyle{same} % don't use monospace font for urls \usepackage{color} \usepackage{fancyvrb} \newcommand{\VerbBar}{|} \newcommand{\VERB}{\Verb[commandchars=\\\{\}]} \DefineVerbatimEnvironment{Highlighting}{Verbatim}{commandchars=\\\{\}} % Add ',fontsize=\small' for more characters per line \usepackage{framed} \definecolor{shadecolor}{RGB}{248,248,248} \newenvironment{Shaded}{\begin{snugshade}}{\end{snugshade}} \newcommand{\AlertTok}[1]{\textcolor[rgb]{0.94,0.16,0.16}{#1}} \newcommand{\AnnotationTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}} \newcommand{\AttributeTok}[1]{\textcolor[rgb]{0.77,0.63,0.00}{#1}} \newcommand{\BaseNTok}[1]{\textcolor[rgb]{0.00,0.00,0.81}{#1}} \newcommand{\BuiltInTok}[1]{#1} \newcommand{\CharTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}} \newcommand{\CommentTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textit{#1}}} \newcommand{\CommentVarTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}} \newcommand{\ConstantTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}} \newcommand{\ControlFlowTok}[1]{\textcolor[rgb]{0.13,0.29,0.53}{\textbf{#1}}} \newcommand{\DataTypeTok}[1]{\textcolor[rgb]{0.13,0.29,0.53}{#1}} \newcommand{\DecValTok}[1]{\textcolor[rgb]{0.00,0.00,0.81}{#1}} \newcommand{\DocumentationTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}} \newcommand{\ErrorTok}[1]{\textcolor[rgb]{0.64,0.00,0.00}{\textbf{#1}}} \newcommand{\ExtensionTok}[1]{#1} \newcommand{\FloatTok}[1]{\textcolor[rgb]{0.00,0.00,0.81}{#1}} \newcommand{\FunctionTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}} \newcommand{\ImportTok}[1]{#1} \newcommand{\InformationTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}} \newcommand{\KeywordTok}[1]{\textcolor[rgb]{0.13,0.29,0.53}{\textbf{#1}}} \newcommand{\NormalTok}[1]{#1} \newcommand{\OperatorTok}[1]{\textcolor[rgb]{0.81,0.36,0.00}{\textbf{#1}}} \newcommand{\OtherTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{#1}} \newcommand{\PreprocessorTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textit{#1}}} \newcommand{\RegionMarkerTok}[1]{#1} \newcommand{\SpecialCharTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}} \newcommand{\SpecialStringTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}} \newcommand{\StringTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}} \newcommand{\VariableTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}} \newcommand{\VerbatimStringTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}} 
\newcommand{\WarningTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}}
\usepackage{longtable,booktabs}
% Fix footnotes in tables (requires footnote package)
\IfFileExists{footnote.sty}{\usepackage{footnote}\makesavenoteenv{longtable}}{}
\usepackage{graphicx,grffile}
\makeatletter
\def\maxwidth{\ifdim\Gin@nat@width>\linewidth\linewidth\else\Gin@nat@width\fi}
\def\maxheight{\ifdim\Gin@nat@height>\textheight\textheight\else\Gin@nat@height\fi}
\makeatother
% Scale images if necessary, so that they will not overflow the page
% margins by default, and it is still possible to overwrite the defaults
% using explicit options in \includegraphics[width, height, ...]{}
\setkeys{Gin}{width=\maxwidth,height=\maxheight,keepaspectratio}
\setlength{\emergencystretch}{3em}  % prevent overfull lines
\providecommand{\tightlist}{%
  \setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}}
\setcounter{secnumdepth}{5}
% Redefines (sub)paragraphs to behave more like sections
\ifx\paragraph\undefined\else
\let\oldparagraph\paragraph
\renewcommand{\paragraph}[1]{\oldparagraph{#1}\mbox{}}
\fi
\ifx\subparagraph\undefined\else
\let\oldsubparagraph\subparagraph
\renewcommand{\subparagraph}[1]{\oldsubparagraph{#1}\mbox{}}
\fi
% set default figure placement to htbp
\makeatletter
\def\fps@figure{htbp}
\makeatother
\usepackage{booktabs}
\usepackage{amsthm}
\makeatletter
\def\thm@space@setup{%
  \thm@preskip=8pt plus 2pt minus 4pt
  \thm@postskip=\thm@preskip
}
\makeatother
\usepackage[]{natbib}
\bibliographystyle{apalike}

\title{Survey in Ecological Data Collection Tools \& Products}
\author{Dr.~Katharyn Duffy, Dr.~Ben Ruddell}
\date{2020-07-31}

\begin{document}
\maketitle

{
\setcounter{tocdepth}{1}
\tableofcontents
}
\hypertarget{preface}{%
\chapter*{Preface}\label{preface}}
\addcontentsline{toc}{chapter}{Preface}

Welcome to our live INF550 course textbook. This live text is a
combination of materials, videos, example code, and assignments.

\hypertarget{acknowledgements}{%
\section*{Acknowledgements}\label{acknowledgements}}
\addcontentsline{toc}{section}{Acknowledgements}

As with all open source data and tools, this book is built upon the
shoulders of giants who came before me. Special thanks to Megan Jones
and Donal O'Leary at Battelle for their support in pulling NEON
materials.

\hypertarget{ecoinformatics-tools}{%
\chapter{Ecoinformatics Tools}\label{ecoinformatics-tools}}

As an Ecoinformatician you \emph{need} to be able to:

\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\item
  Pull data from Application Programming Interfaces (APIs)

  \begin{itemize}
  \tightlist
  \item
    More on this in Chapter 2
  \end{itemize}
\item
  Organize and document your code and data
\item
  Version control your code to avoid disaster and make it reproducible

  \begin{itemize}
  \tightlist
  \item
    For you, your collaborators, and/or the wider community
  \end{itemize}
\item
  Push your code up to public-facing repositories
\item
  Pull others' code from public repositories.
\end{enumerate}

More thoughts on the benefits and power of reproducibility
\href{https://github.com/katharynduffy/ECOSS_reproducible_science}{can
be found here}.

To be successful, both in this course and in your careers, you will need
these skills. This is why \textbf{they are a requirement} for this
course. If you are already using these skills on a daily basis,
fantastic! If some of this sounds a little Greek, I have placed lesson
links throughout this chapter so that you can build these skills and be
successful in this course.
\hypertarget{pre-course-skills-setup}{%
\section{Pre-Course Skills \& Setup}\label{pre-course-skills-setup}}

For the purpose of this course we will largely be using the following
tools to access, pull, and explore data:

\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\tightlist
\item
  R \& RStudio
\item
  Git, GitHub, \& Atom.io
\item
  Markdown \& Rmarkdown
\end{enumerate}

As such we will need to install and/or update these tools on your
personal computer \emph{before} our first day of class. While we chose
R for this course, nearly all of the packages and data are fully
available and transferable to Python or other languages.

If you'd like to brush up on your R skills, I highly recommend Data
Carpentry Bootcamp's free
\href{http://swcarpentry.github.io/r-novice-gapminder}{R for
Reproducible Scientific Analysis} course.

\hypertarget{installing-or-updating-r}{%
\subsection{Installing or Updating R}\label{installing-or-updating-r}}

Please check your version of R: you will need R 3.6.0+. Here is how to
check your version in R or RStudio if you already have it:

\begin{verbatim}
> version
_
platform x86_64-apple-darwin15.6.0
arch x86_64
os darwin15.6.0
system x86_64, darwin15.6.0
status
major 3
minor 5.1
year 2018
month 07
day 02
svn rev 74947
language R
version.string R version 3.5.1 (2018-07-02)
nickname Feather Spray
\end{verbatim}

If you don't already have R or need to update it,
\href{https://cran.rstudio.com/}{do so here}.

\hypertarget{windows-rrstudio-setup}{%
\subsection{Windows R/RStudio Setup}\label{windows-rrstudio-setup}}

After you have downloaded R, run the .exe file that was just
downloaded. Go to the
\href{https://www.rstudio.com/products/rstudio/download/\#download}{RStudio
Download page}. Under Installers, select RStudio X.XX.XXX,
e.g.~Windows Vista/7/8/10. Double click the file to install it. Once R
and RStudio are installed, click to open RStudio. If you don't get any
error messages you are set. If there is an error message, you will need
to re-install the program.

\hypertarget{mac-rrstudio-setup}{%
\subsection{Mac R/RStudio Setup}\label{mac-rrstudio-setup}}

After you have downloaded R, double click on the file that was
downloaded and R will install. Go to the
\href{https://www.rstudio.com/products/rstudio/download/\#download}{RStudio
Download page}. Under Installers, select RStudio 1.2.1135 - Mac OS X
XX.X (64-bit) to download it. Once it's downloaded, double click the
file to install it. Once R and RStudio are installed, click to open
RStudio. If you don't get any error messages you are set. If there is
an error message, you will need to re-install the program.

\hypertarget{linux-rrstudio-setup}{%
\subsection{Linux R/RStudio Setup}\label{linux-rrstudio-setup}}

R is available through most Linux package managers. You can download
the binary files for your distribution from CRAN, or you can use your
package manager, e.g.~for Debian/Ubuntu run

\begin{verbatim}
sudo apt-get install r-base
\end{verbatim}

and for Fedora run

\begin{verbatim}
sudo yum install R
\end{verbatim}

To install RStudio, go to the
\href{https://www.rstudio.com/products/rstudio/download/\#download}{RStudio
Download page}. Under Installers, select the version for your
distribution. Once it's downloaded, double click the file to install
it. Once R and RStudio are installed, click to open RStudio. If you
don't get any error messages you are set. If there is an error message,
you will need to re-install the program.
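Whichever platform you are on, once R is installed you can also confirm
from the R console that your version meets the 3.6.0+ requirement
mentioned above. A minimal check using base R (just a sketch of the
idea) is:

\begin{verbatim}
# Returns TRUE if the installed R version is at least 3.6.0
getRversion() >= "3.6.0"
\end{verbatim}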
\hypertarget{install-basic-packages-for-this-course}{%
\subsection{Install basic packages for this
course}\label{install-basic-packages-for-this-course}}

You can run the following script to make sure all the required packages
are properly installed on your computer.

\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{# list of required packages}
\NormalTok{list.of.packages <-}\StringTok{ }\KeywordTok{c}\NormalTok{(}
  \StringTok{'data.table'}\NormalTok{,}
  \StringTok{'tidyverse'}\NormalTok{,}
  \StringTok{'jsonlite'}\NormalTok{,}
  \StringTok{'jpeg'}\NormalTok{,}
  \StringTok{'png'}\NormalTok{,}
  \StringTok{'raster'}\NormalTok{,}
  \StringTok{'rgdal'}\NormalTok{,}
  \StringTok{'rmarkdown'}\NormalTok{, }
  \StringTok{'knitr'}
  \NormalTok{)}

\CommentTok{# identify new (not installed) packages}
\NormalTok{new.packages <-}\StringTok{ }\NormalTok{list.of.packages[}\OperatorTok{!}\NormalTok{(list.of.packages }\OperatorTok{%in%}\StringTok{ }\KeywordTok{installed.packages}\NormalTok{()[,}\StringTok{"Package"}\NormalTok{])]}

\CommentTok{# install new (not installed) packages}
\ControlFlowTok{if}\NormalTok{(}\KeywordTok{length}\NormalTok{(new.packages)) }
  \KeywordTok{install.packages}\NormalTok{(new.packages, }
                   \DataTypeTok{repos=}\StringTok{'http://cran.rstudio.com/'}\NormalTok{)}

\CommentTok{# load all of the required libraries}
\KeywordTok{sapply}\NormalTok{(list.of.packages, library, }\DataTypeTok{character.only =}\NormalTok{ T)}
\end{Highlighting}
\end{Shaded}

\textbf{Note}: On some operating systems, you may need to install the
Geospatial Data Abstraction Library (GDAL). More information about GDAL
can be found
\href{https://trac.osgeo.org/gdal/wiki/DownloadingGdalBinaries}{here}.

\hypertarget{installing-and-setting-up-git-github-on-your-machine}{%
\section{Installing and Setting up Git \& Github on Your
Machine}\label{installing-and-setting-up-git-github-on-your-machine}}

For this course you will need:

\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\tightlist
\item
  Git installed on your local machine
\item
  Very basic bash scripting
\item
  A linked GitHub account
\item
  To link RStudio to Git via RStudio or Atom.io
\end{enumerate}

As we will be using these skills constantly, they are a
\emph{pre-requisite} for this course. If you don't yet have these
skills it's okay! You can learn everything that you need to know via
the following freely available resources:

\begin{itemize}
\tightlist
\item
  \href{http://swcarpentry.github.io/shell-novice}{The Unix Shell}
\item
  \href{http://swcarpentry.github.io/git-novice}{Version Control with
  Git}
\item
  \href{https://happygitwithr.com/}{Happy Git with R}
\end{itemize}

If you are learning these skills from scratch, I estimate that you will
need to devote \textasciitilde{}4-6 hours to get set up and comfortable
with the various workflows. Also remember that I have code office hours
every week and that Stack Exchange is your friend.

\hypertarget{installing-atom}{%
\section{Installing Atom}\label{installing-atom}}

\href{https://atom.io/}{Atom.io} is a powerful and useful text editor
for the following reasons:

\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\item
  It is language agnostic
\item
  It fully integrates with Git and GitHub

  \begin{itemize}
  \tightlist
  \item
    You can use it to push/pull/resolve conflicts and write code all in
    one space.
  \end{itemize}
\end{enumerate}

\hypertarget{linking-rstudio-to-git}{%
\section{Linking RStudio to Git}\label{linking-rstudio-to-git}}

\href{https://happygitwithr.com/rstudio-git-github.html}{Happy Git with
R} has a fantastic tutorial to help you link RStudio, Git, and GitHub
on your local machine and push/pull from or to public repositories.
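As a quick sanity check that R (and therefore RStudio) can see your Git
installation at all, you can run the following from the R console; this
assumes Git is already installed and on your PATH:

\begin{verbatim}
# Returns the path to the git executable, or "" if R cannot find it
Sys.which("git")
\end{verbatim}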
\hypertarget{how-we-will-be-conducting-this-course}{%
\section{How we will be Conducting this
Course}\label{how-we-will-be-conducting-this-course}}

At the end of each chapter you will find a set of \textbf{Exercises}.
At the end of the assigned chapter you will be expected to submit two
files via BBLearn: 1. An
\href{https://rstudio.com/wp-content/uploads/2015/02/rmarkdown-cheatsheet.pdf}{RMarkdown
file} with the naming convention LASTNAME\_COURSECODE\_Section\#.Rmd,
and 2. A knitted .PDF with the same naming convention:
LASTNAME\_COURSECODE\_Section\#.pdf.

To generate these files you have two options:

\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\tightlist
\item
  Click on the pencil and pad logo in the top of this text, copy the
  exercise section code, and drop it into your own .Rmd.
\item
  Git clone our
  \href{https://github.com/katharynduffy/katharynduffy.github.io}{course
  Github Repository}, navigate to the `Exercises' folder, and use that
  .Rmd as a template.
\end{enumerate}

\emph{Note: Exercises submitted in any other format, or those missing
questions, will not be graded.}

To generate your .PDF to upload, simply push the `Knit' button at the
top of your RMarkdown document.

\hypertarget{introduction-to-neon-its-data}{%
\chapter{Introduction to NEON \& its
Data}\label{introduction-to-neon-its-data}}

\begin{longtable}[]{@{}l@{}}
\toprule
\endhead
Estimated Time: 1 hour\tabularnewline
\bottomrule
\end{longtable}

Here we will give a broad overview of the National Ecological
Observatory Network (NEON). Please carefully read through these
materials and links that discuss NEON's mission and design.

\leavevmode\hypertarget{ds-challenge}{}%
\textbf{Course participants}: As you review this information, please
consider the final course project that you will work on over this
semester. At the end of week two, you will document an initial research
question or idea and associated data needed to address that question,
that you may want to explore while pursuing this course.

\leavevmode\hypertarget{ds-objectives}{}%

\hypertarget{learning-objectives}{%
\section{Learning Objectives}\label{learning-objectives}}

At the end of this activity, you will be able to:

\begin{itemize}
\tightlist
\item
  Explain the mission of the National Ecological Observatory Network
  (NEON).
\item
  Explain how sites are located within the NEON project design.
\item
  Explain the different types of data that will be collected and
  provided by NEON.
\end{itemize}

\hypertarget{the-neon-project-mission-design}{%
\section{The NEON Project Mission \&
Design}\label{the-neon-project-mission-design}}

To capture ecological heterogeneity across the United States, NEON's
design divides the continent into 20 statistically different
eco-climatic domains. Each NEON field site is located within an
eco-climatic domain.

\hypertarget{the-science-and-design-of-neon}{%
\subsubsection{The Science and Design of
NEON}\label{the-science-and-design-of-neon}}

To gain a better understanding of the broad scope of NEON, watch this
4 minute long video. Please read the following page about NEON's
mission.

\hypertarget{neons-spatial-design}{%
\section{NEON's Spatial Design}\label{neons-spatial-design}}

\hypertarget{the-spatial-design-of-neon}{%
\subsubsection{The Spatial Design of
NEON}\label{the-spatial-design-of-neon}}

Watch this 4:22 minute video exploring the spatial design of NEON field
sites.
Please read the following page about NEON's Spatial Design:

\begin{itemize}
\tightlist
\item
  Read this primer on NEON's Sampling Design
\item
  Read about the different types of field sites - core and relocatable
\end{itemize}

\hypertarget{neon-field-site-locations}{%
\subsection{NEON Field Site Locations}\label{neon-field-site-locations}}

Explore the NEON Field Site map, taking note of the locations of:

\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\tightlist
\item
  Aquatic \& terrestrial field sites.
\item
  Core \& relocatable field sites.
\end{enumerate}

Click here to view the NEON Field Site Map.

Explore the NEON field site map. Do the following:

\begin{itemize}
\tightlist
\item
  Zoom in on a study area of interest to see if there are any NEON
  field sites that are nearby.
\item
  Click the ``More'' button in the \textbf{upper right hand} corner of
  the map to filter sites by name, site host, domain or state.
\item
  Select one field site of interest.

  \begin{itemize}
  \tightlist
  \item
    Click on the marker in the map.
  \item
    Then click on the name of the field site to jump to the field site
    landing page.
  \end{itemize}
\end{itemize}

\leavevmode\hypertarget{ds-dataTip}{}%
\textbf{Data Tip:} You can download maps, kmz, or shapefiles of the
field sites here.

\hypertarget{neon-data}{%
\section{NEON Data}\label{neon-data}}

\hypertarget{how-neon-collects-data}{%
\subsubsection{How NEON Collects Data}\label{how-neon-collects-data}}

Watch this 3:06 minute video exploring the data that NEON collects.

Read the Data Collection Methods page to learn more about the different
types of data that NEON collects and provides. Then, follow the links
below to learn more about each collection method:

\begin{itemize}
\tightlist
\item
  Aquatic Observation System (AOS)
\item
  Aquatic Instrument System (AIS)
\item
  Terrestrial Instrument System (TIS) -- Flux Tower
\item
  Terrestrial Instrument System (TIS) -- Soil Sensors and Measurements
\item
  Terrestrial Organismal System (TOS)
\item
  Airborne Observation Platform (AOP)
\end{itemize}

All data collection protocols and processing documents are publicly
available. Read more about the standardized protocols and how to access
these documents.

\hypertarget{specimens-samples}{%
\subsection{Specimens \& Samples}\label{specimens-samples}}

NEON also collects samples and specimens on which the other data
products are based. These samples are also available for research and
education purposes. Learn more: NEON Biorepository.

\hypertarget{airborne-remote-sensing}{%
\subsection{Airborne Remote Sensing}\label{airborne-remote-sensing}}

Watch this 5 minute video to better understand the NEON Airborne
Observation Platform (AOP).

\leavevmode\hypertarget{ds-dataTip}{}%
\textbf{Data Tip:} NEON also provides support for your own research,
including proposals to fly the AOP over other study sites, a mobile
tower/instrumentation setup, and others. Learn more about the
Assignable Assets programs here.

\hypertarget{accessing-neon-data}{%
\subsection{Accessing NEON Data}\label{accessing-neon-data}}

NEON data are processed and go through quality assurance and quality
control checks at NEON headquarters in Boulder, CO. NEON carefully
documents every aspect of sampling design, data collection, processing
and delivery. This documentation is freely available through the NEON
data portal.
\begin{itemize}
\tightlist
\item
  Visit the NEON Data Portal - data.neonscience.org
\item
  Read more about the quality assurance and quality control processes
  for NEON data and how the data are processed from raw data to higher
  level data products.
\item
  Explore NEON Data Products. On the page for each data product in the
  catalog you can find the basic information about the product, find
  the data collection and processing protocols, and link directly to
  downloading the data.
\item
  Additionally, some types of NEON data are also available through the
  data portals of other organizations. For example, NEON Terrestrial
  Insect DNA Barcoding Data is available through the Barcode of Life
  Datasystem (BOLD), and NEON phenocam images are available from the
  Phenocam network site. More details on where else the data are
  available can be found in the Availability and Download section on
  the Product Details page for each data product (visit Explore Data
  Products to access individual Product Details pages).
\end{itemize}

\hypertarget{pathways-to-access-neon-data}{%
\subsection{Pathways to access NEON
Data}\label{pathways-to-access-neon-data}}

There are several ways to access data from NEON:

\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\tightlist
\item
  Via the NEON data portal. Explore and download data. Note that much
  of the tabular data is available in zipped .csv files for each month
  and site of interest. To combine these files, use the neonUtilities
  package (R tutorial, Python tutorial).
\item
  Use R or Python to programmatically access the data. NEON and
  community members have created code packages to directly access the
  data through an API. Learn more about the available resources by
  reading the Code Resources page or visiting the NEONScience GitHub
  repo.
\item
  Using the NEON API. Access NEON data directly using a custom API
  call.
\item
  Access NEON data through partners' portals. Where NEON data directly
  overlap with other community resources, NEON data can be accessed
  through those portals. Examples include Phenocam, BOLD, Ameriflux,
  and others. You can learn more in the documentation for individual
  data products.
\end{enumerate}

\hypertarget{accessing-neon-data-1}{%
\section{Accessing NEON Data}\label{accessing-neon-data-1}}

\hypertarget{via-a-neon-api-token}{%
\subsection{Via a NEON API Token}\label{via-a-neon-api-token}}

NEON data can be downloaded from either the NEON Data Portal or the
NEON API. When downloading from the Data Portal, you can create a user
account. Read about the benefits of an account on the User Account
page. You can also use your account to create a token for using the
API. Your token is unique to your account, so don't share it.

While using a token is optional in general, it is required for this
course. Using a token when downloading data via the API, including when
using the neonUtilities package, links your downloads to your user
account, as well as enabling faster download speeds. For more
information about token usage and benefits, see the NEON API
documentation page.

For now, in addition to faster downloads, using a token helps NEON to
track data downloads. Using \textbf{anonymized} user information, they
can then calculate data access statistics, such as which data products
are downloaded most frequently, which data products are downloaded in
groups by the same users, and how many users in total are downloading
data.
This information helps NEON to evaluate the growth and reach of the
observatory, and to advocate for training activities, workshops, and
software development.

Tokens can be used whenever you use the NEON API. In this tutorial,
we'll focus on using tokens with the neonUtilities R package.

\hypertarget{ds-objectives}{}

\hypertarget{objectives}{%
\section{Objectives}\label{objectives}}

After completing this activity, you will be able to:

\begin{itemize}
\tightlist
\item
  Create a NEON API token
\item
  Use your token when downloading data with neonUtilities
\end{itemize}

\hypertarget{things-youll-need-to-complete-this-tutorial}{%
\section{Things You'll Need To Complete This
Tutorial}\label{things-youll-need-to-complete-this-tutorial}}

You will need a version of R (3.4.1 or higher) and, preferably,
\texttt{RStudio} loaded on your computer to complete this tutorial.

\hypertarget{install-r-packages}{%
\subsection{Install R Packages}\label{install-r-packages}}

\begin{itemize}
\tightlist
\item
  \textbf{neonUtilities:} \texttt{install.packages("neonUtilities")}
\end{itemize}

\hypertarget{additional-resources}{%
\section{Additional Resources}\label{additional-resources}}

\begin{itemize}
\tightlist
\item
  NEON Data Portal
\item
  NEONScience GitHub Organization
\item
  neonUtilities tutorial
\end{itemize}

If you've never downloaded NEON data using the neonUtilities package
before, we recommend starting with the Download and Explore tutorial
before proceeding with this tutorial.

In the next sections, we'll get an API token from the NEON Data Portal,
and then use it in neonUtilities when downloading data.

\hypertarget{get-a-neon-api-token}{%
\section{Get a NEON API Token}\label{get-a-neon-api-token}}

The first step is to create a NEON user account, if you don't have one.
Follow the instructions on the Data Portal User Accounts page. If you
do already have an account, go to the NEON Data Portal, sign in, and go
to your My Account profile page.

Once you have an account, you can create an API token for yourself. At
the bottom of the My Account page, you should see this bar:

\emph{(Screenshot: account page on the NEON Data Portal showing the Get
API Token button.)}

Click the `GET API TOKEN' button. After a moment, you should see this:

\emph{(Screenshot: account page on the NEON Data Portal showing that
the API token has been created.)}

Click on the Copy button to copy your API token to the clipboard:

\emph{(Screenshot: account page on the NEON Data Portal showing the API
token with the Copy button highlighted.)}

\hypertarget{use-api-token-in-neonutilities}{%
\section{Use API token in
neonUtilities}\label{use-api-token-in-neonutilities}}

In the next section, we'll walk through saving your token somewhere
secure but accessible to your code. But first let's try out using the
token the easy way.
First, we need to load the \texttt{neonUtilities} package and set the working directory: \begin{Shaded} \begin{Highlighting}[] \CommentTok{# install neonUtilities - can skip if already installed, but} \CommentTok{# API tokens are only enabled in neonUtilities v1.3.4 and higher} \CommentTok{# if your version number is lower, re-install} \KeywordTok{install.packages}\NormalTok{(}\StringTok{"neonUtilities"}\NormalTok{)} \CommentTok{# load neonUtilities} \KeywordTok{library}\NormalTok{(neonUtilities)} \CommentTok{# set working directory} \NormalTok{wd <-}\StringTok{ "~/data"} \CommentTok{# this will depend on your local machine} \KeywordTok{setwd}\NormalTok{(wd)} \end{Highlighting} \end{Shaded} NEON API tokens are very long, so it would be annoying to keep pasting the entire text string into functions. Assign your token an object name: \begin{Shaded} \begin{Highlighting}[] \NormalTok{NEON_TOKEN <-}\StringTok{ "PASTE YOUR TOKEN HERE"} \end{Highlighting} \end{Shaded} Now we'll use the \texttt{loadByProduct()} function to download data. Your API token is entered as the optional \texttt{token} input parameter. For this example, we'll download Plant foliar traits (DP1.10026.001). \begin{Shaded} \begin{Highlighting}[] \NormalTok{foliar <-}\StringTok{ }\KeywordTok{loadByProduct}\NormalTok{(}\DataTypeTok{dpID=}\StringTok{"DP1.10026.001"}\NormalTok{, }\DataTypeTok{site=}\StringTok{"all"}\NormalTok{, } \DataTypeTok{package=}\StringTok{"expanded"}\NormalTok{, }\DataTypeTok{check.size=}\NormalTok{F,} \DataTypeTok{token=}\NormalTok{NEON_TOKEN)} \end{Highlighting} \end{Shaded} You should now have data saved in the \texttt{foliar} object; the API silently used your token. If you've downloaded data without a token before, you may notice this is faster! This format applies to all \texttt{neonUtilities} functions that involve downloading data or otherwise accessing the API; you can use the \texttt{token} input with all of them. For example, when downloading remote sensing data: \begin{Shaded} \begin{Highlighting}[] \NormalTok{chm <-}\StringTok{ }\KeywordTok{byTileAOP}\NormalTok{(}\DataTypeTok{dpID=}\StringTok{"DP3.30015.001"}\NormalTok{, }\DataTypeTok{site=}\StringTok{"WREF"}\NormalTok{, } \DataTypeTok{year=}\DecValTok{2017}\NormalTok{, }\DataTypeTok{check.size=}\NormalTok{F,} \DataTypeTok{easting=}\KeywordTok{c}\NormalTok{(}\DecValTok{571000}\NormalTok{,}\DecValTok{578000}\NormalTok{), } \DataTypeTok{northing=}\KeywordTok{c}\NormalTok{(}\DecValTok{5079000}\NormalTok{,}\DecValTok{5080000}\NormalTok{), } \DataTypeTok{savepath=}\NormalTok{wd,} \DataTypeTok{token=}\NormalTok{NEON_TOKEN)} \end{Highlighting} \end{Shaded} \hypertarget{token-management-for-open-code}{% \section{Token management for open code}\label{token-management-for-open-code}} Your API token is unique to your account, so don't share it! If you're writing code that will be shared with colleagues or available publicly, such as in a GitHub repository or supplemental materials of a published paper, you can't include the line of code above where we assigned your token to \texttt{NEON\_TOKEN}, since your token is fully visible in the code there. Instead, you'll need to save your token locally on your computer, and pull it into your code without displaying it. There are a few ways to do this, we'll show two options here. \begin{itemize} \item Option 1: Save the token in a local file, and \texttt{source()} that file at the start of every script. This is fairly simple but requires a line of code in every script. 
\item
  Option 2: Add the token to a \texttt{.Renviron} file to create an
  environment variable that gets loaded when you open R. This is a
  little harder to set up initially, but once it's done, it's done
  globally, and it will work in every script you run.
\end{itemize}

\hypertarget{option-1-save-token-in-a-local-file}{%
\section{Option 1: Save token in a local
file}\label{option-1-save-token-in-a-local-file}}

Open a new, empty R script (.R). Put a single line of code in the
script:

\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{NEON_TOKEN <-}\StringTok{ "PASTE YOUR TOKEN HERE"}
\end{Highlighting}
\end{Shaded}

Save this file in a logical place on your machine, somewhere that won't
be visible publicly. Here, let's call the file
\texttt{neon\_token\_source.R}, and save it to the working directory.
Then, at the start of every script where you're going to use the NEON
API, you would run this line of code:

\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{source}\NormalTok{(}\KeywordTok{paste0}\NormalTok{(wd, }\StringTok{"/neon_token_source.R"}\NormalTok{))}
\end{Highlighting}
\end{Shaded}

Then you'll be able to use \texttt{token=NEON\_TOKEN} when you run
\texttt{neonUtilities} functions, and you can share your code without
accidentally sharing your token.

\hypertarget{option-2-save-token-to-the-r-environment}{%
\section{Option 2: Save token to the R
environment}\label{option-2-save-token-to-the-r-environment}}

To create a persistent environment variable, we use a
\texttt{.Renviron} file. Before creating a file, check which directory
R is using as your home directory:

\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{# For Windows:}
\KeywordTok{Sys.getenv}\NormalTok{(}\StringTok{"R_USER"}\NormalTok{)}

\CommentTok{# For Mac/Linux:}
\KeywordTok{Sys.getenv}\NormalTok{(}\StringTok{"HOME"}\NormalTok{)}
\end{Highlighting}
\end{Shaded}

Check the home directory to see if you already have a
\texttt{.Renviron} file, \textbf{using the file browse pane in
RStudio}, or using another file browse method with hidden files shown.
Files that begin with \texttt{.} are hidden by default, but RStudio
recognizes files that begin with \texttt{.R} and displays them.

\emph{(Screenshot: file browse pane in RStudio showing the .Renviron
file.)}

If you already have a \texttt{.Renviron} file, open it and follow the
instructions below to add to it. If you don't have one, create one
using File -\textgreater{} New File -\textgreater{} Text File in the
RStudio menus.

Add one line to the text file. In this option, there are no quotes
around the token value.

\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{NEON_TOKEN=PASTE YOUR TOKEN HERE}
\end{Highlighting}
\end{Shaded}

Save the file as \texttt{.Renviron}, in the RStudio home directory
identified above. Double check the spelling; this will not work if you
have a typo. Re-start R to load the environment. Once your token is
assigned to an environment variable, use the function
\texttt{Sys.getenv()} to access it.
For example, in \texttt{loadByProduct()}:

\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{foliar <-}\StringTok{ }\KeywordTok{loadByProduct}\NormalTok{(}\DataTypeTok{dpID=}\StringTok{"DP1.10026.001"}\NormalTok{, }\DataTypeTok{site=}\StringTok{"all"}\NormalTok{, }
                        \DataTypeTok{package=}\StringTok{"expanded"}\NormalTok{, }\DataTypeTok{check.size=}\NormalTok{F,}
                        \DataTypeTok{token=}\KeywordTok{Sys.getenv}\NormalTok{(}\StringTok{"NEON_TOKEN"}\NormalTok{))}
\end{Highlighting}
\end{Shaded}

\hypertarget{part-2-pulling-neon-data-via-the-api}{%
\subsubsection{Part 2: Pulling NEON Data via the
API}\label{part-2-pulling-neon-data-via-the-api}}

This is a tutorial on pulling data from the NEON API (Application
Programming Interface). The tutorial uses R and the R package httr, but
the core information about the API is applicable to other languages and
approaches.

\hypertarget{neon-data-1}{%
\section{NEON data}\label{neon-data-1}}

As a reminder, there are 3 basic categories of NEON data:

\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\tightlist
\item
  Observational - Data collected by a human in the field, or in an
  analytical laboratory, e.g.~beetle identification, foliar isotopes
\item
  Instrumentation - Data collected by an automated, streaming sensor,
  e.g.~net radiation, soil carbon dioxide
\item
  Remote sensing - Data collected by the airborne observation platform,
  e.g.~LIDAR, surface reflectance
\end{enumerate}

This lab covers all three types of data; it is required to complete
these sections in order and not skip ahead, since the query principles
are explained in the first section, on observational data.

\hypertarget{ds-objectives}{}

\hypertarget{objectives-1}{%
\section{Objectives}\label{objectives-1}}

After completing this activity, you will be able to:

\begin{itemize}
\tightlist
\item
  Pull observational, instrumentation, and geolocation data from the
  NEON API.
\item
  Transform API-accessed data from JSON to tabular format for analyses.
\end{itemize}

\hypertarget{things-youll-need-to-complete-this-tutorial-1}{%
\section{Things You'll Need To Complete This
Tutorial}\label{things-youll-need-to-complete-this-tutorial-1}}

To complete this tutorial you will need the most current version of R
and, preferably, RStudio loaded on your computer.

\hypertarget{install-r-packages-1}{%
\subsection{Install R Packages}\label{install-r-packages-1}}

\begin{itemize}
\tightlist
\item
  \textbf{httr:} \texttt{install.packages("httr")}
\item
  \textbf{jsonlite:} \texttt{install.packages("jsonlite")}
\item
  \textbf{dplyr:} \texttt{install.packages("dplyr")}
\item
  \textbf{devtools:} \texttt{install.packages("devtools")}
\item
  \textbf{downloader:} \texttt{install.packages("downloader")}
\item
  \textbf{geoNEON:}
  \texttt{devtools::install\_github("NEONScience/NEON-geolocation/geoNEON")}
\item
  \textbf{neonUtilities:}
  \texttt{devtools::install\_github("NEONScience/NEON-utilities/neonUtilities")}
\end{itemize}

Note: you must have devtools installed \& loaded prior to loading
geoNEON or neonUtilities.
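If you prefer to run the installation as a single block, here is a
minimal sketch that consolidates the commands listed above (same
package names and GitHub repositories; it assumes none of them are
installed yet):

\begin{verbatim}
# CRAN packages used in this tutorial
install.packages(c("httr", "jsonlite", "dplyr", "devtools", "downloader"))

# GitHub packages; devtools must be available first (see the note above)
devtools::install_github("NEONScience/NEON-geolocation/geoNEON")
devtools::install_github("NEONScience/NEON-utilities/neonUtilities")
\end{verbatim}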
\hypertarget{additional-resources-1}{% \subsection{Additional Resources}\label{additional-resources-1}} \begin{itemize} \tightlist \item Webpage for the NEON API \item GitHub repository for the NEON API \item ROpenSci wrapper for the NEON API (not covered in this tutorial) \end{itemize} \hypertarget{what-is-an-api}{% \section{What is an API?}\label{what-is-an-api}} If you are unfamiliar with the concept of an API, think of an API as a `middle person' that provides a communication path for a software application to obtain information from a digital data source. APIs are becoming a very common means of sharing digital information. Many of the apps that you use on your computer or mobile device to produce maps, charts, reports, and other useful forms of information pull data from multiple sources using APIs. In the ecological and environmental sciences, many researchers use APIs to programmatically pull data into their analyses. (Quoted from the NEON Observatory Blog story: API and data availability viewer now live on the NEON data portal.) \hypertarget{anatomy-of-an-api-call}{% \section{Anatomy of an API call}\label{anatomy-of-an-api-call}} An example API call: \url{http://data.neonscience.org/api/v0/data/DP1.10003.001/WOOD/2015-07} This includes the base URL, endpoint, and target. \hypertarget{base-url}{% \subsection{Base URL:}\label{base-url}} {\url{http://data.neonscience.org/api/v0}}{/data/DP1.10003.001/WOOD/2015-07} Specifics are appended to this in order to get the data or metadata you're looking for, but all calls to this API will include the base URL. For the NEON API, this is \url{http://data.neonscience.org/api/v0} -- not clickable, because the base URL by itself will take you nowhere! \hypertarget{endpoints}{% \subsection{Endpoints:}\label{endpoints}} {\url{http://data.neonscience.org/api/v0}}{/data}{/DP1.10003.001/WOOD/2015-07} What type of data or metadata are you looking for? \begin{itemize} \item \textbf{\textasciitilde{}/products} Information about one or all of NEON's data products \item \textbf{\textasciitilde{}/sites} Information about data availability at the site specified in the call \item \textbf{\textasciitilde{}/locations} Spatial data for the NEON locations specified in the call \item \textbf{\textasciitilde{}/data} Data! By product, site, and date (in monthly chunks). \end{itemize} \hypertarget{targets}{% \subsection{Targets:}\label{targets}} {\url{http://data.neonscience.org/api/v0/data}}{/DP1.10003.001/WOOD/2015-07} The specific data product, site, or location you want to get data for. \hypertarget{observational-data-os}{% \section{Observational data (OS)}\label{observational-data-os}} Which product do you want to get data for? Consult the Explore Data Products page. We'll pick Breeding landbird point counts, DP1.10003.001 First query the products endpoint of the API to find out which sites and dates have data available. 
In the products endpoint, the target is the numbered identifier for the data product: \begin{Shaded} \begin{Highlighting}[] \CommentTok{# Load the necessary libraries} \KeywordTok{library}\NormalTok{(httr)} \KeywordTok{library}\NormalTok{(jsonlite)} \KeywordTok{library}\NormalTok{(dplyr, }\DataTypeTok{quietly=}\NormalTok{T)} \KeywordTok{library}\NormalTok{(downloader)} \CommentTok{# Request data using the GET function & the API call} \NormalTok{req <-}\StringTok{ }\KeywordTok{GET}\NormalTok{(}\StringTok{"http://data.neonscience.org/api/v0/products/DP1.10003.001"}\NormalTok{)} \NormalTok{req} \end{Highlighting} \end{Shaded} \begin{verbatim} ## Response [https://data.neonscience.org/api/v0/products/DP1.10003.001] ## Date: 2020-07-31 21:03 ## Status: 200 ## Content-Type: application/json;charset=UTF-8 ## Size: 24.2 kB \end{verbatim} The object returned from \texttt{GET()} has many layers of information. Entering the name of the object gives you some basic information about what you downloaded. The \texttt{content()} function returns the contents in the form of a highly nested list. This is typical of JSON-formatted data returned by APIs. We can use the \texttt{names()} function to view the different types of information within this list. \begin{Shaded} \begin{Highlighting}[] \CommentTok{# View requested data} \NormalTok{req.content <-}\StringTok{ }\KeywordTok{content}\NormalTok{(req, }\DataTypeTok{as=}\StringTok{"parsed"}\NormalTok{)} \KeywordTok{names}\NormalTok{(req.content}\OperatorTok{$}\NormalTok{data)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## [1] "productCodeLong" "productCode" ## [3] "productCodePresentation" "productName" ## [5] "productDescription" "productStatus" ## [7] "productCategory" "productHasExpanded" ## [9] "productScienceTeamAbbr" "productScienceTeam" ## [11] "productPublicationFormatType" "productAbstract" ## [13] "productDesignDescription" "productStudyDescription" ## [15] "productBasicDescription" "productExpandedDescription" ## [17] "productSensor" "productRemarks" ## [19] "themes" "changeLogs" ## [21] "specs" "keywords" ## [23] "siteCodes" \end{verbatim} You can see all of the infoamtion by running the line \texttt{print(req.content)}, but this will result in a very long printout in your console. Instead, you can view list items individually. Here, we highlight a couple of interesting examples: \begin{Shaded} \begin{Highlighting}[] \CommentTok{# View Abstract} \NormalTok{req.content}\OperatorTok{$}\NormalTok{data}\OperatorTok{$}\NormalTok{productAbstract} \end{Highlighting} \end{Shaded} \begin{verbatim} ## [1] "This data product contains the quality-controlled, native sampling resolution data from NEON's breeding landbird sampling. Breeding landbirds are defined as “smaller birds (usually exclusive of raptors and upland game birds) not usually associated with aquatic habitats” (Ralph et al. 1993). The breeding landbird point counts product provides records of species identification of all individuals observed during the 6-minute count period, as well as metadata which can be used to model detectability, e.g., weather, distances from observers to birds, and detection methods. The NEON point count method is adapted from the Integrated Monitoring in Bird Conservation Regions (IMBCR): Field protocol for spatially-balanced sampling of landbird populations (Hanni et al. 2017; http://bit.ly/2u2ChUB). 
For additional details, see the user guide, protocols, and science design listed in the Documentation section in [this data product's details webpage](https://data.neonscience.org/data-products/DP1.10003.001). \n\nLatency:\nThe expected time from data and/or sample collection in the field to data publication is as follows, for each of the data tables (in days) in the downloaded data package. See the Data Product User Guide for more information.\n \nbrd_countdata: 120\n\nbrd_perpoint: 120\n\nbrd_personnel: 120\n\nbrd_references: 120" \end{verbatim} \begin{Shaded} \begin{Highlighting}[] \CommentTok{# View Available months and associated URLs for Onaqui, Utah - ONAQ} \NormalTok{req.content}\OperatorTok{$}\NormalTok{data}\OperatorTok{$}\NormalTok{siteCodes[[}\DecValTok{27}\NormalTok{]]} \end{Highlighting} \end{Shaded} \begin{verbatim} ## $siteCode ## [1] "ONAQ" ## ## $availableMonths ## $availableMonths[[1]] ## [1] "2017-05" ## ## $availableMonths[[2]] ## [1] "2018-05" ## ## $availableMonths[[3]] ## [1] "2018-06" ## ## $availableMonths[[4]] ## [1] "2019-05" ## ## ## $availableDataUrls ## $availableDataUrls[[1]] ## [1] "https://data.neonscience.org/api/v0/data/DP1.10003.001/ONAQ/2017-05" ## ## $availableDataUrls[[2]] ## [1] "https://data.neonscience.org/api/v0/data/DP1.10003.001/ONAQ/2018-05" ## ## $availableDataUrls[[3]] ## [1] "https://data.neonscience.org/api/v0/data/DP1.10003.001/ONAQ/2018-06" ## ## $availableDataUrls[[4]] ## [1] "https://data.neonscience.org/api/v0/data/DP1.10003.001/ONAQ/2019-05" \end{verbatim} To get a more accessible view of which sites have data for which months, you'll need to extract data from the nested list. There are a variety of ways to do this, in this tutorial we'll explore a couple of them. Here we'll use \texttt{fromJSON()}, in the jsonlite package, which doesn't fully flatten the nested list, but gets us the part we need. To use it, we need a text version of the content. The text version is not as human readable but is readable by the \texttt{fromJSON()} function. \begin{Shaded} \begin{Highlighting}[] \CommentTok{# make this JSON readable -> "text"} \NormalTok{req.text <-}\StringTok{ }\KeywordTok{content}\NormalTok{(req, }\DataTypeTok{as=}\StringTok{"text"}\NormalTok{)} \CommentTok{# Flatten data frame to see available data. } \NormalTok{avail <-}\StringTok{ }\NormalTok{jsonlite}\OperatorTok{::}\KeywordTok{fromJSON}\NormalTok{(req.text, }\DataTypeTok{simplifyDataFrame=}\NormalTok{T, }\DataTypeTok{flatten=}\NormalTok{T)} \NormalTok{avail} \end{Highlighting} \end{Shaded} \begin{verbatim} ## $data ## $data$productCodeLong ## [1] "NEON.DOM.SITE.DP1.10003.001" ## ## $data$productCode ## [1] "DP1.10003.001" ## ## $data$productCodePresentation ## [1] "NEON.DP1.10003" ## ## $data$productName ## [1] "Breeding landbird point counts" ## ## $data$productDescription ## [1] "Count, distance from observer, and taxonomic identification of breeding landbirds observed during point counts" ## ## $data$productStatus ## [1] "ACTIVE" ## ## $data$productCategory ## [1] "Level 1 Data Product" ## ## $data$productHasExpanded ## [1] TRUE ## ## $data$productScienceTeamAbbr ## [1] "TOS" ## ## $data$productScienceTeam ## [1] "Terrestrial Observation System (TOS)" ## ## $data$productPublicationFormatType ## [1] "TOS Data Product Type" ## ## $data$productAbstract ## [1] "This data product contains the quality-controlled, native sampling resolution data from NEON's breeding landbird sampling. 
Breeding landbirds are defined as “smaller birds (usually exclusive of raptors and upland game birds) not usually associated with aquatic habitats” (Ralph et al. 1993). The breeding landbird point counts product provides records of species identification of all individuals observed during the 6-minute count period, as well as metadata which can be used to model detectability, e.g., weather, distances from observers to birds, and detection methods. The NEON point count method is adapted from the Integrated Monitoring in Bird Conservation Regions (IMBCR): Field protocol for spatially-balanced sampling of landbird populations (Hanni et al. 2017; http://bit.ly/2u2ChUB). For additional details, see the user guide, protocols, and science design listed in the Documentation section in [this data product's details webpage](https://data.neonscience.org/data-products/DP1.10003.001). \n\nLatency:\nThe expected time from data and/or sample collection in the field to data publication is as follows, for each of the data tables (in days) in the downloaded data package. See the Data Product User Guide for more information.\n \nbrd_countdata: 120\n\nbrd_perpoint: 120\n\nbrd_personnel: 120\n\nbrd_references: 120" ## ## $data$productDesignDescription ## [1] "Depending on the size of the site, sampling for this product occurs either at either randomly distributed individual points or grids of nine points each. At larger sites, point count sampling occurs at five to fifteen 9-point grids, with grid centers collocated with distributed base plot centers (where plant, beetle, and/or soil sampling may also occur), if possible. At smaller sites (i.e., sites that cannot accommodate a minimum of 5 grids) point counts occur at the southwest corner (point 21) of 5-25 distributed base plots. Point counts are conducted once per breeding season at large sites and twice per breeding season at smaller sites. Point counts are six minutes long, with each minute tracked by the observer, following a two-minute settling-in period. All birds are recorded to species and sex, whenever possible, and the distance to each individual or flock is measured with a laser rangefinder, except in the case of flyovers." ## ## $data$productStudyDescription ## [1] "This sampling occurs at all NEON terrestrial sites." ## ## $data$productBasicDescription ## [1] "The basic package contains the per point metadata table that includes data pertaining to the observer and the weather conditions and the count data table that includes all of the observational data." ## ## $data$productExpandedDescription ## [1] "The expanded package includes two additional tables and two additional fields within the count data table. The personnel table provides institutional information about each observer, as well as their performance on identification quizzes, where available. The references tables provides the list of resources used by an observer to identify birds. The additional fields in the countdata table are family and nativeStatusCode, which are derived from the NEON master list of birds." ## ## $data$productSensor ## NULL ## ## $data$productRemarks ## [1] "Queries for this data product will return data collected during the date range specified for `brd_perpoint` and `brd_countdata`, but will return data from all dates for `brd_personnel` (quiz scores may occur over time periods which are distinct from when sampling occurs) and `brd_references` (which apply to a broad range of sampling dates). 
A record from `brd_perPoint` should have 6+ child records in `brd_countdata`, at least one per pointCountMinute. Duplicates or missing data may exist where protocol and/or data entry aberrations have occurred; users should check data carefully for anomalies before joining tables. Taxonomic IDs of species of concern have been 'fuzzed'; see data package readme files for more information." ## ## $data$themes ## [1] "Organisms, Populations, and Communities" ## ## $data$changeLogs ## NULL ## ## $data$specs ## specId specNumber ## 1 3656 NEON.DOC.000916vC ## 2 2565 NEON_bird_userGuide_vA ## 3 3729 NEON.DOC.014041vJ ## ## $data$keywords ## [1] "vertebrates" "birds" ## [3] "diversity" "taxonomy" ## [5] "community composition" "distance sampling" ## [7] "avian" "species composition" ## [9] "population" "Aves" ## [11] "Chordata" "point counts" ## [13] "landbirds" "invasive" ## [15] "introduced" "native" ## [17] "animals" "Animalia" ## ## $data$siteCodes ## siteCode ## 1 ABBY ## 2 BARR ## 3 BART ## 4 BLAN ## 5 BONA ## 6 CLBJ ## 7 CPER ## 8 DCFS ## 9 DEJU ## 10 DELA ## 11 DSNY ## 12 GRSM ## 13 GUAN ## 14 HARV ## 15 HEAL ## 16 JERC ## 17 JORN ## 18 KONA ## 19 KONZ ## 20 LAJA ## 21 LENO ## 22 MLBS ## 23 MOAB ## 24 NIWO ## 25 NOGP ## 26 OAES ## 27 ONAQ ## 28 ORNL ## 29 OSBS ## 30 PUUM ## 31 RMNP ## 32 SCBI ## 33 SERC ## 34 SJER ## 35 SOAP ## 36 SRER ## 37 STEI ## 38 STER ## 39 TALL ## 40 TEAK ## 41 TOOL ## 42 TREE ## 43 UKFS ## 44 UNDE ## 45 WOOD ## 46 WREF ## 47 YELL ## availableMonths ## 1 2017-05, 2017-06, 2018-06, 2018-07, 2019-05 ## 2 2017-07, 2018-07, 2019-06 ## 3 2015-06, 2016-06, 2017-06, 2018-06, 2019-06 ## 4 2017-05, 2017-06, 2018-05, 2018-06, 2019-05, 2019-06 ## 5 2017-06, 2018-06, 2018-07, 2019-06 ## 6 2017-05, 2018-04, 2019-04, 2019-05 ## 7 2013-06, 2015-05, 2016-05, 2017-05, 2017-06, 2018-05, 2019-06 ## 8 2017-06, 2017-07, 2018-07, 2019-06, 2019-07 ## 9 2017-06, 2018-06, 2019-06 ## 10 2015-06, 2017-06, 2018-05, 2019-06 ## 11 2015-06, 2016-05, 2017-05, 2018-05, 2019-05 ## 12 2016-06, 2017-05, 2017-06, 2018-05, 2019-05 ## 13 2015-05, 2017-05, 2018-05, 2019-05, 2019-06 ## 14 2015-05, 2015-06, 2016-06, 2017-06, 2018-06, 2019-06 ## 15 2017-06, 2018-06, 2018-07, 2019-06, 2019-07 ## 16 2016-06, 2017-05, 2018-06, 2019-06 ## 17 2017-04, 2017-05, 2018-04, 2018-05, 2019-04 ## 18 2018-05, 2018-06, 2019-06 ## 19 2017-06, 2018-05, 2018-06, 2019-06 ## 20 2017-05, 2018-05, 2019-05, 2019-06 ## 21 2017-06, 2018-05, 2019-06 ## 22 2018-06, 2019-05 ## 23 2015-06, 2017-05, 2018-05, 2019-05 ## 24 2015-07, 2017-07, 2018-07, 2019-07 ## 25 2017-07, 2018-07, 2019-07 ## 26 2017-05, 2017-06, 2018-04, 2018-05, 2019-05 ## 27 2017-05, 2018-05, 2018-06, 2019-05 ## 28 2016-05, 2016-06, 2017-05, 2018-06, 2019-05 ## 29 2016-05, 2017-05, 2018-05, 2019-05 ## 30 2018-04 ## 31 2017-06, 2017-07, 2018-06, 2018-07, 2019-06, 2019-07 ## 32 2015-06, 2016-05, 2016-06, 2017-05, 2017-06, 2018-05, 2018-06, 2019-05, 2019-06 ## 33 2017-05, 2017-06, 2018-05, 2019-05 ## 34 2017-04, 2018-04, 2019-04 ## 35 2017-05, 2018-05, 2019-05 ## 36 2017-05, 2018-04, 2018-05, 2019-04 ## 37 2016-05, 2016-06, 2017-06, 2018-05, 2018-06, 2019-05, 2019-06 ## 38 2013-06, 2015-05, 2016-05, 2017-05, 2018-05, 2019-05, 2019-06 ## 39 2015-06, 2016-07, 2017-06, 2018-06, 2019-05 ## 40 2017-06, 2018-06, 2019-06, 2019-07 ## 41 2017-06, 2018-07, 2019-06 ## 42 2016-06, 2017-06, 2018-06, 2019-06 ## 43 2017-06, 2018-06, 2019-06 ## 44 2016-06, 2016-07, 2017-06, 2018-06, 2019-06 ## 45 2015-07, 2017-07, 2018-07, 2019-06, 2019-07 ## 46 2018-06, 2019-05, 2019-06 ## 47 2018-06, 
2019-06 ## availableDataUrls ## 1 https://data.neonscience.org/api/v0/data/DP1.10003.001/ABBY/2017-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/ABBY/2017-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/ABBY/2018-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/ABBY/2018-07, https://data.neonscience.org/api/v0/data/DP1.10003.001/ABBY/2019-05 ## 2 https://data.neonscience.org/api/v0/data/DP1.10003.001/BARR/2017-07, https://data.neonscience.org/api/v0/data/DP1.10003.001/BARR/2018-07, https://data.neonscience.org/api/v0/data/DP1.10003.001/BARR/2019-06 ## 3 https://data.neonscience.org/api/v0/data/DP1.10003.001/BART/2015-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/BART/2016-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/BART/2017-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/BART/2018-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/BART/2019-06 ## 4 https://data.neonscience.org/api/v0/data/DP1.10003.001/BLAN/2017-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/BLAN/2017-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/BLAN/2018-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/BLAN/2018-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/BLAN/2019-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/BLAN/2019-06 ## 5 https://data.neonscience.org/api/v0/data/DP1.10003.001/BONA/2017-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/BONA/2018-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/BONA/2018-07, https://data.neonscience.org/api/v0/data/DP1.10003.001/BONA/2019-06 ## 6 https://data.neonscience.org/api/v0/data/DP1.10003.001/CLBJ/2017-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/CLBJ/2018-04, https://data.neonscience.org/api/v0/data/DP1.10003.001/CLBJ/2019-04, https://data.neonscience.org/api/v0/data/DP1.10003.001/CLBJ/2019-05 ## 7 https://data.neonscience.org/api/v0/data/DP1.10003.001/CPER/2013-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/CPER/2015-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/CPER/2016-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/CPER/2017-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/CPER/2017-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/CPER/2018-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/CPER/2019-06 ## 8 https://data.neonscience.org/api/v0/data/DP1.10003.001/DCFS/2017-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/DCFS/2017-07, https://data.neonscience.org/api/v0/data/DP1.10003.001/DCFS/2018-07, https://data.neonscience.org/api/v0/data/DP1.10003.001/DCFS/2019-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/DCFS/2019-07 ## 9 https://data.neonscience.org/api/v0/data/DP1.10003.001/DEJU/2017-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/DEJU/2018-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/DEJU/2019-06 ## 10 https://data.neonscience.org/api/v0/data/DP1.10003.001/DELA/2015-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/DELA/2017-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/DELA/2018-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/DELA/2019-06 ## 11 https://data.neonscience.org/api/v0/data/DP1.10003.001/DSNY/2015-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/DSNY/2016-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/DSNY/2017-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/DSNY/2018-05, 
https://data.neonscience.org/api/v0/data/DP1.10003.001/DSNY/2019-05 ## 12 https://data.neonscience.org/api/v0/data/DP1.10003.001/GRSM/2016-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/GRSM/2017-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/GRSM/2017-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/GRSM/2018-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/GRSM/2019-05 ## 13 https://data.neonscience.org/api/v0/data/DP1.10003.001/GUAN/2015-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/GUAN/2017-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/GUAN/2018-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/GUAN/2019-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/GUAN/2019-06 ## 14 https://data.neonscience.org/api/v0/data/DP1.10003.001/HARV/2015-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/HARV/2015-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/HARV/2016-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/HARV/2017-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/HARV/2018-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/HARV/2019-06 ## 15 https://data.neonscience.org/api/v0/data/DP1.10003.001/HEAL/2017-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/HEAL/2018-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/HEAL/2018-07, https://data.neonscience.org/api/v0/data/DP1.10003.001/HEAL/2019-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/HEAL/2019-07 ## 16 https://data.neonscience.org/api/v0/data/DP1.10003.001/JERC/2016-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/JERC/2017-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/JERC/2018-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/JERC/2019-06 ## 17 https://data.neonscience.org/api/v0/data/DP1.10003.001/JORN/2017-04, https://data.neonscience.org/api/v0/data/DP1.10003.001/JORN/2017-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/JORN/2018-04, https://data.neonscience.org/api/v0/data/DP1.10003.001/JORN/2018-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/JORN/2019-04 ## 18 https://data.neonscience.org/api/v0/data/DP1.10003.001/KONA/2018-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/KONA/2018-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/KONA/2019-06 ## 19 https://data.neonscience.org/api/v0/data/DP1.10003.001/KONZ/2017-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/KONZ/2018-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/KONZ/2018-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/KONZ/2019-06 ## 20 https://data.neonscience.org/api/v0/data/DP1.10003.001/LAJA/2017-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/LAJA/2018-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/LAJA/2019-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/LAJA/2019-06 ## 21 https://data.neonscience.org/api/v0/data/DP1.10003.001/LENO/2017-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/LENO/2018-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/LENO/2019-06 ## 22 https://data.neonscience.org/api/v0/data/DP1.10003.001/MLBS/2018-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/MLBS/2019-05 ## 23 https://data.neonscience.org/api/v0/data/DP1.10003.001/MOAB/2015-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/MOAB/2017-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/MOAB/2018-05, 
https://data.neonscience.org/api/v0/data/DP1.10003.001/MOAB/2019-05 ## 24 https://data.neonscience.org/api/v0/data/DP1.10003.001/NIWO/2015-07, https://data.neonscience.org/api/v0/data/DP1.10003.001/NIWO/2017-07, https://data.neonscience.org/api/v0/data/DP1.10003.001/NIWO/2018-07, https://data.neonscience.org/api/v0/data/DP1.10003.001/NIWO/2019-07 ## 25 https://data.neonscience.org/api/v0/data/DP1.10003.001/NOGP/2017-07, https://data.neonscience.org/api/v0/data/DP1.10003.001/NOGP/2018-07, https://data.neonscience.org/api/v0/data/DP1.10003.001/NOGP/2019-07 ## 26 https://data.neonscience.org/api/v0/data/DP1.10003.001/OAES/2017-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/OAES/2017-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/OAES/2018-04, https://data.neonscience.org/api/v0/data/DP1.10003.001/OAES/2018-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/OAES/2019-05 ## 27 https://data.neonscience.org/api/v0/data/DP1.10003.001/ONAQ/2017-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/ONAQ/2018-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/ONAQ/2018-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/ONAQ/2019-05 ## 28 https://data.neonscience.org/api/v0/data/DP1.10003.001/ORNL/2016-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/ORNL/2016-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/ORNL/2017-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/ORNL/2018-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/ORNL/2019-05 ## 29 https://data.neonscience.org/api/v0/data/DP1.10003.001/OSBS/2016-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/OSBS/2017-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/OSBS/2018-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/OSBS/2019-05 ## 30 https://data.neonscience.org/api/v0/data/DP1.10003.001/PUUM/2018-04 ## 31 https://data.neonscience.org/api/v0/data/DP1.10003.001/RMNP/2017-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/RMNP/2017-07, https://data.neonscience.org/api/v0/data/DP1.10003.001/RMNP/2018-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/RMNP/2018-07, https://data.neonscience.org/api/v0/data/DP1.10003.001/RMNP/2019-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/RMNP/2019-07 ## 32 https://data.neonscience.org/api/v0/data/DP1.10003.001/SCBI/2015-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/SCBI/2016-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/SCBI/2016-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/SCBI/2017-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/SCBI/2017-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/SCBI/2018-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/SCBI/2018-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/SCBI/2019-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/SCBI/2019-06 ## 33 https://data.neonscience.org/api/v0/data/DP1.10003.001/SERC/2017-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/SERC/2017-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/SERC/2018-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/SERC/2019-05 ## 34 https://data.neonscience.org/api/v0/data/DP1.10003.001/SJER/2017-04, https://data.neonscience.org/api/v0/data/DP1.10003.001/SJER/2018-04, https://data.neonscience.org/api/v0/data/DP1.10003.001/SJER/2019-04 ## 35 https://data.neonscience.org/api/v0/data/DP1.10003.001/SOAP/2017-05, 
https://data.neonscience.org/api/v0/data/DP1.10003.001/SOAP/2018-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/SOAP/2019-05 ## 36 https://data.neonscience.org/api/v0/data/DP1.10003.001/SRER/2017-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/SRER/2018-04, https://data.neonscience.org/api/v0/data/DP1.10003.001/SRER/2018-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/SRER/2019-04 ## 37 https://data.neonscience.org/api/v0/data/DP1.10003.001/STEI/2016-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/STEI/2016-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/STEI/2017-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/STEI/2018-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/STEI/2018-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/STEI/2019-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/STEI/2019-06 ## 38 https://data.neonscience.org/api/v0/data/DP1.10003.001/STER/2013-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/STER/2015-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/STER/2016-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/STER/2017-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/STER/2018-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/STER/2019-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/STER/2019-06 ## 39 https://data.neonscience.org/api/v0/data/DP1.10003.001/TALL/2015-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/TALL/2016-07, https://data.neonscience.org/api/v0/data/DP1.10003.001/TALL/2017-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/TALL/2018-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/TALL/2019-05 ## 40 https://data.neonscience.org/api/v0/data/DP1.10003.001/TEAK/2017-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/TEAK/2018-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/TEAK/2019-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/TEAK/2019-07 ## 41 https://data.neonscience.org/api/v0/data/DP1.10003.001/TOOL/2017-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/TOOL/2018-07, https://data.neonscience.org/api/v0/data/DP1.10003.001/TOOL/2019-06 ## 42 https://data.neonscience.org/api/v0/data/DP1.10003.001/TREE/2016-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/TREE/2017-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/TREE/2018-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/TREE/2019-06 ## 43 https://data.neonscience.org/api/v0/data/DP1.10003.001/UKFS/2017-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/UKFS/2018-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/UKFS/2019-06 ## 44 https://data.neonscience.org/api/v0/data/DP1.10003.001/UNDE/2016-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/UNDE/2016-07, https://data.neonscience.org/api/v0/data/DP1.10003.001/UNDE/2017-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/UNDE/2018-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/UNDE/2019-06 ## 45 https://data.neonscience.org/api/v0/data/DP1.10003.001/WOOD/2015-07, https://data.neonscience.org/api/v0/data/DP1.10003.001/WOOD/2017-07, https://data.neonscience.org/api/v0/data/DP1.10003.001/WOOD/2018-07, https://data.neonscience.org/api/v0/data/DP1.10003.001/WOOD/2019-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/WOOD/2019-07 ## 46 https://data.neonscience.org/api/v0/data/DP1.10003.001/WREF/2018-06, 
https://data.neonscience.org/api/v0/data/DP1.10003.001/WREF/2019-05, https://data.neonscience.org/api/v0/data/DP1.10003.001/WREF/2019-06
## 47 https://data.neonscience.org/api/v0/data/DP1.10003.001/YELL/2018-06, https://data.neonscience.org/api/v0/data/DP1.10003.001/YELL/2019-06
\end{verbatim}

The object contains a lot of information about the data product, including:

\begin{itemize}
\tightlist
\item
  keywords under \texttt{\$data\$keywords},
\item
  references for documentation under \texttt{\$data\$specs},
\item
  data availability by site and month under \texttt{\$data\$siteCodes}, and
\item
  specific URLs for the API calls for each site and month under
  \texttt{\$data\$siteCodes\$availableDataUrls}.
\end{itemize}

We need \texttt{\$data\$siteCodes} to tell us what we can download.
\texttt{\$data\$siteCodes\$availableDataUrls} allows us to avoid writing the
API calls ourselves in the next steps.

\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{# get data availability list for the product}
\NormalTok{bird.urls <-}\StringTok{ }\KeywordTok{unlist}\NormalTok{(avail}\OperatorTok{$}\NormalTok{data}\OperatorTok{$}\NormalTok{siteCodes}\OperatorTok{$}\NormalTok{availableDataUrls)}
\KeywordTok{length}\NormalTok{(bird.urls) }\CommentTok{#total number of URLs}
\end{Highlighting}
\end{Shaded}

\begin{verbatim}
## [1] 204
\end{verbatim}

\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{bird.urls[}\DecValTok{1}\OperatorTok{:}\DecValTok{10}\NormalTok{] }\CommentTok{#show first 10 URLs available}
\end{Highlighting}
\end{Shaded}

\begin{verbatim}
##  [1] "https://data.neonscience.org/api/v0/data/DP1.10003.001/ABBY/2017-05"
##  [2] "https://data.neonscience.org/api/v0/data/DP1.10003.001/ABBY/2017-06"
##  [3] "https://data.neonscience.org/api/v0/data/DP1.10003.001/ABBY/2018-06"
##  [4] "https://data.neonscience.org/api/v0/data/DP1.10003.001/ABBY/2018-07"
##  [5] "https://data.neonscience.org/api/v0/data/DP1.10003.001/ABBY/2019-05"
##  [6] "https://data.neonscience.org/api/v0/data/DP1.10003.001/BARR/2017-07"
##  [7] "https://data.neonscience.org/api/v0/data/DP1.10003.001/BARR/2018-07"
##  [8] "https://data.neonscience.org/api/v0/data/DP1.10003.001/BARR/2019-06"
##  [9] "https://data.neonscience.org/api/v0/data/DP1.10003.001/BART/2015-06"
## [10] "https://data.neonscience.org/api/v0/data/DP1.10003.001/BART/2016-06"
\end{verbatim}

These are the URLs showing us what files are available for each month where
there are data. Let's look at the bird data from the Woodworth (WOOD) site
for July 2015. We can do this by using the above code but now specifying
which site/date we want using the \texttt{grep()} function. Note that if
there were only one month of data from a site, you could leave off the date
in the function. If you want data from more than one site/month, you need to
iterate this code; \texttt{GET()} fails if you give it more than one URL. A
minimal sketch of such a loop is shown below, before we continue with the
single site/month case.
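For example, to pull every month available for WOOD, you could loop over the
matching URLs and parse each response separately. This sketch is illustrative
only and is not part of the original tutorial code; the object names
\texttt{wood.urls} and \texttt{wood.data} are hypothetical:

\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{# hypothetical sketch: request every available WOOD month, one URL at a time}
\NormalTok{wood.urls <-}\StringTok{ }\NormalTok{bird.urls[}\KeywordTok{grep}\NormalTok{(}\StringTok{"WOOD"}\NormalTok{, bird.urls)]}
\CommentTok{# GET() takes a single URL, so call it once per URL and keep the results in a list}
\NormalTok{wood.data <-}\StringTok{ }\KeywordTok{lapply}\NormalTok{(wood.urls, function(x)}
\NormalTok{    jsonlite}\OperatorTok{::}\KeywordTok{fromJSON}\NormalTok{(}\KeywordTok{content}\NormalTok{(}\KeywordTok{GET}\NormalTok{(x), }\DataTypeTok{as=}\StringTok{"text"}\NormalTok{)))}
\end{Highlighting}
\end{Shaded}

Each element of \texttt{wood.data} then has the same structure as the
single-month object we build next.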
\begin{Shaded} \begin{Highlighting}[] \CommentTok{# get data availability for WOOD July 2015} \NormalTok{brd <-}\StringTok{ }\KeywordTok{GET}\NormalTok{(bird.urls[}\KeywordTok{grep}\NormalTok{(}\StringTok{"WOOD/2015-07"}\NormalTok{, bird.urls)])} \NormalTok{brd.files <-}\StringTok{ }\NormalTok{jsonlite}\OperatorTok{::}\KeywordTok{fromJSON}\NormalTok{(}\KeywordTok{content}\NormalTok{(brd, }\DataTypeTok{as=}\StringTok{"text"}\NormalTok{))} \CommentTok{# view just the available data files } \NormalTok{brd.files}\OperatorTok{$}\NormalTok{data}\OperatorTok{$}\NormalTok{files} \end{Highlighting} \end{Shaded} \begin{verbatim} ## crc32 ## 1 e0adb3146b5cce59eea09864145efcb1 ## 2 4438e5e050fc7be5949457f42089a397 ## 3 d84b496cf950b5b96e762473beda563a ## 4 6d15da01c03793da8fc6d871e6659ea8 ## 5 f37931d46213246dccf2a161211c9afe ## 6 e67f1ae72760a63c616ec18108453aaa ## 7 df102cb4cfdce092cda3c0942c9d9b67 ## 8 e67f1ae72760a63c616ec18108453aaa ## 9 2ad379ae44f4e87996bdc3dee70a0794 ## 10 d76cfc5443ac27a058fab1d319d31d34 ## 11 22e3353dabb8b154768dc2eee9873718 ## 12 6d15da01c03793da8fc6d871e6659ea8 ## 13 a2c47410a6a0f49d0b1cf95be6238604 ## 14 f37931d46213246dccf2a161211c9afe ## 15 6ba91b6e109ff14d1911dcaad9febeb9 ## 16 680a2f53c0a9d1b0ab4f8814bda5b399 ## name ## 1 NEON.D09.WOOD.DP1.10003.001.brd_countdata.2015-07.basic.20191107T152331Z.csv ## 2 NEON.D09.WOOD.DP1.10003.001.2015-07.basic.20191107T152331Z.zip ## 3 NEON.D09.WOOD.DP1.10003.001.readme.20191107T152331Z.txt ## 4 NEON.D09.WOOD.DP0.10003.001.validation.20191107T152331Z.csv ## 5 NEON.D09.WOOD.DP1.10003.001.brd_perpoint.2015-07.basic.20191107T152331Z.csv ## 6 NEON.D09.WOOD.DP1.10003.001.variables.20191107T152331Z.csv ## 7 NEON.D09.WOOD.DP1.10003.001.EML.20150701-20150705.20191107T152331Z.xml ## 8 NEON.D09.WOOD.DP1.10003.001.variables.20191107T152331Z.csv ## 9 NEON.D09.WOOD.DP1.10003.001.brd_countdata.2015-07.expanded.20191107T152331Z.csv ## 10 NEON.D09.WOOD.DP1.10003.001.brd_references.expanded.20191107T152331Z.csv ## 11 NEON.D09.WOOD.DP1.10003.001.2015-07.expanded.20191107T152331Z.zip ## 12 NEON.D09.WOOD.DP0.10003.001.validation.20191107T152331Z.csv ## 13 NEON.Bird_Conservancy_of_the_Rockies.brd_personnel.csv ## 14 NEON.D09.WOOD.DP1.10003.001.brd_perpoint.2015-07.expanded.20191107T152331Z.csv ## 15 NEON.D09.WOOD.DP1.10003.001.EML.20150701-20150705.20191107T152331Z.xml ## 16 NEON.D09.WOOD.DP1.10003.001.readme.20191107T152331Z.txt ## size ## 1 346679 ## 2 67816 ## 3 12784 ## 4 10084 ## 5 23521 ## 6 7337 ## 7 70539 ## 8 7337 ## 9 367402 ## 10 1012 ## 11 79998 ## 12 10084 ## 13 46349 ## 14 23521 ## 15 78750 ## 16 13063 ## url ## 1 https://neon-prod-pub-1.s3.data.neonscience.org/NEON.DOM.SITE.DP1.10003.001/PROV/WOOD/20150701T000000--20150801T000000/basic/NEON.D09.WOOD.DP1.10003.001.brd_countdata.2015-07.basic.20191107T152331Z.csv?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Date=20200731T210355Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Credential=pub-internal-read%2F20200731%2Fus-west-2%2Fs3%2Faws4_request&X-Amz-Signature=09f46fc53d78da33e7628ad91dc106ed549b0c8e24dd943db28b04659cf28c77 ## 2 https://neon-prod-pub-1.s3.data.neonscience.org/NEON.DOM.SITE.DP1.10003.001/PROV/WOOD/20150701T000000--20150801T000000/basic/NEON.D09.WOOD.DP1.10003.001.2015-07.basic.20191107T152331Z.zip?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Date=20200731T210355Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Credential=pub-internal-read%2F20200731%2Fus-west-2%2Fs3%2Faws4_request&X-Amz-Signature=b5ae8d5b44a3228cf14476d056a5c531e1c7597a97892ed330510192264da61d ## 3 
https://neon-prod-pub-1.s3.data.neonscience.org/NEON.DOM.SITE.DP1.10003.001/PROV/WOOD/20150701T000000--20150801T000000/basic/NEON.D09.WOOD.DP1.10003.001.readme.20191107T152331Z.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Date=20200731T210355Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Credential=pub-internal-read%2F20200731%2Fus-west-2%2Fs3%2Faws4_request&X-Amz-Signature=712db4801417e0ae09308e3b585c462c7e646395adf9c7665c2ded9220633c08 ## 4 https://neon-prod-pub-1.s3.data.neonscience.org/NEON.DOM.SITE.DP1.10003.001/PROV/WOOD/20150701T000000--20150801T000000/basic/NEON.D09.WOOD.DP0.10003.001.validation.20191107T152331Z.csv?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Date=20200731T210355Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Credential=pub-internal-read%2F20200731%2Fus-west-2%2Fs3%2Faws4_request&X-Amz-Signature=4e0554fe2474f0672fc478388cd310b5ec933680388f1f923f2ffee3fda0c9c2 ## 5 https://neon-prod-pub-1.s3.data.neonscience.org/NEON.DOM.SITE.DP1.10003.001/PROV/WOOD/20150701T000000--20150801T000000/basic/NEON.D09.WOOD.DP1.10003.001.brd_perpoint.2015-07.basic.20191107T152331Z.csv?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Date=20200731T210355Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Credential=pub-internal-read%2F20200731%2Fus-west-2%2Fs3%2Faws4_request&X-Amz-Signature=5c456d2c3520c1020974b40e1df9befffedf3201614a3e92b7b318eefccc4217 ## 6 https://neon-prod-pub-1.s3.data.neonscience.org/NEON.DOM.SITE.DP1.10003.001/PROV/WOOD/20150701T000000--20150801T000000/basic/NEON.D09.WOOD.DP1.10003.001.variables.20191107T152331Z.csv?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Date=20200731T210355Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Credential=pub-internal-read%2F20200731%2Fus-west-2%2Fs3%2Faws4_request&X-Amz-Signature=59d1fc21c86b2b5b2f66e3b5356a9dac4464f17dd724461f011629ea0daa0a6c ## 7 https://neon-prod-pub-1.s3.data.neonscience.org/NEON.DOM.SITE.DP1.10003.001/PROV/WOOD/20150701T000000--20150801T000000/basic/NEON.D09.WOOD.DP1.10003.001.EML.20150701-20150705.20191107T152331Z.xml?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Date=20200731T210355Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Credential=pub-internal-read%2F20200731%2Fus-west-2%2Fs3%2Faws4_request&X-Amz-Signature=21ac38da61c3343f2a4b8c824327987d9d91acec5e46ce07edaa5b5cdd05a4ef ## 8 https://neon-prod-pub-1.s3.data.neonscience.org/NEON.DOM.SITE.DP1.10003.001/PROV/WOOD/20150701T000000--20150801T000000/expanded/NEON.D09.WOOD.DP1.10003.001.variables.20191107T152331Z.csv?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Date=20200731T210355Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Credential=pub-internal-read%2F20200731%2Fus-west-2%2Fs3%2Faws4_request&X-Amz-Signature=4cd70acfffd40a9f4644c528bc81d690f47072c964f2a736840dd3ab6983a68a ## 9 https://neon-prod-pub-1.s3.data.neonscience.org/NEON.DOM.SITE.DP1.10003.001/PROV/WOOD/20150701T000000--20150801T000000/expanded/NEON.D09.WOOD.DP1.10003.001.brd_countdata.2015-07.expanded.20191107T152331Z.csv?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Date=20200731T210355Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Credential=pub-internal-read%2F20200731%2Fus-west-2%2Fs3%2Faws4_request&X-Amz-Signature=484acdcdd6995045144baad5ae0d13a37fbf11ae7a207b02cff115ddfae34dcf ## 10 
https://neon-prod-pub-1.s3.data.neonscience.org/NEON.DOM.SITE.DP1.10003.001/PROV/WOOD/20150701T000000--20150801T000000/expanded/NEON.D09.WOOD.DP1.10003.001.brd_references.expanded.20191107T152331Z.csv?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Date=20200731T210355Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Credential=pub-internal-read%2F20200731%2Fus-west-2%2Fs3%2Faws4_request&X-Amz-Signature=f489aa86c91497427a31716aef73c033e62759065c1ea397fe7a9c762a90bee9 ## 11 https://neon-prod-pub-1.s3.data.neonscience.org/NEON.DOM.SITE.DP1.10003.001/PROV/WOOD/20150701T000000--20150801T000000/expanded/NEON.D09.WOOD.DP1.10003.001.2015-07.expanded.20191107T152331Z.zip?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Date=20200731T210355Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Credential=pub-internal-read%2F20200731%2Fus-west-2%2Fs3%2Faws4_request&X-Amz-Signature=2c021d756672bdf7fa5758d5c9172cbcad16643212778ae3f74e6f8312bfd489 ## 12 https://neon-prod-pub-1.s3.data.neonscience.org/NEON.DOM.SITE.DP1.10003.001/PROV/WOOD/20150701T000000--20150801T000000/expanded/NEON.D09.WOOD.DP0.10003.001.validation.20191107T152331Z.csv?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Date=20200731T210355Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Credential=pub-internal-read%2F20200731%2Fus-west-2%2Fs3%2Faws4_request&X-Amz-Signature=a6aff705486243a0bbed2c72767535ce75d711007cfd106778d8aeeb6ea80b06 ## 13 https://neon-prod-pub-1.s3.data.neonscience.org/NEON.DOM.SITE.DP1.10003.001/PROV/WOOD/20150701T000000--20150801T000000/expanded/NEON.Bird_Conservancy_of_the_Rockies.brd_personnel.csv?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Date=20200731T210355Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Credential=pub-internal-read%2F20200731%2Fus-west-2%2Fs3%2Faws4_request&X-Amz-Signature=f27724956fe7252466648168ea1acf61c8a2effa0840c2423da09f8f476b651a ## 14 https://neon-prod-pub-1.s3.data.neonscience.org/NEON.DOM.SITE.DP1.10003.001/PROV/WOOD/20150701T000000--20150801T000000/expanded/NEON.D09.WOOD.DP1.10003.001.brd_perpoint.2015-07.expanded.20191107T152331Z.csv?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Date=20200731T210355Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Credential=pub-internal-read%2F20200731%2Fus-west-2%2Fs3%2Faws4_request&X-Amz-Signature=68170cdb4102c3bd396bcea7c5e09cb3a2e08a2f24a0ec54be9149ce23081412 ## 15 https://neon-prod-pub-1.s3.data.neonscience.org/NEON.DOM.SITE.DP1.10003.001/PROV/WOOD/20150701T000000--20150801T000000/expanded/NEON.D09.WOOD.DP1.10003.001.EML.20150701-20150705.20191107T152331Z.xml?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Date=20200731T210355Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Credential=pub-internal-read%2F20200731%2Fus-west-2%2Fs3%2Faws4_request&X-Amz-Signature=90345127eeadd2a25e6e3f6d2defad1c132b232178ef7b8d5bf3ee048cffab3a ## 16 https://neon-prod-pub-1.s3.data.neonscience.org/NEON.DOM.SITE.DP1.10003.001/PROV/WOOD/20150701T000000--20150801T000000/expanded/NEON.D09.WOOD.DP1.10003.001.readme.20191107T152331Z.txt?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Date=20200731T210355Z&X-Amz-SignedHeaders=host&X-Amz-Expires=3600&X-Amz-Credential=pub-internal-read%2F20200731%2Fus-west-2%2Fs3%2Faws4_request&X-Amz-Signature=eb78a08a35b988b0ccac119db885bf28256826ae97dd64814cb8e02dd935ecd0 \end{verbatim} In this output, \texttt{name} and \texttt{url} are key fields. It provides us with the names of the files available for this site and month, and URLs where we can get the files. We'll use the file names to pick which ones we want. 
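If you just want to skim what's available before choosing, one option (shown
here purely as an illustration, not part of the original workflow) is to
print only the \texttt{name} and \texttt{size} columns and leave the long
presigned \texttt{url} strings out:

\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{# quick look at file names and sizes (in bytes), without the long URLs}
\NormalTok{brd.files}\OperatorTok{$}\NormalTok{data}\OperatorTok{$}\NormalTok{files[, }\KeywordTok{c}\NormalTok{(}\StringTok{"name"}\NormalTok{, }\StringTok{"size"}\NormalTok{)]}
\end{Highlighting}
\end{Shaded}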
The available files include both \textbf{data} and \textbf{metadata}, and
both the \textbf{basic} and \textbf{expanded} data packages. Typically the
expanded package includes additional quality or uncertainty data, either as
additional files or as additional fields beyond those in the basic files.
Basic and expanded data packages are available for most NEON data products
(some only have basic). Metadata are described by file name below.

The format for most of the file names is:

\textbf{NEON.{[}domain number{]}.{[}site code{]}.{[}data product
ID{]}.{[}file-specific name{]}.{[}date of file creation{]}}

Some files omit the domain and site, since they're not specific to a
location, like the data product readme. The date of file creation uses the
ISO 8601 format, for example 20170720T182547Z, and can be used to determine
whether data have been updated since the last time you downloaded.

Available files in our query for July 2015 at Woodworth are all of the
following (leaving off the initial NEON.D09.WOOD.DP1.10003.001):

\begin{itemize}
\item
  \textbf{\textasciitilde{}.2015-07.expanded.20170720T182547Z.zip:} zip of
  all files in the expanded package
\item
  \textbf{\textasciitilde{}.brd\_countdata.2015-07.expanded.20170720T182547Z.csv:}
  count data table, expanded package version: counts of birds at each point
\item
  \textbf{\textasciitilde{}.brd\_perpoint.2015-07.expanded.20170720T182547Z.csv:}
  point data table, expanded package version: metadata at each observation
  point
\item
  \textbf{NEON.Bird Conservancy of the Rockies.brd\_personnel.csv:} personnel
  data table, accuracy scores for bird observers
\item
  \textbf{\textasciitilde{}.2015-07.basic.20170720T182547Z.zip:} zip of all
  files in the basic package
\item
  \textbf{\textasciitilde{}.brd\_countdata.2015-07.basic.20170720T182547Z.csv:}
  count data table, basic package version: counts of birds at each point
\item
  \textbf{\textasciitilde{}.brd\_perpoint.2015-07.basic.20170720T182547Z.csv:}
  point data table, basic package version: metadata at each observation point
\item
  \textbf{NEON.DP1.10003.001\_readme.txt:} readme for the data product (not
  specific to dates or location). Appears twice in the list, since it's in
  both the basic and expanded package
\item
  \textbf{\textasciitilde{}.20150101-20160613.xml:} Ecological Metadata
  Language (EML) file. Appears twice in the list, since it's in both the
  basic and expanded package
\item
  \textbf{\textasciitilde{}.validation.20170720T182547Z.csv:} validation file
  for the data product, lists input data and data entry rules. Appears twice
  in the list, since it's in both the basic and expanded package
\item
  \textbf{\textasciitilde{}.variables.20170720T182547Z.csv:} variables file
  for the data product, lists data fields in downloaded tables. Appears twice
  in the list, since it's in both the basic and expanded package
\end{itemize}

We'll get the data tables for the point data and count data in the basic
package. The list of files doesn't return in the same order every time, so we
won't use position in the list to select. Plus, we want code we can re-use
when getting data from other sites and other months. So we select files based
on the data table name and the package name.
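As a small aside, the publication timestamp embedded in most of these file
names can also be pulled out programmatically, which is one way to check
whether anything has been republished since your last download. The sketch
below is a hypothetical illustration, assuming timestamps of the form
YYYYMMDDTHHMMSSZ as in the listing above:

\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{# hypothetical sketch: extract publication timestamps from the file names}
\CommentTok{# (names without a timestamp, e.g. the personnel file, are simply skipped)}
\NormalTok{pub.stamps <-}\StringTok{ }\KeywordTok{regmatches}\NormalTok{(brd.files}\OperatorTok{$}\NormalTok{data}\OperatorTok{$}\NormalTok{files}\OperatorTok{$}\NormalTok{name,}
\NormalTok{                  }\KeywordTok{regexpr}\NormalTok{(}\StringTok{"[0-9]\{8\}T[0-9]\{6\}Z"}\NormalTok{, brd.files}\OperatorTok{$}\NormalTok{data}\OperatorTok{$}\NormalTok{files}\OperatorTok{$}\NormalTok{name))}
\CommentTok{# TRUE if anything was published after a date of your choosing}
\KeywordTok{any}\NormalTok{(}\KeywordTok{as.POSIXct}\NormalTok{(pub.stamps, }\DataTypeTok{format=}\StringTok{"%Y%m%dT%H%M%SZ"}\NormalTok{, }\DataTypeTok{tz=}\StringTok{"UTC"}\NormalTok{) }\OperatorTok{>}
\NormalTok{    }\KeywordTok{as.POSIXct}\NormalTok{(}\StringTok{"2019-01-01"}\NormalTok{, }\DataTypeTok{tz=}\StringTok{"UTC"}\NormalTok{))}
\end{Highlighting}
\end{Shaded}

Returning to the workflow, we now read in the two basic-package tables.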
\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{# Get both files}
\NormalTok{brd.count <-}\StringTok{ }\KeywordTok{read.delim}\NormalTok{(brd.files}\OperatorTok{$}\NormalTok{data}\OperatorTok{$}\NormalTok{files}\OperatorTok{$}\NormalTok{url}
\NormalTok{                  [}\KeywordTok{intersect}\NormalTok{(}\KeywordTok{grep}\NormalTok{(}\StringTok{"countdata"}\NormalTok{, }
\NormalTok{                             brd.files}\OperatorTok{$}\NormalTok{data}\OperatorTok{$}\NormalTok{files}\OperatorTok{$}\NormalTok{name),}
                             \KeywordTok{grep}\NormalTok{(}\StringTok{"basic"}\NormalTok{, }
\NormalTok{                             brd.files}\OperatorTok{$}\NormalTok{data}\OperatorTok{$}\NormalTok{files}\OperatorTok{$}\NormalTok{name))], }
                  \DataTypeTok{sep=}\StringTok{","}\NormalTok{)}
\NormalTok{brd.point <-}\StringTok{ }\KeywordTok{read.delim}\NormalTok{(brd.files}\OperatorTok{$}\NormalTok{data}\OperatorTok{$}\NormalTok{files}\OperatorTok{$}\NormalTok{url}
\NormalTok{                  [}\KeywordTok{intersect}\NormalTok{(}\KeywordTok{grep}\NormalTok{(}\StringTok{"perpoint"}\NormalTok{, }
\NormalTok{                             brd.files}\OperatorTok{$}\NormalTok{data}\OperatorTok{$}\NormalTok{files}\OperatorTok{$}\NormalTok{name),}
                             \KeywordTok{grep}\NormalTok{(}\StringTok{"basic"}\NormalTok{, }
\NormalTok{                             brd.files}\OperatorTok{$}\NormalTok{data}\OperatorTok{$}\NormalTok{files}\OperatorTok{$}\NormalTok{name))], }
                  \DataTypeTok{sep=}\StringTok{","}\NormalTok{)}
\end{Highlighting}
\end{Shaded}

Now we have the data and can access it in R. Just to show that the files we
pulled have actual data in them, let's make a quick graphic:

\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{# Cluster by species }
\NormalTok{clusterBySp <-}\StringTok{ }\NormalTok{brd.count }\OperatorTok{%>%}
\StringTok{ }\NormalTok{dplyr}\OperatorTok{::}\KeywordTok{group_by}\NormalTok{(scientificName) }\OperatorTok{%>%}
\StringTok{ }\NormalTok{dplyr}\OperatorTok{::}\KeywordTok{summarise}\NormalTok{(}\DataTypeTok{total=}\KeywordTok{sum}\NormalTok{(clusterSize, }\DataTypeTok{na.rm=}\NormalTok{T))}
\end{Highlighting}
\end{Shaded}

\begin{verbatim}
## `summarise()` ungrouping output (override with `.groups` argument)
\end{verbatim}

\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{# Reorder so list is ordered most to least abundant}
\NormalTok{clusterBySp <-}\StringTok{ }\NormalTok{clusterBySp[}\KeywordTok{order}\NormalTok{(clusterBySp}\OperatorTok{$}\NormalTok{total, }\DataTypeTok{decreasing=}\NormalTok{T),]}

\CommentTok{# Plot}
\KeywordTok{barplot}\NormalTok{(clusterBySp}\OperatorTok{$}\NormalTok{total, }\DataTypeTok{names.arg=}\NormalTok{clusterBySp}\OperatorTok{$}\NormalTok{scientificName, }
        \DataTypeTok{ylab=}\StringTok{"Total"}\NormalTok{, }\DataTypeTok{cex.names=}\FloatTok{0.5}\NormalTok{, }\DataTypeTok{las=}\DecValTok{2}\NormalTok{)}
\end{Highlighting}
\end{Shaded}

\includegraphics{INF550_files/figure-latex/os-plot-bird-data-1.pdf}

Wow! There are lots of \emph{Agelaius phoeniceus} (Red-winged Blackbirds) at
WOOD in July.

\hypertarget{instrumentation-data-is}{%
\section{Instrumentation data (IS)}\label{instrumentation-data-is}}

The process is essentially the same for sensor data. We'll do the same series
of queries for Soil Temperature, DP1.00041.001. Let's use data from Moab in
June 2017 this time.
\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{# Request soil temperature data availability info}
\NormalTok{req.soil <-}\StringTok{ }\KeywordTok{GET}\NormalTok{(}\StringTok{"http://data.neonscience.org/api/v0/products/DP1.00041.001"}\NormalTok{)}

\CommentTok{# make this JSON readable}
\CommentTok{# Note how we've changed this from two commands into one here}
\NormalTok{avail.soil <-}\StringTok{ }\NormalTok{jsonlite}\OperatorTok{::}\KeywordTok{fromJSON}\NormalTok{(}\KeywordTok{content}\NormalTok{(req.soil, }\DataTypeTok{as=}\StringTok{"text"}\NormalTok{), }\DataTypeTok{simplifyDataFrame=}\NormalTok{T, }\DataTypeTok{flatten=}\NormalTok{T)}

\CommentTok{# get data availability list for the product}
\NormalTok{temp.urls <-}\StringTok{ }\KeywordTok{unlist}\NormalTok{(avail.soil}\OperatorTok{$}\NormalTok{data}\OperatorTok{$}\NormalTok{siteCodes}\OperatorTok{$}\NormalTok{availableDataUrls)}

\CommentTok{# get data availability from location/date of interest}
\NormalTok{tmp <-}\StringTok{ }\KeywordTok{GET}\NormalTok{(temp.urls[}\KeywordTok{grep}\NormalTok{(}\StringTok{"MOAB/2017-06"}\NormalTok{, temp.urls)])}
\NormalTok{tmp.files <-}\StringTok{ }\NormalTok{jsonlite}\OperatorTok{::}\KeywordTok{fromJSON}\NormalTok{(}\KeywordTok{content}\NormalTok{(tmp, }\DataTypeTok{as=}\StringTok{"text"}\NormalTok{))}
\KeywordTok{length}\NormalTok{(tmp.files}\OperatorTok{$}\NormalTok{data}\OperatorTok{$}\NormalTok{files}\OperatorTok{$}\NormalTok{name) }\CommentTok{# There are a lot of available files}
\end{Highlighting}
\end{Shaded}

\begin{verbatim}
## [1] 190
\end{verbatim}

\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{tmp.files}\OperatorTok{$}\NormalTok{data}\OperatorTok{$}\NormalTok{files}\OperatorTok{$}\NormalTok{name[}\DecValTok{1}\OperatorTok{:}\DecValTok{10}\NormalTok{] }\CommentTok{# Let's print the first 10}
\end{Highlighting}
\end{Shaded}

\begin{verbatim}
##  [1] "NEON.D13.MOAB.DP1.00041.001.004.501.030.ST_30_minute.2017-06.expanded.20200620T070859Z.csv"
##  [2] "NEON.D13.MOAB.DP1.00041.001.002.506.030.ST_30_minute.2017-06.expanded.20200620T070859Z.csv"
##  [3] "NEON.D13.MOAB.DP1.00041.001.004.505.001.ST_1_minute.2017-06.expanded.20200620T070859Z.csv"
##  [4] "NEON.D13.MOAB.DP1.00041.001.001.508.001.ST_1_minute.2017-06.expanded.20200620T070859Z.csv"
##  [5] "NEON.D13.MOAB.DP1.00041.001.003.505.030.ST_30_minute.2017-06.expanded.20200620T070859Z.csv"
##  [6] "NEON.D13.MOAB.DP1.00041.001.003.501.001.ST_1_minute.2017-06.expanded.20200620T070859Z.csv"
##  [7] "NEON.D13.MOAB.DP1.00041.001.002.501.030.ST_30_minute.2017-06.expanded.20200620T070859Z.csv"
##  [8] "NEON.D13.MOAB.DP1.00041.001.004.502.001.ST_1_minute.2017-06.expanded.20200620T070859Z.csv"
##  [9] "NEON.D13.MOAB.DP1.00041.001.004.509.001.ST_1_minute.2017-06.expanded.20200620T070859Z.csv"
## [10] "NEON.D13.MOAB.DP1.00041.001.sensor_positions.20200620T070859Z.csv"
\end{verbatim}

These file names start and end the same way as the observational files, but
the middle is a little more cryptic. The structure from beginning to end is:

\textbf{NEON.{[}domain number{]}.{[}site code{]}.{[}data product
ID{]}.{[}soil plot number{]}.{[}depth{]}.{[}averaging interval{]}.{[}data
table name{]}.
{[}year{]}-{[}month{]}.{[}data package{]}.{[}date of file creation{]}}

So
\textbf{``NEON.D13.MOAB.DP1.00041.001.003.507.030.ST\_30\_minute.2017-06.expanded.20200620T070859Z.csv''}
is the:

\begin{itemize}
\tightlist
\item
  NEON (\texttt{NEON.})
\item
  Domain 13 (\texttt{.D13.})
\item
  Moab field site (\texttt{.MOAB.})
\item
  soil temperature data (\texttt{.DP1.00041.001.})
\item
  collected in Soil Plot 3 (\texttt{.003.})
\item
  at the 7th depth below the surface (\texttt{.507.})
\item
  and reported as a 30-minute mean (\texttt{.030.} and
  \texttt{.ST\_30\_minute.})
\item
  only for the period of June 2017 (\texttt{.2017-06.})
\item
  and provided in an expanded data package (\texttt{.expanded.})
\item
  published on June 20th, 2020 (\texttt{.20200620T070859Z.}).
\end{itemize}

More information about interpreting file names can be found in the readme
that accompanies each download.

Let's get data (and the URL) for Soil Plot 2 at the 2nd depth below the
surface, as a 30-minute mean, by selecting \texttt{002.502.030} and the word
\texttt{basic} in the file name. Go get it:

\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{soil.temp <-}\StringTok{ }\KeywordTok{read.delim}\NormalTok{(tmp.files}\OperatorTok{$}\NormalTok{data}\OperatorTok{$}\NormalTok{files}\OperatorTok{$}\NormalTok{url}
\NormalTok{                  [}\KeywordTok{intersect}\NormalTok{(}\KeywordTok{grep}\NormalTok{(}\StringTok{"002.502.030"}\NormalTok{, }
\NormalTok{                             tmp.files}\OperatorTok{$}\NormalTok{data}\OperatorTok{$}\NormalTok{files}\OperatorTok{$}\NormalTok{name),}
                             \KeywordTok{grep}\NormalTok{(}\StringTok{"basic"}\NormalTok{, }
\NormalTok{                             tmp.files}\OperatorTok{$}\NormalTok{data}\OperatorTok{$}\NormalTok{files}\OperatorTok{$}\NormalTok{name))], }
                  \DataTypeTok{sep=}\StringTok{","}\NormalTok{)}
\end{Highlighting}
\end{Shaded}

Now we have the data and can use it to conduct our analyses. To take a quick
look at it, let's plot the mean soil temperature by date.

\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{# plot temp ~ date}
\KeywordTok{plot}\NormalTok{(soil.temp}\OperatorTok{$}\NormalTok{soilTempMean}\OperatorTok{~}\KeywordTok{as.POSIXct}\NormalTok{(soil.temp}\OperatorTok{$}\NormalTok{startDateTime, }
     \DataTypeTok{format=}\StringTok{"%Y-%m-%d T %H:%M:%S Z"}\NormalTok{), }
     \DataTypeTok{pch=}\StringTok{"."}\NormalTok{, }\DataTypeTok{xlab=}\StringTok{"Date"}\NormalTok{, }\DataTypeTok{ylab=}\StringTok{"T"}\NormalTok{)}
\end{Highlighting}
\end{Shaded}

\includegraphics{INF550_files/figure-latex/os-plot-soil-data-1.pdf}

As we'd expect, we see daily fluctuation in soil temperature.

\hypertarget{remote-sensing-data-aop}{%
\section{Remote sensing data (AOP)}\label{remote-sensing-data-aop}}

Again, the process of determining which sites and time periods have data, and
finding the URLs for those data, is the same as for the other data types.
We'll go looking for high-resolution orthorectified camera imagery,
DP1.30010.001, and we'll look at the flight over the San Joaquin Experimental
Range (SJER) in March 2017.
\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{# Request camera data availability info}
\NormalTok{req.aop <-}\StringTok{ }\KeywordTok{GET}\NormalTok{(}\StringTok{"http://data.neonscience.org/api/v0/products/DP1.30010.001"}\NormalTok{)}

\CommentTok{# make this JSON readable}
\CommentTok{# Note how we've changed this from two commands into one here}
\NormalTok{avail.aop <-}\StringTok{ }\NormalTok{jsonlite}\OperatorTok{::}\KeywordTok{fromJSON}\NormalTok{(}\KeywordTok{content}\NormalTok{(req.aop, }\DataTypeTok{as=}\StringTok{"text"}\NormalTok{), }
                            \DataTypeTok{simplifyDataFrame=}\NormalTok{T, }\DataTypeTok{flatten=}\NormalTok{T)}

\CommentTok{# get data availability list for the product}
\NormalTok{cam.urls <-}\StringTok{ }\KeywordTok{unlist}\NormalTok{(avail.aop}\OperatorTok{$}\NormalTok{data}\OperatorTok{$}\NormalTok{siteCodes}\OperatorTok{$}\NormalTok{availableDataUrls)}

\CommentTok{# get data availability from location/date of interest}
\NormalTok{cam <-}\StringTok{ }\KeywordTok{GET}\NormalTok{(cam.urls[}\KeywordTok{intersect}\NormalTok{(}\KeywordTok{grep}\NormalTok{(}\StringTok{"SJER"}\NormalTok{, cam.urls),}
                  \KeywordTok{grep}\NormalTok{(}\StringTok{"2017"}\NormalTok{, cam.urls))])}
\NormalTok{cam.files <-}\StringTok{ }\NormalTok{jsonlite}\OperatorTok{::}\KeywordTok{fromJSON}\NormalTok{(}\KeywordTok{content}\NormalTok{(cam, }\DataTypeTok{as=}\StringTok{"text"}\NormalTok{))}

\CommentTok{# this list of files is very long, so we'll just look at the first ten}
\KeywordTok{head}\NormalTok{(cam.files}\OperatorTok{$}\NormalTok{data}\OperatorTok{$}\NormalTok{files}\OperatorTok{$}\NormalTok{name, }\DecValTok{10}\NormalTok{)}
\end{Highlighting}
\end{Shaded}

\begin{verbatim}
##  [1] "17032816_EH021656(20170328190629)-0680_ort.tif"
##  [2] "17032816_EH021656(20170328190117)-0642_ort.tif"
##  [3] "17032816_EH021656(20170328195321)-1085_ort.tif"
##  [4] "17032816_EH021656(20170328182204)-0324_ort.tif"
##  [5] "17032816_EH021656(20170328193045)-0880_ort.tif"
##  [6] "17032816_EH021656(20170328182447)-0358_ort.tif"
##  [7] "17032816_EH021656(20170328185526)-0596_ort.tif"
##  [8] "17032816_EH021656(20170328180003)-0154_ort.tif"
##  [9] "17032816_EH021656(20170328192935)-0864_ort.tif"
## [10] "17032816_EH021656(20170328191703)-0760_ort.tif"
\end{verbatim}

File names for AOP data are more variable than for IS or OS data; different
AOP data products use different naming conventions. File formats differ by
product as well. This particular product, camera imagery, is stored in TIFF
files. For a full list of AOP data products, their naming conventions, and
their file formats, see the documentation for each product on the NEON data
portal.

Instead of reading a TIFF into R, we'll download it to the working directory.
This is one option for getting AOP files from the API; if you plan to work
with the files in R, you'll need to know how to read the relevant file types
into R. We hope to add tutorials for this in the near future.
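For reference, once the file has been downloaded (as in the next step), one
way to read it back into R is with the \texttt{raster} package. This is only
a sketch, under the assumption that \texttt{raster} is installed; it is not
part of the original tutorial code:

\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{# hypothetical sketch: read the downloaded 3-band camera image and plot it}
\KeywordTok{library}\NormalTok{(raster)}
\NormalTok{sjer.tile <-}\StringTok{ }\KeywordTok{brick}\NormalTok{(}\StringTok{"SJER_image.tif"}\NormalTok{)}
\KeywordTok{plotRGB}\NormalTok{(sjer.tile)}
\end{Highlighting}
\end{Shaded}

Any other package that reads multi-band GeoTIFFs would work equally well.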
To download the TIFF file, we use the \texttt{downloader} package, and we'll
select a file based on the time stamp in the file name:
\texttt{20170328192931}.

\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{download}\NormalTok{(cam.files}\OperatorTok{$}\NormalTok{data}\OperatorTok{$}\NormalTok{files}\OperatorTok{$}\NormalTok{url[}\KeywordTok{grep}\NormalTok{(}\StringTok{"20170328192931"}\NormalTok{, }
\NormalTok{         cam.files}\OperatorTok{$}\NormalTok{data}\OperatorTok{$}\NormalTok{files}\OperatorTok{$}\NormalTok{name)],}
         \KeywordTok{paste}\NormalTok{(}\KeywordTok{getwd}\NormalTok{(), }\StringTok{"/SJER_image.tif"}\NormalTok{, }\DataTypeTok{sep=}\StringTok{""}\NormalTok{), }\DataTypeTok{mode=}\StringTok{"wb"}\NormalTok{)}
\end{Highlighting}
\end{Shaded}

The image of the San Joaquin Experimental Range should now be in your working
directory. A rendered version of this camera tile, captioned ``An example of
camera data (DP1.30010.001) from the San Joaquin Experimental Range. Source:
National Ecological Observatory Network (NEON),'' can be viewed at
\url{https://raw.githubusercontent.com/NEONScience/NEON-Data-Skills/dev-aten/graphics/neon-aop/SJER_tile_20170328192931.png}.

\hypertarget{geolocation-data}{%
\section{Geolocation data}\label{geolocation-data}}

You may have noticed some of the spatial data referenced above are a bit
vague, e.g. ``soil plot 2, 2nd depth below the surface.'' How to get spatial
data and what to do with it depends on which type of data you're working
with.

\hypertarget{instrumentation-data-both-aquatic-and-terrestrial}{%
\subsubsection{Instrumentation data (both aquatic and terrestrial)}\label{instrumentation-data-both-aquatic-and-terrestrial}}

Stay tuned: spatial data for instruments are in the process of being entered
into the NEON database.

\hypertarget{observational-data---aquatic}{%
\subsubsection{Observational data - Aquatic}\label{observational-data---aquatic}}

Latitude, longitude, elevation, and associated uncertainties are included in
data downloads. Most products also include an ``additional coordinate
uncertainty'' that should be added to the provided uncertainty. Additional
spatial data, such as northing and easting, can be downloaded from the API.

\hypertarget{observational-data---terrestrial}{%
\subsubsection{Observational data - Terrestrial}\label{observational-data---terrestrial}}

Latitude, longitude, elevation, and associated uncertainties are included in
data downloads. These are the coordinates and uncertainty of the sampling
plot; for many protocols it is possible to calculate a more precise location.
Instructions for doing this are in the respective data product user guides,
and code is in the \texttt{geoNEON} package on GitHub.

\hypertarget{querying-a-single-named-location}{%
\subsection{Querying a single named location}\label{querying-a-single-named-location}}

Let's look at the named locations in the bird data we downloaded above. To do
this, look for the field called \texttt{namedLocation}, which is present in
all observational data products, both aquatic and terrestrial.
\begin{Shaded} \begin{Highlighting}[] \CommentTok{# view named location} \KeywordTok{head}\NormalTok{(brd.point}\OperatorTok{$}\NormalTok{namedLocation)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## [1] WOOD_013.birdGrid.brd WOOD_013.birdGrid.brd WOOD_013.birdGrid.brd ## [4] WOOD_013.birdGrid.brd WOOD_013.birdGrid.brd WOOD_013.birdGrid.brd ## 7 Levels: WOOD_006.birdGrid.brd ... WOOD_020.birdGrid.brd \end{verbatim} Here we see the first six entries in the \texttt{namedLocation} column which tells us the names of the Terrestrial Observation plots where the bird surveys were conducted. We can query the locations endpoint of the API for the first named location, \texttt{WOOD\_013.birdGrid.brd}. \begin{Shaded} \begin{Highlighting}[] \CommentTok{# location data } \NormalTok{req.loc <-}\StringTok{ }\KeywordTok{GET}\NormalTok{(}\StringTok{"http://data.neonscience.org/api/v0/locations/WOOD_013.birdGrid.brd"}\NormalTok{)} \CommentTok{# make this JSON readable} \NormalTok{brd.WOOD_}\DecValTok{013}\NormalTok{ <-}\StringTok{ }\NormalTok{jsonlite}\OperatorTok{::}\KeywordTok{fromJSON}\NormalTok{(}\KeywordTok{content}\NormalTok{(req.loc, }\DataTypeTok{as=}\StringTok{"text"}\NormalTok{))} \NormalTok{brd.WOOD_}\DecValTok{013} \end{Highlighting} \end{Shaded} \begin{verbatim} ## $data ## $data$locationName ## [1] "WOOD_013.birdGrid.brd" ## ## $data$locationDescription ## [1] "Plot \"WOOD_013\" at site \"WOOD\"" ## ## $data$locationType ## [1] "OS Plot - brd" ## ## $data$domainCode ## [1] "D09" ## ## $data$siteCode ## [1] "WOOD" ## ## $data$locationDecimalLatitude ## [1] 47.13912 ## ## $data$locationDecimalLongitude ## [1] -99.23243 ## ## $data$locationElevation ## [1] 579.31 ## ## $data$locationUtmEasting ## [1] 482375.7 ## ## $data$locationUtmNorthing ## [1] 5220650 ## ## $data$locationUtmHemisphere ## [1] "N" ## ## $data$locationUtmZone ## [1] 14 ## ## $data$alphaOrientation ## [1] 0 ## ## $data$betaOrientation ## [1] 0 ## ## $data$gammaOrientation ## [1] 0 ## ## $data$xOffset ## [1] 0 ## ## $data$yOffset ## [1] 0 ## ## $data$zOffset ## [1] 0 ## ## $data$offsetLocation ## NULL ## ## $data$locationProperties ## locationPropertyName locationPropertyValue ## 1 Value for Coordinate source GeoXH 6000 ## 2 Value for Coordinate uncertainty 0.28 ## 3 Value for Country unitedStates ## 4 Value for County Stutsman ## 5 Value for Elevation uncertainty 0.48 ## 6 Value for Filtered positions 121 ## 7 Value for Geodetic datum WGS84 ## 8 Value for Horizontal dilution of precision 1 ## 9 Value for Maximum elevation 579.31 ## 10 Value for Minimum elevation 569.79 ## 11 Value for National Land Cover Database (2001) grasslandHerbaceous ## 12 Value for Plot dimensions 500m x 500m ## 13 Value for Plot ID WOOD_013 ## 14 Value for Plot size 250000 ## 15 Value for Plot subtype birdGrid ## 16 Value for Plot type distributed ## 17 Value for Positional dilution of precision 2.4 ## 18 Value for Reference Point Position B2 ## 19 Value for Slope aspect 238.91 ## 20 Value for Slope gradient 2.83 ## 21 Value for Soil type order Mollisols ## 22 Value for State province ND ## 23 Value for Subtype Specification ninePoints ## 24 Value for UTM Zone 14N ## ## $data$locationParent ## [1] "WOOD" ## ## $data$locationParentUrl ## [1] "https://data.neonscience.org/api/v0/locations/WOOD" ## ## $data$locationChildren ## [1] "WOOD_013.birdGrid.brd.B2" "WOOD_013.birdGrid.brd.A2" ## [3] "WOOD_013.birdGrid.brd.C3" "WOOD_013.birdGrid.brd.A3" ## [5] "WOOD_013.birdGrid.brd.B3" "WOOD_013.birdGrid.brd.C1" ## [7] "WOOD_013.birdGrid.brd.A1" 
"WOOD_013.birdGrid.brd.B1" ## [9] "WOOD_013.birdGrid.brd.C2" ## ## $data$locationChildrenUrls ## [1] "https://data.neonscience.org/api/v0/locations/WOOD_013.birdGrid.brd.B2" ## [2] "https://data.neonscience.org/api/v0/locations/WOOD_013.birdGrid.brd.A2" ## [3] "https://data.neonscience.org/api/v0/locations/WOOD_013.birdGrid.brd.C3" ## [4] "https://data.neonscience.org/api/v0/locations/WOOD_013.birdGrid.brd.A3" ## [5] "https://data.neonscience.org/api/v0/locations/WOOD_013.birdGrid.brd.B3" ## [6] "https://data.neonscience.org/api/v0/locations/WOOD_013.birdGrid.brd.C1" ## [7] "https://data.neonscience.org/api/v0/locations/WOOD_013.birdGrid.brd.A1" ## [8] "https://data.neonscience.org/api/v0/locations/WOOD_013.birdGrid.brd.B1" ## [9] "https://data.neonscience.org/api/v0/locations/WOOD_013.birdGrid.brd.C2" \end{verbatim} Note spatial information under \texttt{\$data\${[}nameOfCoordinate{]}} and under \texttt{\$data\$locationProperties}. Also note \texttt{\$data\$locationChildren}: these are the finer scale locations that can be used to calculate precise spatial data for bird observations. For convenience, we'll use the \texttt{geoNEON} package to make the calculations. First we'll use \texttt{getLocByName()} to get the additional spatial information available through the API, and look at the spatial resolution available in the initial download: \begin{Shaded} \begin{Highlighting}[] \CommentTok{# load the geoNEON package} \KeywordTok{library}\NormalTok{(geoNEON)} \CommentTok{# extract the spatial data} \NormalTok{brd.point.loc <-}\StringTok{ }\KeywordTok{getLocByName}\NormalTok{(brd.point)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## | | | 0% | |========= | 14% | |=================== | 29% | |============================ | 43% | |===================================== | 57% | |============================================== | 71% | |======================================================== | 86% | |=================================================================| 100% \end{verbatim} \begin{Shaded} \begin{Highlighting}[] \CommentTok{# plot bird point locations } \CommentTok{# note that decimal degrees is also an option in the data} \KeywordTok{symbols}\NormalTok{(brd.point.loc}\OperatorTok{$}\NormalTok{easting, brd.point.loc}\OperatorTok{$}\NormalTok{northing, } \DataTypeTok{circles=}\NormalTok{brd.point.loc}\OperatorTok{$}\NormalTok{coordinateUncertainty, } \DataTypeTok{xlab=}\StringTok{"Easting"}\NormalTok{, }\DataTypeTok{ylab=}\StringTok{"Northing"}\NormalTok{, }\DataTypeTok{tck=}\FloatTok{0.01}\NormalTok{, }\DataTypeTok{inches=}\NormalTok{F)} \end{Highlighting} \end{Shaded} \includegraphics{INF550_files/figure-latex/brd-extr-NL-1.pdf} And use \texttt{getLocTOS()} to calculate the point locations of observations. 
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{brd.point.pt <-}\StringTok{ }\KeywordTok{getLocTOS}\NormalTok{(brd.point, }\StringTok{"brd_perpoint"}\NormalTok{)}
\end{Highlighting}
\end{Shaded}

\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{# plot bird point locations }
\CommentTok{# note that decimal degrees is also an option in the data}
\CommentTok{# symbols(brd.point.pt$easting, brd.point.pt$northing, }
\CommentTok{#         circles=brd.point.pt$adjCoordinateUncertainty, }
\CommentTok{#         xlab="Easting", ylab="Northing", tck=0.01, inches=F)}
\end{Highlighting}
\end{Shaded}

Un-comment and run the plotting code above to see the individual points where
the respective point counts were conducted.
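As an aside, the child named locations returned by the locations endpoint
earlier (e.g., \texttt{WOOD\_013.birdGrid.brd.B2}) can also be queried
directly, in exactly the same way as the parent plot, if you prefer to work
with the API responses yourself; a minimal sketch:

\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{# query one of the child points of the bird grid directly}
\NormalTok{req.b2 <-}\StringTok{ }\KeywordTok{GET}\NormalTok{(}\StringTok{"https://data.neonscience.org/api/v0/locations/WOOD_013.birdGrid.brd.B2"}\NormalTok{)}
\NormalTok{b2.loc <-}\StringTok{ }\NormalTok{jsonlite}\OperatorTok{::}\KeywordTok{fromJSON}\NormalTok{(}\KeywordTok{content}\NormalTok{(req.b2, }\DataTypeTok{as=}\StringTok{"text"}\NormalTok{))}
\NormalTok{b2.loc}\OperatorTok{$}\NormalTok{data}\OperatorTok{$}\NormalTok{locationDecimalLatitude}
\end{Highlighting}
\end{Shaded}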
\hypertarget{taxonomy}{%
\section{Taxonomy}\label{taxonomy}}

NEON maintains accepted taxonomies for many of the taxonomic identification
data we collect. NEON taxonomies are available for query via the API; they
are also provided via an interactive user interface, the Taxon Viewer. NEON
taxonomy data provides the reference information for how NEON validates taxa;
an identification must appear in the taxonomy lists in order to be accepted
into the NEON database. Additions to the lists are reviewed regularly. The
taxonomy lists also provide the author of the scientific name and the
reference text used.

The taxonomy endpoint of the API works a little bit differently from the
other endpoints. In the ``Anatomy of an API Call'' section above, each
endpoint has a single type of target: a data product number, a named location
name, etc. For taxonomic data, there are multiple query options, and some of
them can be used in combination. For example, a query for taxa in the
Pinaceae family:

\url{http://data.neonscience.org/api/v0/taxonomy/?family=Pinaceae}

The available types of queries are listed in the taxonomy section of the API
web page. Briefly, they are:

\begin{itemize}
\tightlist
\item
  \texttt{taxonTypeCode}: Which of the taxonomies maintained by NEON are you
  looking for? BIRD, FISH, PLANT, etc. Cannot be used in combination with the
  taxonomic rank queries.
\item
  each of the major taxonomic ranks from genus through kingdom
\item
  \texttt{scientificname}: Genus + specific epithet (+ authority). Search is
  by exact match only; see the final example below.
\item
  \texttt{verbose}: Do you want the short (\texttt{false}) or long
  (\texttt{true}) response?
\item
  \texttt{offset}: Skip this number of items in the list. Defaults to 50.
\item
  \texttt{limit}: Result set will be truncated at this length. Defaults to
  50.
\end{itemize}

Staff on the NEON project have plans to modify the settings for
\texttt{offset} and \texttt{limit}, such that \texttt{offset} will default to
0 and \texttt{limit} will default to ∞, but in the meantime users will want
to set these manually. They are set to non-default values in the examples
below.

For the first example, let's query for the loon family, Gaviidae, in the bird
taxonomy. Note that query parameters are case-sensitive.

\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{loon.req <-}\StringTok{ }\KeywordTok{GET}\NormalTok{(}\StringTok{"http://data.neonscience.org/api/v0/taxonomy/?family=Gaviidae&offset=0&limit=500"}\NormalTok{)}
\end{Highlighting}
\end{Shaded}

Parse the results into a list using \texttt{fromJSON()}:

\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{loon.list <-}\StringTok{ }\NormalTok{jsonlite}\OperatorTok{::}\KeywordTok{fromJSON}\NormalTok{(}\KeywordTok{content}\NormalTok{(loon.req, }\DataTypeTok{as=}\StringTok{"text"}\NormalTok{))}
\end{Highlighting}
\end{Shaded}

And look at the \texttt{\$data} element of the results, which contains:

\begin{itemize}
\tightlist
\item
  The full taxonomy of each taxon
\item
  The short taxon code used by NEON (taxonID/acceptedTaxonID)
\item
  The author of the scientific name (scientificNameAuthorship)
\item
  The vernacular name, if applicable
\item
  The reference text used (nameAccordingToID)
\end{itemize}

The terms used for each field are matched to Darwin Core (dwc) and the Global
Biodiversity Information Facility (gbif) terms, where possible, and the
matches are indicated in the column headers.
\begin{Shaded} \begin{Highlighting}[] \NormalTok{loon.list}\OperatorTok{$}\NormalTok{data} \end{Highlighting} \end{Shaded} \begin{verbatim} ## taxonTypeCode taxonID acceptedTaxonID dwc:scientificName ## 1 BIRD ARLO ARLO Gavia arctica ## 2 BIRD COLO COLO Gavia immer ## 3 BIRD PALO PALO Gavia pacifica ## 4 BIRD RTLO RTLO Gavia stellata ## 5 BIRD YBLO YBLO Gavia adamsii ## dwc:scientificNameAuthorship dwc:taxonRank dwc:vernacularName ## 1 (Linnaeus) species Arctic Loon ## 2 (Brunnich) species Common Loon ## 3 (Lawrence) species Pacific Loon ## 4 (Pontoppidan) species Red-throated Loon ## 5 (G. R. Gray) species Yellow-billed Loon ## dwc:nameAccordingToID dwc:kingdom dwc:phylum dwc:class dwc:order ## 1 doi: 10.1642/AUK-15-73.1 Animalia Chordata Aves Gaviiformes ## 2 doi: 10.1642/AUK-15-73.1 Animalia Chordata Aves Gaviiformes ## 3 doi: 10.1642/AUK-15-73.1 Animalia Chordata Aves Gaviiformes ## 4 doi: 10.1642/AUK-15-73.1 Animalia Chordata Aves Gaviiformes ## 5 doi: 10.1642/AUK-15-73.1 Animalia Chordata Aves Gaviiformes ## dwc:family dwc:genus gbif:subspecies gbif:variety ## 1 Gaviidae Gavia NA NA ## 2 Gaviidae Gavia NA NA ## 3 Gaviidae Gavia NA NA ## 4 Gaviidae Gavia NA NA ## 5 Gaviidae Gavia NA NA \end{verbatim} To get the entire list for a particular taxonomic type, use the \texttt{taxonTypeCode} query. Be cautious with this query, the PLANT taxonomic list has several hundred thousand entries. For an example, let's look up the small mammal taxonomic list, which is one of the shorter ones, and use the \texttt{verbose=true} option to see a more extensive list of taxon data, including many taxon ranks that aren't populated for these taxa. For space here, we display only the first 10 taxa: \begin{Shaded} \begin{Highlighting}[] \NormalTok{mam.req <-}\StringTok{ }\KeywordTok{GET}\NormalTok{(}\StringTok{"http://data.neonscience.org/api/v0/taxonomy/?taxonTypeCode=SMALL_MAMMAL&offset=0&limit=500&verbose=true"}\NormalTok{)} \NormalTok{mam.list <-}\StringTok{ }\NormalTok{jsonlite}\OperatorTok{::}\KeywordTok{fromJSON}\NormalTok{(}\KeywordTok{content}\NormalTok{(mam.req, }\DataTypeTok{as=}\StringTok{"text"}\NormalTok{))} \NormalTok{mam.list}\OperatorTok{$}\NormalTok{data[}\DecValTok{1}\OperatorTok{:}\DecValTok{10}\NormalTok{,]} \end{Highlighting} \end{Shaded} \begin{verbatim} ## taxonTypeCode taxonID acceptedTaxonID dwc:scientificName ## 1 SMALL_MAMMAL AMHA AMHA Ammospermophilus harrisii ## 2 SMALL_MAMMAL AMIN AMIN Ammospermophilus interpres ## 3 SMALL_MAMMAL AMLE AMLE Ammospermophilus leucurus ## 4 SMALL_MAMMAL AMLT AMLT Ammospermophilus leucurus tersus ## 5 SMALL_MAMMAL AMNE AMNE Ammospermophilus nelsoni ## 6 SMALL_MAMMAL AMSP AMSP Ammospermophilus sp. 
## 7 SMALL_MAMMAL APRN APRN Aplodontia rufa nigra ## 8 SMALL_MAMMAL APRU APRU Aplodontia rufa ## 9 SMALL_MAMMAL ARAL ARAL Arborimus albipes ## 10 SMALL_MAMMAL ARLO ARLO Arborimus longicaudus ## dwc:scientificNameAuthorship dwc:taxonRank ## 1 Audubon and Bachman species ## 2 Merriam species ## 3 Merriam species ## 4 Goldman subspecies ## 5 Merriam species ## 6 <NA> genus ## 7 Taylor subspecies ## 8 Rafinesque species ## 9 Merriam species ## 10 True species ## dwc:vernacularName taxonProtocolCategory ## 1 Harriss Antelope Squirrel opportunistic ## 2 Texas Antelope Squirrel opportunistic ## 3 Whitetailed Antelope Squirrel opportunistic ## 4 <NA> opportunistic ## 5 Nelsons Antelope Squirrel opportunistic ## 6 <NA> opportunistic ## 7 <NA> non-target ## 8 Sewellel non-target ## 9 Whitefooted Vole target ## 10 Red Tree Vole target ## dwc:nameAccordingToID ## 1 isbn: 978 0801882210 ## 2 isbn: 978 0801882210 ## 3 isbn: 978 0801882210 ## 4 isbn: 978 0801882210 ## 5 isbn: 978 0801882210 ## 6 isbn: 978 0801882210 ## 7 isbn: 978 0801882210 ## 8 isbn: 978 0801882210 ## 9 isbn: 978 0801882210 ## 10 isbn: 978 0801882210 ## dwc:nameAccordingToTitle ## 1 Wilson D. E. and D. M. Reeder. 2005. Mammal Species of the World; A Taxonomic and Geographic Reference. Third edition. Johns Hopkins University Press; Baltimore, MD. ## 2 Wilson D. E. and D. M. Reeder. 2005. Mammal Species of the World; A Taxonomic and Geographic Reference. Third edition. Johns Hopkins University Press; Baltimore, MD. ## 3 Wilson D. E. and D. M. Reeder. 2005. Mammal Species of the World; A Taxonomic and Geographic Reference. Third edition. Johns Hopkins University Press; Baltimore, MD. ## 4 Wilson D. E. and D. M. Reeder. 2005. Mammal Species of the World; A Taxonomic and Geographic Reference. Third edition. Johns Hopkins University Press; Baltimore, MD. ## 5 Wilson D. E. and D. M. Reeder. 2005. Mammal Species of the World; A Taxonomic and Geographic Reference. Third edition. Johns Hopkins University Press; Baltimore, MD. ## 6 Wilson D. E. and D. M. Reeder. 2005. Mammal Species of the World; A Taxonomic and Geographic Reference. Third edition. Johns Hopkins University Press; Baltimore, MD. ## 7 Wilson D. E. and D. M. Reeder. 2005. Mammal Species of the World; A Taxonomic and Geographic Reference. Third edition. Johns Hopkins University Press; Baltimore, MD. ## 8 Wilson D. E. and D. M. Reeder. 2005. Mammal Species of the World; A Taxonomic and Geographic Reference. Third edition. Johns Hopkins University Press; Baltimore, MD. ## 9 Wilson D. E. and D. M. Reeder. 2005. Mammal Species of the World; A Taxonomic and Geographic Reference. Third edition. Johns Hopkins University Press; Baltimore, MD. ## 10 Wilson D. E. and D. M. Reeder. 2005. Mammal Species of the World; A Taxonomic and Geographic Reference. Third edition. Johns Hopkins University Press; Baltimore, MD. 
## dwc:kingdom gbif:subkingdom gbif:infrakingdom gbif:superdivision ## 1 Animalia NA NA NA ## 2 Animalia NA NA NA ## 3 Animalia NA NA NA ## 4 Animalia NA NA NA ## 5 Animalia NA NA NA ## 6 Animalia NA NA NA ## 7 Animalia NA NA NA ## 8 Animalia NA NA NA ## 9 Animalia NA NA NA ## 10 Animalia NA NA NA ## gbif:division gbif:subdivision gbif:infradivision gbif:parvdivision ## 1 NA NA NA NA ## 2 NA NA NA NA ## 3 NA NA NA NA ## 4 NA NA NA NA ## 5 NA NA NA NA ## 6 NA NA NA NA ## 7 NA NA NA NA ## 8 NA NA NA NA ## 9 NA NA NA NA ## 10 NA NA NA NA ## gbif:superphylum dwc:phylum gbif:subphylum gbif:infraphylum ## 1 NA Chordata NA NA ## 2 NA Chordata NA NA ## 3 NA Chordata NA NA ## 4 NA Chordata NA NA ## 5 NA Chordata NA NA ## 6 NA Chordata NA NA ## 7 NA Chordata NA NA ## 8 NA Chordata NA NA ## 9 NA Chordata NA NA ## 10 NA Chordata NA NA ## gbif:superclass dwc:class gbif:subclass gbif:infraclass gbif:superorder ## 1 NA Mammalia NA NA NA ## 2 NA Mammalia NA NA NA ## 3 NA Mammalia NA NA NA ## 4 NA Mammalia NA NA NA ## 5 NA Mammalia NA NA NA ## 6 NA Mammalia NA NA NA ## 7 NA Mammalia NA NA NA ## 8 NA Mammalia NA NA NA ## 9 NA Mammalia NA NA NA ## 10 NA Mammalia NA NA NA ## dwc:order gbif:suborder gbif:infraorder gbif:section gbif:subsection ## 1 Rodentia NA NA NA NA ## 2 Rodentia NA NA NA NA ## 3 Rodentia NA NA NA NA ## 4 Rodentia NA NA NA NA ## 5 Rodentia NA NA NA NA ## 6 Rodentia NA NA NA NA ## 7 Rodentia NA NA NA NA ## 8 Rodentia NA NA NA NA ## 9 Rodentia NA NA NA NA ## 10 Rodentia NA NA NA NA ## gbif:superfamily dwc:family gbif:subfamily gbif:tribe gbif:subtribe ## 1 NA Sciuridae Xerinae Marmotini NA ## 2 NA Sciuridae Xerinae Marmotini NA ## 3 NA Sciuridae Xerinae Marmotini NA ## 4 NA Sciuridae Xerinae Marmotini NA ## 5 NA Sciuridae Xerinae Marmotini NA ## 6 NA Sciuridae Xerinae Marmotini NA ## 7 NA Aplodontiidae <NA> <NA> NA ## 8 NA Aplodontiidae <NA> <NA> NA ## 9 NA Cricetidae Arvicolinae <NA> NA ## 10 NA Cricetidae Arvicolinae <NA> NA ## dwc:genus dwc:subgenus gbif:subspecies gbif:variety ## 1 Ammospermophilus <NA> NA NA ## 2 Ammospermophilus <NA> NA NA ## 3 Ammospermophilus <NA> NA NA ## 4 Ammospermophilus <NA> NA NA ## 5 Ammospermophilus <NA> NA NA ## 6 Ammospermophilus <NA> NA NA ## 7 Aplodontia <NA> NA NA ## 8 Aplodontia <NA> NA NA ## 9 Arborimus <NA> NA NA ## 10 Arborimus <NA> NA NA ## gbif:subvariety gbif:form gbif:subform speciesGroup dwc:specificEpithet ## 1 NA NA NA <NA> harrisii ## 2 NA NA NA <NA> interpres ## 3 NA NA NA <NA> leucurus ## 4 NA NA NA <NA> leucurus ## 5 NA NA NA <NA> nelsoni ## 6 NA NA NA <NA> sp. ## 7 NA NA NA <NA> rufa ## 8 NA NA NA <NA> rufa ## 9 NA NA NA <NA> albipes ## 10 NA NA NA <NA> longicaudus ## dwc:infraspecificEpithet ## 1 <NA> ## 2 <NA> ## 3 <NA> ## 4 tersus ## 5 <NA> ## 6 <NA> ## 7 nigra ## 8 <NA> ## 9 <NA> ## 10 <NA> \end{verbatim} To get information about a single taxon, use the \texttt{scientificname} query. This query will not do a fuzzy match, so you need to query the exact name of the taxon in the NEON taxonomy. Because of this, the query will be most useful when you already have NEON data in hand and are looking for more information about a specific taxon. Querying on \texttt{scientificname} is unlikely to be an efficient way to figure out if NEON recognizes a particular taxon. In addition, scientific names contain spaces, which are not allowed in a URL. The spaces need to be replaced with the URL encoding replacement, \%20. 
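If you would rather not type the \%20 replacements by hand, base R can do the
substitution for you; a minimal sketch using \texttt{URLencode()} from the
\texttt{utils} package:

\begin{verbatim}
# build the scientificname query without manual %20 encoding
sci.name <- "Abronia minor Standl."
query <- paste0("http://data.neonscience.org/api/v0/taxonomy/?scientificname=",
                URLencode(sci.name))
# spaces in sci.name become %20 in the resulting URL
\end{verbatim}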
For an example, let's look up the little sand verbena, \emph{Abronia minor Standl.} Searching for \emph{Abronia minor} will fail, because the NEON taxonomy for this species includes the authority. The search will also fail with spaces. Search for \texttt{Abronia\%20minor\%20Standl.}, and in this case we can omit \texttt{offset} and \texttt{limit} because we know there can only be a single result: \begin{Shaded} \begin{Highlighting}[] \NormalTok{am.req <-}\StringTok{ }\KeywordTok{GET}\NormalTok{(}\StringTok{"http://data.neonscience.org/api/v0/taxonomy/?scientificname=Abronia%20minor%20Standl."}\NormalTok{)} \NormalTok{am.list <-}\StringTok{ }\NormalTok{jsonlite}\OperatorTok{::}\KeywordTok{fromJSON}\NormalTok{(}\KeywordTok{content}\NormalTok{(am.req, }\DataTypeTok{as=}\StringTok{"text"}\NormalTok{))} \NormalTok{am.list}\OperatorTok{$}\NormalTok{data} \end{Highlighting} \end{Shaded} \begin{verbatim} ## taxonTypeCode taxonID acceptedTaxonID dwc:scientificName ## 1 PLANT ABMI2 ABMI2 Abronia minor Standl. ## dwc:scientificNameAuthorship dwc:taxonRank dwc:vernacularName ## 1 Standl. species little sand verbena ## dwc:nameAccordingToID dwc:kingdom dwc:phylum ## 1 http://plants.usda.gov (accessed 8/25/2014) Plantae Magnoliophyta ## dwc:class dwc:order dwc:family dwc:genus gbif:subspecies ## 1 Magnoliopsida Caryophyllales Nyctaginaceae Abronia NA ## gbif:variety ## 1 NA \end{verbatim} \hypertarget{stacking-neon-data}{% \section{Stacking NEON data}\label{stacking-neon-data}} At the top of this tutorial, we installed the \texttt{neonUtilities} package. This is a custom R package that stacks the monthly files provided by the NEON data portal into a single continuous file for each type of data table in the download. It currently handles files downloaded from the data portal, but not files pulled from the API. That functionality will be added soon! For a guide to using \texttt{neonUtilities} on data downloaded from the portal, look here. \hypertarget{exercises}{% \section{Exercises}\label{exercises}} \hypertarget{computational}{% \subsection{Computational}\label{computational}} \hypertarget{part-1-sign-up-for-and-use-an-neon-api-token}{% \subsubsection{Part 1: Sign up for and Use an NEON API Token:}\label{part-1-sign-up-for-and-use-an-neon-api-token}} \emph{Code to come} \hypertarget{written}{% \subsection{Written}\label{written}} \textbf{Question 1:} How might or does the NEON project intersect with your current research or future career goals? \emph{(1 paragraph)} \leavevmode\hypertarget{ds-challenge}{}% \textbf{Question 2:} Use the map in week 2:Intro to NEON to answer the following questions. Consider the research question that you may explore as your final semester project or a current project that you are working on and answer each of the following questions: \begin{itemize} \tightlist \item Are there NEON field sites that are in study regions of interest to you?\\ \item What domains are the sites located in?\\ \item What NEON field sites do your current research or Capstone Project ideas coincide with?\\ \item Is the site or sites core or relocatable?\\ \item Is or are they terrestrial or aquatic?\\ \item Are there data available for the NEON field site(s) that you are most interested in? What kind of data are available? 
\end{itemize}

\leavevmode\hypertarget{ds-challenge}{}%
\textbf{Question 3:} Consider either your current or future research, or
a question you'd like to address during this course:

\begin{itemize}
\tightlist
\item
  Which types of NEON data may be more useful to address these questions?
\item
  What non-NEON data resources could be combined with NEON data to help
  address your question?
\item
  What challenges, if any, could you foresee when beginning to work with
  these data?
\end{itemize}

\leavevmode\hypertarget{ds-challenge}{}%
\textbf{Question 4:} Use the Data Portal tools to investigate the data
availability for the field sites you've already identified in the
previous questions:

\begin{itemize}
\tightlist
\item
  What types of aquatic or terrestrial data are currently available?
  Remote sensing data?\\
\item
  Of these, what type of data are you most interested in working with
  for your project during this course?\\
\item
  What time period does the data cover?\\
\item
  What format is the downloadable file available in?\\
\item
  Where is the metadata to support this data?\\
\end{itemize}

\textbf{Intro to NEON Culmination Activity}

Write up a 1-page summary of a project that you might want to explore
using NEON data over the duration of this course. Include the types of
NEON (and other data) that you will need to implement this project. Save
this summary as you will be refining and adding to your ideas over the
course of the semester.

\hypertarget{introduction-to-usa-npn-its-data}{%
\chapter{Introduction to USA-NPN \& its
Data}\label{introduction-to-usa-npn-its-data}}

\begin{longtable}[]{@{}l@{}}
\toprule
\endhead
Estimated Time: 2 hours\tabularnewline
\bottomrule
\end{longtable}

\leavevmode\hypertarget{ds-challenge}{}%
\textbf{Course participants}: As you review this information, please
consider the final course project that you will work on over this
semester. At the end of this section, you will document an initial
research question or idea and associated data needed to address that
question that you may want to explore while pursuing this course.

\hypertarget{ds-objectives}{}

\begin{center}\rule{0.5\linewidth}{0.5pt}\end{center}

\hypertarget{learning-objectives}{%
\section{Learning Objectives}\label{learning-objectives}}

At the end of this activity, you will be able to:

\begin{center}\rule{0.5\linewidth}{0.5pt}\end{center}

\hypertarget{usa-npn-project-mission-design}{%
\section{USA-NPN Project Mission \&
Design:}\label{usa-npn-project-mission-design}}

The \href{https://www.usanpn.org}{USA National Phenology Network (USA-NPN)}
collects, organizes, and shares phenological data and information to
aid decision-making, scientific discovery, and a broader understanding
of phenology from a diversity of perspectives.

The USA National Phenology Network consists of a National Coordinating
Office (NCO), thousands of volunteer observers and many partners,
including research scientists, resource managers, educators, and
policy-makers. Anyone who participates in Nature's Notebook or
collaborates with NCO staff to advance the science of phenology or to
inform decisions is part of the USA-NPN.

\hypertarget{vision-mission}{%
\section{Vision \& Mission}\label{vision-mission}}

USA-NPN's vision is to provide data and information on the timing of
seasonal events in plants and animals to ensure the well-being of
humans, ecosystems, and natural resources.
To support this and its mission the USA-NPN collects, organizes, and shares phenological data and information to aid decision-making, scientific discovery, and a broader understanding of phenology from a diversity of perspectives. \hypertarget{relevant-documents-background-information}{% \subsection{Relevant documents \& background information:}\label{relevant-documents-background-information}} \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \item \href{https://usanpn.org/files/npn/reports/USA-NPN_StrategicPlan_2019-2024.pdf}{USA-NPN Strategic Plan} \item \href{https://usanpn.org/files/npn/reports/USA-NPN_InfoSheet_2020.pdf}{USA-NPN Information Sheet: Tracking seasonal changes to support science, natural resource management, and society} \item \href{https://www.usanpn.org/files/npn/reports/USA-NPN_AnnualReport2019.pdf}{2019 USA-NPN Annual Report} \end{enumerate} \hypertarget{usa-npns-spatial-design}{% \section{USA-NPN's Spatial design:}\label{usa-npns-spatial-design}} As a \href{https://www.youtube.com/watch?v=WR34LGvuFac}{citizen-science based platform}, the spatial sampling of USA-NPN data is opportunistic, since observations are contributed voluntarily by citizen scientist participants. \hypertarget{types-of-usa-npn-data}{% \section{Types of USA-NPN Data:}\label{types-of-usa-npn-data}} \begin{itemize} \tightlist \item Observational - As described in USA National Phenology Network Observational Data Documentation \href{https://pubs.usgs.gov/of/2018/1060/ofr20181060.pdf}{(Rosemartin et al.~2018)} \begin{itemize} \tightlist \item Status/intensity \item Individual phenometrics \item Site-level phenometrics \includegraphics{/Users/kdw223/Research/katharynduffy.github.io/images/USA_NPN_data_types.png} \end{itemize} \item Gridded data products - As described in USA National Phenology Network gridded products documentation \href{https://pubs.usgs.gov/of/2017/1003/ofr20171003.pdf}{(Crimmins et al.~2017)} \includegraphics{/Users/kdw223/Research/katharynduffy.github.io/images/USA-NPN-AGDD-32base-Anomaly.gif} \item Pheno-Forecast products - As described in ``Short-term forecasts of insect phenology inform pest management'' \href{https://academic.oup.com/aesa/article/113/2/139/5727873}{(Crimmins et al.~2020)} \end{itemize} \begin{figure} \centering \includegraphics{/Users/kdw223/Research/katharynduffy.github.io/images/buffelgrass_10.18.19.gif} \caption{Example of USA-NPN's Buffel Grass Pheno-Forecast for Arizona.} \end{figure} \hypertarget{how-to-access-usa-npn-data}{% \section{How to Access USA-NPN Data:}\label{how-to-access-usa-npn-data}} Summary here \hypertarget{the-usa-npn-landing-page}{% \subsection{The USA-NPN Landing page}\label{the-usa-npn-landing-page}} A concise list of all available NPN data sets, tools, products. \hypertarget{apis}{% \subsection{APIs}\label{apis}} This is a set of standard web service calls that allows for programmatic access to NPN data independent of any particular programming language. \hypertarget{rnpn-package}{% \subsection{Rnpn package}\label{rnpn-package}} This suite of R functions allows for programmatic access to both gridded and in-situ NPN data sets in an R environment. 
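For instance, a first query might look like the following minimal sketch
(this assumes the package has been installed, e.g.~with
\texttt{install.packages("rnpn")}; check the function list in the
documentation linked below):

\begin{verbatim}
library(rnpn)

# the full list of species tracked by USA-NPN
species <- npn_species()
head(species)
\end{verbatim}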
Full documentation available here: \url{https://usa-npn.github.io/rnpn/}

\hypertarget{phenology-observation-portal-for-observational-data}{%
\subsection{Phenology Observation Portal (for observational
data)}\label{phenology-observation-portal-for-observational-data}}

This tool allows users to download customized datasets of
observational data from the National Phenology Database, which includes
phenology data collected via the Nature's Notebook phenology program
(2009-present for the United States), and additional integrated
datasets, such as historical lilac and honeysuckle data (1955-present).
Filters are available to specify dates, regions, species and
phenophases of interest. This provides access to all phenometrics,
which represent varying degrees of data aggregation.

\hypertarget{geospatial-request-builder-for-raster-data-and-image-files}{%
\subsection{Geospatial Request Builder (for raster data and image
files)}\label{geospatial-request-builder-for-raster-data-and-image-files}}

This tool simplifies the process of accessing NPN gridded data through
standard WMS and WCS services. WMS services provide the data as basic
graphic images, such as PNGs or TIFFs, whereas WCS services provide the
same data in formats accessible to GIS applications.

\hypertarget{visualization-tool}{%
\subsection{Visualization Tool}\label{visualization-tool}}

The Visualization Tool provides an easier way to explore phenology data
and maps. The user-friendly interface is intended to allow for searching
for and comparing general trends and quick-and-easy access to map
data/products.

\hypertarget{exercises}{%
\section{Exercises}\label{exercises}}

\hypertarget{computational}{%
\subsection{Computational}\label{computational}}

\hypertarget{ds-challenge}{}

\hypertarget{written}{%
\subsection{Written}\label{written}}

\textbf{Question 1:} How might or does USA-NPN intersect with your
current research or future career goals? \emph{(1 paragraph)}

\textbf{Question 2:}

\textbf{Question 4:}

\textbf{Intro to USA-NPN Culmination Activity}

Write up a 1-page summary of a project that you might want to explore
using USA-NPN data over the duration of this course. Include the types
of USA-NPN (and other data) that you will need to implement this
project. Save this summary as you will be refining and adding to your
ideas over the course of the semester.

\hypertarget{applications}{%
\chapter{Applications}\label{applications}}

Some \emph{significant} applications are demonstrated in this chapter.

\hypertarget{example-one}{%
\section{Example one}\label{example-one}}

\hypertarget{example-two}{%
\section{Example two}\label{example-two}}

\hypertarget{example-three}{%
\section{Example three}\label{example-three}}

\hypertarget{usa-npn-data-access-uses-challenges}{%
\chapter{USA-NPN Data Access, Uses \&
Challenges}\label{usa-npn-data-access-uses-challenges}}

Some \emph{significant} applications are demonstrated in this chapter.

\hypertarget{example-one}{%
\section{Example one}\label{example-one}}

\hypertarget{example-two}{%
\section{Example two}\label{example-two}}

\hypertarget{digital-repeat-photography-networks}{%
\chapter{Digital Repeat Photography
Networks}\label{digital-repeat-photography-networks}}

\begin{longtable}[]{@{}l@{}}
\toprule
\endhead
Estimated Time: \_\_\_ hour(s)\tabularnewline
\bottomrule
\end{longtable}

\leavevmode\hypertarget{ds-challenge}{}%
\textbf{Course participants}: As you review this information, please
consider the final course project that you will work on over this
semester. At the end of this section, you will document an initial
research question or idea and associated data needed to address that
question that you may want to explore while pursuing this course.
\leavevmode\hypertarget{ds-objectives}{}% -- \hypertarget{learning-objectives}{% \section{Learning Objectives}\label{learning-objectives}} At the end of this activity, you will be able to: -- \hypertarget{the-phenocam-network-mission-design}{% \section{The PhenoCam Network Mission \& Design}\label{the-phenocam-network-mission-design}} \hypertarget{exercises}{% \section{Exercises}\label{exercises}} \hypertarget{computational}{% \subsection{Computational}\label{computational}} \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \item \end{enumerate} \hypertarget{ds-challenge}{} \hypertarget{written}{% \subsection{Written}\label{written}} \textbf{Question 1:} How might or does the NEON project intersect with your current research or future career goals? \emph{(1 paragraph)} \leavevmode\hypertarget{ds-challenge}{}% \textbf{Question 2:} Use the map in week 2:Intro to NEON to answer the following questions. Consider the research question that you may explore as your final semester project or a current project that you are working on and answer each of the following questions: \begin{itemize} \tightlist \item Are there NEON field sites that are in study regions of interest to you?\\ \item What domains are the sites located in?\\ \item What NEON field sites do your current research or Capstone Project ideas coincide with?\\ \item Is the site or sites core or relocatable?\\ \item Is or are they terrestrial or aquatic?\\ \item Are there data available for the NEON field site(s) that you are most interested in? What kind of data are available? \end{itemize} \leavevmode\hypertarget{ds-challenge}{}% \textbf{Question 3:} Consider either your current or future research, or a question you'd like to address durring this course: \begin{itemize} \tightlist \item Which types of NEON data may be more useful to address these questions? \item What non-NEON data resources could be combined with NEON data to help address your question? \item What challenges, if any, could you foresee when beginning to work with these data? \end{itemize} \leavevmode\hypertarget{ds-challenge}{}% \textbf{Question 4:} Use the Data Portal tools to investigate the data availability for the field sites you've already identified in the previous questions: \begin{itemize} \tightlist \item What types of aquatic or terrestrial data are currently available? Remote sensing data?\\ \item Of these, what type of data are you most interested in working with for your project during this course?\\ \item For what time period does the data cover?\\ \item What format is the downloadable file available in?\\ \item Where is the metadata to support this data?\\ \end{itemize} \textbf{Intro to USA-NPN Culmination Activity} Write up a 1-page summary of a project that you might want to explore using NEON data over the duration of this course. Include the types of USA-NPN (and other data) that you will need to implement this project. Save this summary as you will be refining and adding to your ideas over the course of the semester. \hypertarget{neon-aop-introduction}{% \chapter{NEON AOP Introduction}\label{neon-aop-introduction}} \begin{longtable}[]{@{}l@{}} \toprule \endhead Estimated Time: .25-.5 hours\tabularnewline \bottomrule \end{longtable} \hypertarget{ds-objectives}{} \begin{center}\rule{0.5\linewidth}{0.5pt}\end{center} \hypertarget{learning-objectives}{% \section{Learning Objectives}\label{learning-objectives}} After completing this tutorial, you will be able to: \begin{itemize} \tightlist \item Define hyperspectral remote sensing. 
\item
  Explain the fundamental principles of hyperspectral remote sensing data.
\item
  Describe the key attributes that are required to effectively work with
  hyperspectral remote sensing data in tools like R or Python.
\item
  Describe what a ``band'' is.
\end{itemize}

\begin{center}\rule{0.5\linewidth}{0.5pt}\end{center}

\hypertarget{mapping-the-invisible}{%
\subsubsection{Mapping the Invisible}\label{mapping-the-invisible}}

\hypertarget{about-hyperspectral-remote-sensing-data}{%
\section{About Hyperspectral Remote Sensing
Data}\label{about-hyperspectral-remote-sensing-data}}

The electromagnetic spectrum is composed of thousands of bands
representing different types of light energy. Imaging spectrometers
(instruments that collect hyperspectral data) break the electromagnetic
spectrum into groups of bands that support classification of objects by
their spectral properties on the earth's surface. Hyperspectral data
consists of many bands -- up to hundreds of bands -- that cover the
electromagnetic spectrum.

The NEON imaging spectrometer collects data within the 380nm to 2510nm
portion of the electromagnetic spectrum within bands that are
approximately 5nm in width. This results in a hyperspectral data cube
that contains approximately 426 bands - which means big, big data.

\hypertarget{key-metadata-for-hyperspectral-data}{%
\section{Key Metadata for Hyperspectral
Data}\label{key-metadata-for-hyperspectral-data}}

\hypertarget{bands-and-wavelengths}{%
\subsection{Bands and Wavelengths}\label{bands-and-wavelengths}}

A \emph{band} represents a group of wavelengths. For example, the
wavelength values between 695nm and 700nm might be one band as captured
by an imaging spectrometer. The imaging spectrometer collects reflected
light energy in a pixel for light in that band. Often when you work
with a multi- or hyperspectral dataset, the band information is reported
as the center wavelength value. This value represents the center point
value of the wavelengths represented in that band. Thus in a band
spanning 695-700 nm, the center would be 697.5 nm.

Imaging spectrometers collect reflected light information within defined
bands or regions of the electromagnetic spectrum. Source: National
Ecological Observatory Network (NEON)

\hypertarget{spectral-resolution}{%
\subsection{Spectral Resolution}\label{spectral-resolution}}

The spectral resolution of a dataset that has more than one band refers
to the width of each band in the dataset. In the example above, a band
was defined as spanning 695-700nm. The width, or spectral resolution, of
the band is thus 5 nanometers. To see an example of this, check out the
band widths for the Landsat sensors.

\hypertarget{full-width-half-max-fwhm}{%
\subsection{Full Width Half Max (FWHM)}\label{full-width-half-max-fwhm}}

The full width half max (FWHM) will also often be reported in a multi-
or hyperspectral dataset. This value represents the spread of the band
around that center point. For the example band above that covers
695-700nm, the FWHM is 5 nm. While a general spectral resolution of the
sensor is often provided, not all sensors create bands of uniform
widths.
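As a quick numeric illustration of these two quantities (a toy example
only, not actual sensor metadata), the center wavelength and the width
follow directly from the wavelength range of each band:

\begin{verbatim}
# toy example: band centers and widths from wavelength ranges (in nm)
band_min <- c(695, 700, 705)
band_max <- c(700, 705, 710)
(band_min + band_max) / 2   # center wavelengths, e.g. 697.5 nm for 695-700 nm
band_max - band_min         # band widths: 5 nm in each case
\end{verbatim}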
For instance bands 1-9 of Landsat 8 are listed below (Courtesy of USGS) \begin{longtable}[]{@{}llll@{}} \toprule \begin{minipage}[b]{0.36\columnwidth}\raggedright Band\strut \end{minipage} & \begin{minipage}[b]{0.18\columnwidth}\raggedright Wavelength range (microns)\strut \end{minipage} & \begin{minipage}[b]{0.19\columnwidth}\raggedright Spatial Resolution (m)\strut \end{minipage} & \begin{minipage}[b]{0.16\columnwidth}\raggedright Spectral Width (microns)\strut \end{minipage}\tabularnewline \midrule \endhead \begin{minipage}[t]{0.36\columnwidth}\raggedright Band 1 - Coastal aerosol\strut \end{minipage} & \begin{minipage}[t]{0.18\columnwidth}\raggedright 0.43 - 0.45\strut \end{minipage} & \begin{minipage}[t]{0.19\columnwidth}\raggedright 30\strut \end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedright 0.02\strut \end{minipage}\tabularnewline \begin{minipage}[t]{0.36\columnwidth}\raggedright Band 2 - Blue\strut \end{minipage} & \begin{minipage}[t]{0.18\columnwidth}\raggedright 0.45 - 0.51\strut \end{minipage} & \begin{minipage}[t]{0.19\columnwidth}\raggedright 30\strut \end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedright 0.06\strut \end{minipage}\tabularnewline \begin{minipage}[t]{0.36\columnwidth}\raggedright Band 3 - Green\strut \end{minipage} & \begin{minipage}[t]{0.18\columnwidth}\raggedright 0.53 - 0.59\strut \end{minipage} & \begin{minipage}[t]{0.19\columnwidth}\raggedright 30\strut \end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedright 0.06\strut \end{minipage}\tabularnewline \begin{minipage}[t]{0.36\columnwidth}\raggedright Band 4 - Red\strut \end{minipage} & \begin{minipage}[t]{0.18\columnwidth}\raggedright 0.64 - 0.67\strut \end{minipage} & \begin{minipage}[t]{0.19\columnwidth}\raggedright 30\strut \end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedright 0.03\strut \end{minipage}\tabularnewline \begin{minipage}[t]{0.36\columnwidth}\raggedright Band 5 - Near Infrared (NIR)\strut \end{minipage} & \begin{minipage}[t]{0.18\columnwidth}\raggedright 0.85 - 0.88\strut \end{minipage} & \begin{minipage}[t]{0.19\columnwidth}\raggedright 30\strut \end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedright 0.03\strut \end{minipage}\tabularnewline \begin{minipage}[t]{0.36\columnwidth}\raggedright Band 6 - SWIR 1\strut \end{minipage} & \begin{minipage}[t]{0.18\columnwidth}\raggedright 1.57 - 1.65\strut \end{minipage} & \begin{minipage}[t]{0.19\columnwidth}\raggedright 30\strut \end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedright 0.08\strut \end{minipage}\tabularnewline \begin{minipage}[t]{0.36\columnwidth}\raggedright Band 7 - SWIR 2\strut \end{minipage} & \begin{minipage}[t]{0.18\columnwidth}\raggedright 2.11 - 2.29\strut \end{minipage} & \begin{minipage}[t]{0.19\columnwidth}\raggedright 30\strut \end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedright 0.18\strut \end{minipage}\tabularnewline \begin{minipage}[t]{0.36\columnwidth}\raggedright Band 8 - Panchromatic\strut \end{minipage} & \begin{minipage}[t]{0.18\columnwidth}\raggedright 0.50 - 0.68\strut \end{minipage} & \begin{minipage}[t]{0.19\columnwidth}\raggedright 15\strut \end{minipage} & \begin{minipage}[t]{0.16\columnwidth}\raggedright 0.18\strut \end{minipage}\tabularnewline \begin{minipage}[t]{0.36\columnwidth}\raggedright Band 9 - Cirrus\strut \end{minipage} & \begin{minipage}[t]{0.18\columnwidth}\raggedright 1.36 - 1.38\strut \end{minipage} & \begin{minipage}[t]{0.19\columnwidth}\raggedright 30\strut \end{minipage} & 
\begin{minipage}[t]{0.16\columnwidth}\raggedright 0.02\strut \end{minipage}\tabularnewline \bottomrule \end{longtable} \bibliography{book.bib,packages.bib} \end{document}
{ "alphanum_fraction": 0.6053260367, "avg_line_length": 58.3475746868, "ext": "tex", "hexsha": "f5dd540481cd89e32a02801440bb544f120efd6e", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "4f7de33f262c70a1661daedbd8b9b29ccc4d0925", "max_forks_repo_licenses": [ "CC0-1.0" ], "max_forks_repo_name": "mdr359/katharynduffy.github.io", "max_forks_repo_path": "INF550.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "4f7de33f262c70a1661daedbd8b9b29ccc4d0925", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "CC0-1.0" ], "max_issues_repo_name": "mdr359/katharynduffy.github.io", "max_issues_repo_path": "INF550.tex", "max_line_length": 1348, "max_stars_count": 1, "max_stars_repo_head_hexsha": "4f7de33f262c70a1661daedbd8b9b29ccc4d0925", "max_stars_repo_licenses": [ "CC0-1.0" ], "max_stars_repo_name": "mdr359/katharynduffy.github.io", "max_stars_repo_path": "INF550.tex", "max_stars_repo_stars_event_max_datetime": "2020-11-11T17:06:58.000Z", "max_stars_repo_stars_event_min_datetime": "2020-11-11T17:06:58.000Z", "num_tokens": 48355, "size": 181636 }
\documentclass{article} \usepackage{fancyhdr} \usepackage{extramarks} \usepackage{amsmath} \usepackage{amsthm} \usepackage{amssymb} \usepackage{amsfonts} \usepackage{tikz} \usepackage{physics} \usepackage[plain]{algorithm} \usepackage{algpseudocode} \usepackage{hyperref} \usetikzlibrary{automata,positioning} % % Basic Document Settings % \topmargin=-0.45in \evensidemargin=0in \oddsidemargin=0in \textwidth=6.5in \textheight=9.0in \headsep=0.25in \linespread{1.1} \pagestyle{fancy} \lhead{\hmwkAuthorName} \chead{\hmwkClass\ : \hmwkTitle} \rhead{\firstxmark} \lfoot{\lastxmark} \cfoot{\thepage} \renewcommand\headrulewidth{0.4pt} \renewcommand\footrulewidth{0.4pt} \setlength\parindent{0pt} % % Create Problem Sections % \newcommand{\be}{\begin{equation}} \newcommand{\ee}{\end{equation}} \newcommand{\bes}{\begin{equation*}} \newcommand{\ees}{\end{equation*}} \newcommand{\bea}{\begin{flalign*}} \newcommand{\eea}{\end{flalign*}} \newcommand{\enterProblemHeader}[1]{ \nobreak\extramarks{}{Problem \arabic{#1} continued on next page\ldots}\nobreak{} \nobreak\extramarks{Problem \arabic{#1} (continued)}{Problem \arabic{#1} continued on next page\ldots}\nobreak{} } \newcommand{\exitProblemHeader}[1]{ \nobreak\extramarks{Problem \arabic{#1} (continued)}{Problem \arabic{#1} continued on next page\ldots}\nobreak{} \stepcounter{#1} \nobreak\extramarks{Problem \arabic{#1}}{}\nobreak{} } \setcounter{secnumdepth}{0} \newcounter{partCounter} \newcounter{homeworkProblemCounter} \setcounter{homeworkProblemCounter}{1} \nobreak\extramarks{Problem \arabic{homeworkProblemCounter}}{}\nobreak{} % % Homework Problem Environment % % This environment takes an optional argument. When given, it will adjust the % problem counter. This is useful for when the problems given for your % assignment aren't sequential. See the last 3 problems of this template for an % example. % \newenvironment{homeworkProblem}[1][-1]{ \ifnum#1>0 \setcounter{homeworkProblemCounter}{#1} \fi \section{Problem \arabic{homeworkProblemCounter}} \setcounter{partCounter}{1} \enterProblemHeader{homeworkProblemCounter} }{ \exitProblemHeader{homeworkProblemCounter} } % % Homework Details % - Title % - Due date % - Class % - Section/Time % - Instructor % - Author % \newcommand{\hmwkTitle}{Homework\ \#1} \newcommand{\hmwkDueDate}{Due on 14th January, 2019} \newcommand{\hmwkClass}{Dynamical Systems} \newcommand{\hmwkClassTime}{} \newcommand{\hmwkClassInstructor}{} \newcommand{\hmwkAuthorName}{\textbf{Aditya Vijaykumar}} % % Title Page % \title{ %\vspace{2in} \textmd{\textbf{\hmwkClass:\ \hmwkTitle}}\\ \normalsize\vspace{0.1in}\small{\hmwkDueDate\ }\\ % \vspace{3in} } \author{\hmwkAuthorName} \date{} \renewcommand{\part}[1]{\textbf{\large Part \Alph{partCounter}}\stepcounter{partCounter}\\} % % Various Helper Commands % % Useful for algorithms \newcommand{\alg}[1]{\textsc{\bfseries \footnotesize #1}} % For derivatives \newcommand{\deriv}[1]{\frac{\mathrm{d}}{\mathrm{d}x} (#1)} % For partial derivatives \newcommand{\pderiv}[2]{\frac{\partial}{\partial #1} (#2)} % Integral dx \newcommand{\dx}{\mathrm{d}x} % Alias for the Solution section header \newcommand{\solution}{\textbf{\large Solution}} % Probability commands: Expectation, Variance, Covariance, Bias \newcommand{\E}{\mathrm{E}} \newcommand{\Var}{\mathrm{Var}} \newcommand{\Cov}{\mathrm{Cov}} \newcommand{\Bias}{\mathrm{Bias}} \begin{document} \maketitle (\textbf{Acknowledgements} - I would like to thank Divya Jagannathan for discussions.) 
\\

\begin{homeworkProblem}
	\textbf{Part (a)}\\
	Consider first the set $ E_u $. Any vector in this set can be written as $ V = \sum_j C_j w_j $, where $ w_j $ is a generalized eigenvector of this set. We know,
	\begin{equation*}
	(A - \lambda I)^m w_j = 0
	\end{equation*}
	for some $ m $. This also means that,
	\begin{equation*}
	(A - \lambda I) w_j = W_j \implies A w_j = \lambda w_j + W_j
	\end{equation*}
	where $ W_j \in \ker((A - \lambda I)^{m-1})$. Hence $ A w_j \in E_u $. This also means that $ A^k w_j \in E_u $ for any whole number $ k $, which in turn means that, in general, $\sum_{k} c_k A^k w_j \in E_u$.
	Consider,
	\begin{align*}
	e^{At} V = \sum_{j,k} C_j \dfrac{t^k A^k}{k!} w_j \in E_u
	\end{align*}
	Hence it is proved that $ E_u $ is an invariant subspace. The argument follows similarly for $ E_s, E_c $.

	If $ \alpha_j, \beta_j, \gamma_j $ are the generalized eigenvectors as defined below, the most general solution of the system is given by,
	\begin{align*}
	\sum_{j} C_j w_j &= \sum_{j, \alpha_j \in E_u} M_j \alpha_j + \sum_{j, \beta_j \in E_s} N_j \beta_j + \sum_{j, \gamma_j \in E_c} K_j \gamma_j\\
	\implies R^d &= E_u \oplus E_s \oplus E_c
	\end{align*}

	\textbf{Part (b)}\\
	For $x_0= \sum_{j} C_j w_j, w_j \in E_s $, $ e^{At}x_0 = \sum_{j} C_j w_j e^{a_j t} e^{ i b_j t}, a_j < 0$. Hence, we can say,
	\begin{align*}
	\lim\limits_{t \rightarrow \infty } e^{At}x_0 &= \sum_{j} C_j w_j \lim\limits_{t \rightarrow \infty } e^{a_j t} e^{ i b_j t} = 0 \\
	\lim\limits_{t \rightarrow - \infty } \abs{ e^{At}x_0 } &= \lim\limits_{t \rightarrow - \infty } \abs{\sum_{j} C_j w_j e^{a_j t} e^{ i b_j t}} = \infty
	\end{align*}

	\textbf{Part (c)}\\
	For $x_0= \sum_{j} C_j w_j, w_j \in E_u $, $ e^{At}x_0 = \sum_{j} C_j w_j e^{a_j t} e^{ i b_j t}, a_j > 0$. Hence, we can say,
	\begin{align*}
	\lim\limits_{t \rightarrow -\infty } e^{At}x_0 &= \sum_{j} C_j w_j \lim\limits_{t \rightarrow - \infty } e^{a_j t} e^{ i b_j t} = 0 \\
	\lim\limits_{t \rightarrow \infty } \abs{ e^{At}x_0 } &= \lim\limits_{t \rightarrow \infty } \abs{\sum_{j} C_j w_j e^{a_j t} e^{ i b_j t}} = \infty
	\end{align*}

	\textbf{Part (d)}\\
	For $x_0= \sum_{j} C_j w_j, w_j \in E_s $, $ e^{At}x_0 = \sum_{j} C_j w_j e^{a_j t} e^{ i b_j t}, a_j < 0$. Hence, we can say,

\end{homeworkProblem}

\begin{homeworkProblem}

	\textbf{Part (a)}\\

	\textbf{Part (b)}\\
	As we are working with linear systems, any linear combination of solutions is again a solution. We know that the columns of the fundamental matrix $ X $ are solutions to the ODE, and that they span the solution space. Right multiplying $ X $ with a constant, non-singular matrix $ C $ will give us a matrix whose columns are linear combinations of the columns of $ X $ (i.e.\ of solutions to the ODE) according to the entries in $ C $. Due to the condition of non-singularity, this new matrix will also have linearly independent columns. Hence, the new matrix $ Y(t) = X(t) C $ will have columns which are solutions to the ODE and also span the solution space. Hence proved.

	Left multiplying $ X $ with a constant, non-singular matrix $ B $ will give us a matrix whose rows are linear combinations of the \textit{rows} of $ X $ (which are not, in general, solutions to the ODE) according to the entries in $ B $. This means that $ B X $ will not, in general, be a fundamental matrix for the system. Of course, $ BX $ can be a fundamental matrix if the rows of $ X $ are indeed solutions to the system, i.e.\ if $ X^T = X $.
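	As a concrete illustration of the two cases, take
	\begin{equation*}
	A = \begin{pmatrix} 1 & 0 \\ 0 & 2 \end{pmatrix}, \qquad
	X(t) = \begin{pmatrix} e^{t} & 0 \\ 0 & e^{2t} \end{pmatrix}, \qquad
	B = C = \begin{pmatrix} 0 & 1 \\ 1 & 0 \end{pmatrix}.
	\end{equation*}
	Then $ X(t) C $ merely swaps the two solution columns and is still a fundamental matrix, whereas
	\begin{equation*}
	B X(t) = \begin{pmatrix} 0 & e^{2t} \\ e^{t} & 0 \end{pmatrix}
	\end{equation*}
	is not: its first column $ y(t) = (0, e^{t})^T $ satisfies $ y'(t) = (0, e^{t})^T $, while $ A y(t) = (0, 2 e^{t})^T \neq y'(t) $.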
\\ \textbf{Part (c)}\\ From the fact that $ X_1, X_2 $ are fundamental matrices and from the previous part, we can make the following statement, \begin{equation*} X_2(t) = X_1(t) C \end{equation*} where $ C $ is a non-singular matrix. Consider, \begin{align*} X_2(t + \omega) &= X_2(t) B_2\\ X_1(t + \omega) C &= X_2(t) B_2\\ X_1(t) B_1 C &= X_1(t) C B_2\\ \implies B_1 &= C B_2 C^{-1} \end{align*} where inverses could be taken in the last step only because the matrices are known to be nonsingular. Hence proved that $ B_1, B_2 $ are similar. \end{homeworkProblem} \end{document}
{ "alphanum_fraction": 0.6887529289, "avg_line_length": 34.2946428571, "ext": "tex", "hexsha": "626b6761f1bc404b465c3fb7853e9931c3ef0c74", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "c0aebb67332ccf0b116a3348923ab2631b586dac", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "adivijaykumar/courses", "max_forks_repo_path": "sem2/dysys/hw2/hw2.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "c0aebb67332ccf0b116a3348923ab2631b586dac", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "adivijaykumar/courses", "max_issues_repo_path": "sem2/dysys/hw2/hw2.tex", "max_line_length": 673, "max_stars_count": null, "max_stars_repo_head_hexsha": "c0aebb67332ccf0b116a3348923ab2631b586dac", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "adivijaykumar/courses", "max_stars_repo_path": "sem2/dysys/hw2/hw2.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 2702, "size": 7682 }
\vsssub
\subsubsection{Grid Integration} \label{sub:ww3gint}
\vsssub

\proddefH{ww3\_gint}{w3gint}{ww3\_gint.ftn}
\proddeff{Input}{ww3\_gint.inp}{Traditional configuration file.}{10}
(App.~\ref{sec:config111})
\proddefa{mod\_def.*}{Model definition files in \ws\ format for base and target grids}{20}
\proddefa{out\_grd.*}{Gridded field files in \ws\ format for base grids}{30+}
\proddeff{Output}{standard out}{Formatted output of program.}{6}
\proddefa{out\_grd.*}{Gridded field files in \ws\ format for target grid}{30+}

\vspace{\baselineskip}
\noindent
This post-processor program takes field data from several overlapping grids
and produces a unified output file. The different model definition and field
output files are identified by the unique identifier associated with each
specific grid. At this moment the program works with curvilinear and
rectilinear grids. A weights file {\file WHTGRIDINT.bin} is written that can
be read in subsequent runs using identical origin-destination grids, saving
substantial time in cases with a large number of input grids and/or
high-resolution target grids.

\vspace{\baselineskip}
\vspace{\baselineskip}
\noindent Note that this program can be used in concert with the grid
splitting program {\file ww3\_gspl}, and that {\file ww3\_gspl.sh} has an
option to produce a template input file for this program (see
\para\ref{sub:ww3gspl}). \pb
{ "alphanum_fraction": 0.7822638789, "avg_line_length": 46.2333333333, "ext": "tex", "hexsha": "fb7a14647a9b813f6e20998d6191b433f1720570", "lang": "TeX", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2021-06-01T09:29:46.000Z", "max_forks_repo_forks_event_min_datetime": "2021-06-01T09:29:46.000Z", "max_forks_repo_head_hexsha": "3e8bbbe6652b702b61d2896612f6aa8e4aa6c803", "max_forks_repo_licenses": [ "Apache-2.0", "CC0-1.0" ], "max_forks_repo_name": "minsukji/ci-debug", "max_forks_repo_path": "WW3/manual/run/ww3_gint.tex", "max_issues_count": 5, "max_issues_repo_head_hexsha": "3e8bbbe6652b702b61d2896612f6aa8e4aa6c803", "max_issues_repo_issues_event_max_datetime": "2021-06-04T14:17:45.000Z", "max_issues_repo_issues_event_min_datetime": "2021-05-31T15:49:26.000Z", "max_issues_repo_licenses": [ "Apache-2.0", "CC0-1.0" ], "max_issues_repo_name": "minsukji/ci-debug", "max_issues_repo_path": "WW3/manual/run/ww3_gint.tex", "max_line_length": 95, "max_stars_count": null, "max_stars_repo_head_hexsha": "3e8bbbe6652b702b61d2896612f6aa8e4aa6c803", "max_stars_repo_licenses": [ "Apache-2.0", "CC0-1.0" ], "max_stars_repo_name": "minsukji/ci-debug", "max_stars_repo_path": "WW3/manual/run/ww3_gint.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 382, "size": 1387 }
\chapter{The storage layer} \label{testing:storage}
{ "alphanum_fraction": 0.7884615385, "avg_line_length": 17.3333333333, "ext": "tex", "hexsha": "bcda786eb26d5153c9357086f38ad676639458ab", "lang": "TeX", "max_forks_count": 66, "max_forks_repo_forks_event_max_datetime": "2022-03-28T14:52:28.000Z", "max_forks_repo_forks_event_min_datetime": "2018-12-04T10:01:42.000Z", "max_forks_repo_head_hexsha": "2c63c74f1134ec2fe1a0a9dec6d84645b46ba3bf", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "dizgotti/ouroboros-network", "max_forks_repo_path": "ouroboros-consensus/docs/report/chapters/testing/storage.tex", "max_issues_count": 3273, "max_issues_repo_head_hexsha": "2c63c74f1134ec2fe1a0a9dec6d84645b46ba3bf", "max_issues_repo_issues_event_max_datetime": "2022-03-30T18:04:56.000Z", "max_issues_repo_issues_event_min_datetime": "2018-10-22T12:30:39.000Z", "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "dizgotti/ouroboros-network", "max_issues_repo_path": "ouroboros-consensus/docs/report/chapters/testing/storage.tex", "max_line_length": 27, "max_stars_count": 218, "max_stars_repo_head_hexsha": "2c63c74f1134ec2fe1a0a9dec6d84645b46ba3bf", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "dizgotti/ouroboros-network", "max_stars_repo_path": "ouroboros-consensus/docs/report/chapters/testing/storage.tex", "max_stars_repo_stars_event_max_datetime": "2022-03-25T14:06:04.000Z", "max_stars_repo_stars_event_min_datetime": "2018-09-20T02:44:28.000Z", "num_tokens": 14, "size": 52 }
\chapter{General Introduction}

As a researcher, you will spend a lot of time performing experiments to get data and analyzing the results to answer important questions. However, you will probably not work on only one data set but will obtain many throughout your career. You might know everything about your data set at this moment, but would you remember all the details in 5 years? \par

Research data management is a term that describes the organization, storage, preservation, and sharing of data collected and used in a research project. It involves the management of data during the lifetime of a project but also involves decisions about how data will be preserved and shared after the project is completed. Good management helps to prevent errors and increases the quality of your analysis. Data management saves time and resources in the long run. Furthermore, well-managed and accessible data allows others to validate and replicate your findings. Finally, sharing data can lead to valuable discoveries by others outside of the original research team. Good data management is not easy and it can be a challenge to start, especially if you are working on an ongoing project. \par

In this document, we will go over the various topics that cover the whole research data life cycle. We will introduce the FAIR data principles, the importance of data documentation, data storage, and the best practices for sharing research software.
{ "alphanum_fraction": 0.8068728522, "avg_line_length": 161.6666666667, "ext": "tex", "hexsha": "08d3179e77d234e44299d276b574fdc3cb171fa4", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "4cf9fe29e9aff9f65fafadefbf9462382193f1f9", "max_forks_repo_licenses": [ "FSFAP" ], "max_forks_repo_name": "TJMKuijpers/FAiRData", "max_forks_repo_path": "FAIR data guide/Introduction.tex", "max_issues_count": 2, "max_issues_repo_head_hexsha": "4cf9fe29e9aff9f65fafadefbf9462382193f1f9", "max_issues_repo_issues_event_max_datetime": "2022-01-27T10:47:34.000Z", "max_issues_repo_issues_event_min_datetime": "2022-01-27T06:25:42.000Z", "max_issues_repo_licenses": [ "FSFAP" ], "max_issues_repo_name": "TJMKuijpers/FAiRData", "max_issues_repo_path": "FAIR data guide/Introduction.tex", "max_line_length": 358, "max_stars_count": null, "max_stars_repo_head_hexsha": "4cf9fe29e9aff9f65fafadefbf9462382193f1f9", "max_stars_repo_licenses": [ "FSFAP" ], "max_stars_repo_name": "TJMKuijpers/FAiRData", "max_stars_repo_path": "FAIR data guide/Introduction.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 276, "size": 1455 }
% interface=en

\startcomponent co-en-01

\environment contextref-env

\product contextref

\chapter{Preface}

This manual is about \CONTEXT, a system for typesetting documents. The central
element in this name is the word \TEX\ because the typographical programming
language \TEX\ is the base for \CONTEXT.

People who are used to \TEX\ will probably identify this manual as a \TEX\
document. They recognise the use of \texescape. One may also notice that the
way paragraphs are broken into lines is often better than in the average
typesetting system.

In this manual we will not discuss \TEX\ in depth because highly recommended
books on \TEX\ already exist. We would like to mention:

\startitemize[n]

\item[texbook] the unsurpassed {\em The \TeX Book} by Donald E.~Knuth, the
source of all knowledge and \TEX nical inspiration,

\item[bytopic] the convenient {\em \TeX\ by Topic} by Victor Eijkhout, the
reference manual for \TEX\ programmers, and

\item[beginners] the recommended {\em The Beginners Book of \TeX} by Silvio
Levy and Raymond Seroul, the book that turns every beginner into an expert

\stopitemize

For newcomers we advise (\in[beginners]), for the curious (\in[texbook]), and
for the impatient (\in[bytopic]). \CONTEXT\ users will not need this
literature, unless one wants to program in \TEX, uses special characters, or
has to typeset math. Again, we would advise (\in[beginners]).

You may ask yourself whether \TEX\ is not just one of the many typesetting
systems for producing documents. That is not so. While many systems in the
eighties and nineties pretended to deliver perfect typographical output,
\TEX\ still does a pretty good job compared to others. \TEX\ is not easy to
work with, but when one gets accustomed to it, we hope you will appreciate its
features.

\blank[big]

Hans Hagen, 1996||2002

\stopcomponent
{ "alphanum_fraction": 0.7664312873, "avg_line_length": 30.6833333333, "ext": "tex", "hexsha": "69fd6894b2665b68388637b9aced7cd015d9eb46", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "aa7ad70e0102492ff89b7967b16b499cbd6c7f19", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "marcpaterno/texmf", "max_forks_repo_path": "contextman/context-reference/en/co-preface.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "aa7ad70e0102492ff89b7967b16b499cbd6c7f19", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "marcpaterno/texmf", "max_issues_repo_path": "contextman/context-reference/en/co-preface.tex", "max_line_length": 63, "max_stars_count": null, "max_stars_repo_head_hexsha": "aa7ad70e0102492ff89b7967b16b499cbd6c7f19", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "marcpaterno/texmf", "max_stars_repo_path": "contextman/context-reference/en/co-preface.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 476, "size": 1841 }
\chapter{Ions}
{ "alphanum_fraction": 0.6470588235, "avg_line_length": 4.25, "ext": "tex", "hexsha": "1137b4d9e97da23b2052df7ec4b42d20f2efe077", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "adamdboult/nodeHomePage", "max_forks_repo_path": "src/pug/theory/physics/ions/00-00-Chapter_name.tex", "max_issues_count": 6, "max_issues_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93", "max_issues_repo_issues_event_max_datetime": "2022-01-01T22:16:09.000Z", "max_issues_repo_issues_event_min_datetime": "2021-03-03T12:36:56.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "adamdboult/nodeHomePage", "max_issues_repo_path": "src/pug/theory/physics/ions/00-00-Chapter_name.tex", "max_line_length": 14, "max_stars_count": null, "max_stars_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "adamdboult/nodeHomePage", "max_stars_repo_path": "src/pug/theory/physics/ions/00-00-Chapter_name.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 7, "size": 17 }
\chapter{Experiments}
\label{chapter:experiments}

This chapter mainly focuses on answering the following questions:

\begin{itemize}
	\item What are possible ways of using distantly supervised data?
	\item Can Distant Supervision improve upon the results of supervised training?
	\item Can a neural network architecture that was developed in a general domain setting be used to tackle the medical domain - a domain which is usually handled with many hand-crafted Natural Language Processing features?
	\item Which embedding types are best-suited for relation extraction in different domains?
\end{itemize}

The chapter is divided into two parts - one is dedicated to supervised learning on manually labeled datasets and serves to demonstrate the baseline, while the second one contains experiments with Distant Supervision. Each of them is divided into a part for the general domain and a part for the medical domain. Overall, two supervised datasets were evaluated for each domain, and one dataset per domain was created for evaluating Distant Supervision. The scheme of all the experiments can be seen in Figure \ref{fig:exp-schema}.

\begin{figure}
	\centering
	\includegraphics[width=\textwidth]{chapter4_experiments/images/exp-schema.png}
	\caption[Experiments scheme]{Scheme of conducted experiments for the network evaluation.}
	\label{fig:exp-schema}
\end{figure}

In order to find the best network configuration, a 4-fold cross-validation experiment was performed once for each domain. The parameters that were tuned were the length of the embeddings, both for words and for distances, and the type of the word embedding. The grid consisted of \{30, 40, 50, 70\} for the distance embedding length, \{300, 400\} for the word embedding length, and \{Swivel, GloVe, Word2Vec\} for the embedding type. In order to make the model universal, the parameters for all the other experiments were fixed to the values found via cross-validation.

\section{Supervised training evaluation, general domain}

The goal of the experiments in this section is to validate the implementation of the network by comparing it with the results achieved in \cite{DBLP:journals/corr/SantosXZ15}, and to demonstrate the quality of the network by applying it to another supervised dataset in the general domain.

The experiments in the general domain use text corpora and relations from general-purpose sources, such as Wikipedia, Freebase and news corpora. The biggest problem in the general domain setting is ambiguity. Named entities can have multiple meanings in different contexts, and two entities can also belong to multiple relation types. Generally speaking, detecting one of the general domain relations is a challenging task even for humans. For example, given the entity pair ``The Lord of The Ring'' and ``The Return of the King'' it is hard to decide whether the relation between them is ``Member-Collection'' or ``Component-Whole''.

\input{chapter4_experiments/experiments_supervised_general}

\section{Supervised training evaluation, medical domain}

These experiments were conducted to check the applicability of the ranking CNN to the medical domain. Several popular medical datasets for Relation Extraction were chosen, and the evaluation was compared to existing results.

There are many relational classes specific to the medical domain. One of the most popular ones is about a general interaction between different entities, such as proteins, genes, and drugs. Each interaction can have numerous more specific subclass relations, but for now the tests are performed on the generic relations.
One more dataset under consideration contains \textit{treatment-for} and \textit{prevents-from} relations, which are particularly interesting because of their high granularity, i.e., the two relations are very close in meaning.
\input{chapter4_experiments/experiments_supervised_medical}
\section{Distant supervision evaluation}
The question to be answered by these experiments is what quality a distantly trained model can achieve. Also, possible ways of creating distantly supervised data are described.
\input{chapter4_experiments/experiments_distant_general}
\input{chapter4_experiments/experiments_distant_medical}
{ "alphanum_fraction": 0.8146081048, "avg_line_length": 73.5892857143, "ext": "tex", "hexsha": "e2623028f8f6f3536b675dfeb72e2b38d40e434f", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "b733bd4382371203cc4992571890619a2e314047", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "Abas-Khan/thesis", "max_forks_repo_path": "thesis_text/chapter4_experiments/experiments.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "b733bd4382371203cc4992571890619a2e314047", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "Abas-Khan/thesis", "max_issues_repo_path": "thesis_text/chapter4_experiments/experiments.tex", "max_line_length": 306, "max_stars_count": null, "max_stars_repo_head_hexsha": "b733bd4382371203cc4992571890619a2e314047", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "Abas-Khan/thesis", "max_stars_repo_path": "thesis_text/chapter4_experiments/experiments.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 858, "size": 4121 }
% Chapter 2
\chapter{Sentiment Analysis} % Main chapter title
\label{sa} % For referencing the chapter elsewhere, use \ref{Chapter2}
\lhead{Chapter 2. \emph{Sentiment Analysis}} % This is for the header on each page - perhaps a shortened title
\section{Sentiment: A psychological viewpoint}
\par Though emotion is a term used often, it has a very complex psychological background. Emotion can be described as a component process. There are five organismic subsystems in an individual, \textit{viz.\ Information Processing, Support, Executive, Action, and Monitor} \citep*{scherer2010blueprint}. Like any system, these subsystems have states, and they keep transiting between different states according to the environment. Individuals respond to stimuli in the environment. Whenever an individual encounters a stimulus, the states of these subsystems change, and these changes are both interrelated and synchronized. This episode is called an emotion \citep*{scherer2005emotions}.
\par \textit{Affect} is the feeling or emotion experienced during or after the process of responding to a stimulus. This state of experience is called the \textit{Affective State}. Thus, emotion is one of the affective states. \textit{Mood} can also be considered one of the affective states. \textit{Attitude} is one of the most important affective states. \textit{Attitude} can be defined as \textit{``enduring, affectively colored beliefs, dispositions towards objects or persons''} \citep*{scherer2005emotions}. \textit{Sentiment Analysis} is nothing but the detection of the attitude towards an entity. Intuitively, we can see that it is even possible to infer the emotion: we can determine whether the user is sad, happy, angry, \textit{etc.}, if we know the attitude of the user.
%----------------------------------------------------------------------------------------
\section{Formal Problem Definition for Sentiment Analysis}
Before devising any solution to a problem, it is advisable to have a concise definition of the problem first.
\subsection{Example}
Let us consider an example to define the problem: \textit{``1) I went to watch the new James Bond flick, \textbf{Skyfall}, at \textit{IMAX}, which is the best theater in Mumbai, with my brother a month ago. 2) I really liked the seating arrangement over there. 3) The screenplay was superb and kept me guessing till the end. 4) My brother doesn't like the hospitality in the theater even now. 5) The movie is really good and the best Bond flick ever''}
\par This is a snippet of a review for the movie \textbf{Skyfall}. There are many entities and opinions expressed in it. 1) is an objective statement. 2) is subjective but is intended for the theater and not the movie. 3) is a positive statement about the screenplay, which is an important aspect of the movie. 4) is a subjective statement, but it is made by the author's brother, and it is about the hospitality in the theater and not the movie or any of its aspects. 5) reflects a positive view of the movie for the author.
\par We can see from this example that not only the opinion but also the opinion holder and the entity about which the opinion has been expressed are very important for overall SA. Also, as can be seen from 1), 4) and 5), there is a notion of time associated with every sentiment expressed.
\subsection{Problem Definition}
\textit{``A direct opinion (opinion about the object) is a quintuple \((o_{j}, f_{jk}, oo_{ijkl}, h_{i}, t_{l})\), where \(o_{j}\) is an object, \(f_{jk}\) is a feature of the object \(o_{j}\), \(oo_{ijkl}\) is the orientation or polarity of the opinion on feature \(f_{jk}\) of object \(o_{j}\), \(h_{i}\) is the opinion holder and \(t_{l}\) is the time when the opinion is expressed by \(h_{i}\)''} \citep*{liu2010sentiment}.
\par Thus, we can see that the definition of sentiment analysis takes into account not only the object, the opinion, and the opinion holder but also the time and the specific feature about which the sentiment is being expressed. This definition plays a very important role in devising any approach to solve any problem related to sentiment analysis.
\section{Types of Sentiment Analysis}
\par Sentiment analysis is primarily a classification task. But we can also classify the task of sentiment analysis depending upon various features. These features or dimensions are shown in Figure \ref{fig:dimensions}.
\begin{figure}[ht]
\caption{Dimensions of a Sentiment Analysis problem}
\label{fig:dimensions}
\includegraphics[width=\textwidth]{SentimentAnalysisProblemClassification.png}
\end{figure}
\clearpage
\begin{enumerate}
\item Granularity of Text
\item Type of text
\item Algorithm
\item Language
\item Representation
\item Nature of Output
\end{enumerate}
All these features collectively characterize a particular problem in SA. A change in even one of the features will change the problem.
\section{Challenges}
\textit{SA} is a complex problem and has many challenges involved. In this section, an attempt is made to discuss some of the most notorious difficulties in \textit{SA}.
\subsection{Unstructured Text}
Text in micro-blogs, tweets, comments, and messages is unstructured. Most of the research in \textit{NLP} and many \textit{NLP} tools focus on structured data. To adapt and use these tools for \textit{SA} is a big challenge.
\subsection{Sarcasm}
Nowadays, many tweets and comments are sarcastic. Let us consider an example tweet, \textit{``Great! I ate too many chocolates and gained lot of weight :)''}. This sentence will be marked as positive by almost any classifier. But we can clearly see that this is not a positive statement. Correctly classifying such sentences requires context knowledge.
\subsection{Thwarting}
In a thwarted expression, the sentences which contradict the overall polarity of the document are in the majority. An example is, \textit{``The guy is a chronic drinker, he smokes weed, has drugs but is a good guy''}. The aim of thwarted expressions is to mislead the classifier. Detecting thwarted expressions is a very important difficulty.
\section{Applications of Sentiment Analysis}
We list some of the most important applications of \textit{SA}.
\begin{enumerate}
\item Classification of Tweets
\item Classification of Movie Reviews
\item Classification of Product Reviews
\item Analyzing market trends
\item Sentiment Aware Information Retrieval
\item Removing subjective sentences to improve IR performance
\end{enumerate}
%----------------------------------------------------------------------------------------
\par As we have discussed earlier, \textit{SA} is mainly a classification task. It can be thought of as a subset of another important classification task called \textit{Text classification}. Various approaches have been used to solve this problem. Most of them are machine learning based.
This chapter takes a look at basic machine learning and some models in brief. Later on, combinations of these models with other techniques, which take into consideration the various aspects of the \textit{text}, are considered.
\section*{Approaches to Sentiment Analysis}
\section{Machine Learning}
\par This section aims not to explain all the intricacies associated with machine learning but to provide a brief outline which suffices for understanding future concepts. Machine learning, by definition, is learning from data. We have a mathematical model, the parameters of which have to be estimated from the data. By doing this we fit the model to the data provided. These parameters, which have been \textit{learned} from the data, can be said to completely define the model. Now this \textit{learned} model can be used for prediction or classification. In the case of \textit{SA}, \textit{Machine Learning} is used for the classification of \textit{text} as either positive, negative or neutral.
\par The data available to us can be both labeled and unlabeled. By labeling we mean that for an instance of the input we know its class. Data labeling can also be called annotation, and labeled data is called annotated data. Depending upon the extent to which the training data is labeled, we can classify the \textit{ML} techniques as follows.
\subsection{Supervised}
In this case, all the training data is labeled. The majority of ML-based techniques require that the data be completely labeled. The accuracy of the system decreases if the data is very small in size. This is called the data sparsity problem. Such models tend to over-fit if the data size is small.
\subsection{Unsupervised}
Here, the data is completely unlabeled. As opposed to supervised techniques, unsupervised techniques do not suffer from the data sparsity problem, as the input is unlabeled.
\subsection{Semi-supervised}\label{subsection:semisupervised}
Here we have a mixture of both labeled and unlabeled data. Using the labeled data, we try to annotate the unlabeled data. This technique is very useful if we have very sparse data.
\section{Feature Vector}
\par A feature vector can be thought of as a way to represent the input. Some important aspects of the input are considered and used to represent it in the form of a vector of values. These values contain some important information which aids the classification algorithm.
\section{Models used for classification}
\par When we say we learn from the data, we actually train the model to do so. Broadly, a model can be trained in a generative or in a discriminative way. What do we actually mean by this? We want to infer the class of a given \textit{text}.
\par Let \textit{x} represent the input \textit{text}. We want to determine the class \textit{y} given the input, \textit{i.e.}, we are interested in modeling \(p(y\mid x)\). There are three ways to do this.
\subsection{Generative Models}
\par One way is to model \(p(x, y)\) directly. Once we do that, we can obtain \(p(y \mid x)\) by simply conditioning on \(x\). We can then use decision theory to determine class membership, \textit{i.e.}, we can use a loss matrix, \textit{etc.}, to determine which class the point belongs to (such an assignment would minimize the expected loss). We can learn \(p(y)\), the prior class probabilities, from the data. We can also learn \(p(x\mid y)\) from the data using, say, maximum likelihood estimation (or a Bayes estimator, if you will). Once we have \(p(y)\) and \(p(x\mid y)\), \(p(x, y)\) is not difficult to find out.
Bayes' rule is given as follows,
\begin{equation}
p(y\mid x) = \frac{p(x\mid y)p(y)}{p(x)}
\end{equation}
\subsection{Discriminative Models}
Instead of modeling \(p(x, y)\), we can directly model \(p(y\mid x)\); e.g., in logistic regression \(p(y\mid x)\) is assumed to be of the form,
\begin{equation}
p(y\mid x) = \frac{1}{1 + \mathrm{e}^{-\sum_{i} w_i x_i}}
\end{equation}
All we have to do in such a case is to learn the weights that would minimize the squared loss.
\par There is a very easy technique to tell whether a model is generative or discriminative. If we can generate new training data using the model, then it is certainly generative. In the case of generative models, the distribution \(p(x\mid y)\) is a model which fits the training data, so it can be used to generate new data. In the case of discriminative models, this is not the case.
\subsection{Encoding a function}
We find a function \(f(\cdot)\) that directly maps \(x\) to a class. The best example of this is decision trees.
\section{Models}
Here, we explain the various machine learning models used.
\subsection{Naive Bayes classifier}
\par The Naive Bayes classifier is a generative classifier with strong independence assumptions. This means that all the features in the feature vector are assumed to be independent of each other given the class. Despite this very naive assumption, it gives surprisingly good results. The parameters are estimated using the method of maximum likelihood. Suppose we want to determine the value of the class variable \(C\) for a given input consisting of feature variables \(F_1,\dots,F_n\); this can be expressed as a conditional probability \(p(C\mid F_1,\dots,F_n)\) using Bayes' theorem,
\begin{equation}
p(C\mid F_1,\dots,F_n) = \frac{p(C)p(F_1,\dots,F_n\mid C)}{p(F_1,\dots,F_n)}
\end{equation}
To use this equation for the classification of a given input \textit{x} which is represented by a feature vector \((F_1=f_1,\dots,F_n=f_n)\), the following equation is used,
\begin{equation}
class(f_1,\dots,f_n) = \arg\max_c p(C=c) \prod_{i=1}^{n} p(F_i = f_i \mid C=c)
\end{equation}
\par The main advantage of Naive Bayes is that it gives remarkably high accuracy for relatively small data sets because of the independence assumption.
\subsection{Maximum Entropy}
\par Maximum Entropy, more commonly known as \textit{MaxEnt}, is a discriminative classifier. As it is a discriminative classifier, here we find the conditional distribution of the class variable directly. The basic principle underlying maximum entropy is that, without external knowledge, one should prefer distributions which are uniform and have maximum entropy. Using the training data, constraints on the distribution are derived which can be used to infer where the distribution should be minimally non-uniform. These constraints represent the expected values of the features \citep*{nigam1999using}. In text classification, \textit{MaxEnt} estimates the conditional distribution of the class label given the \textit{text}. The representation of the \textit{text} is mostly in terms of word count features or word presence features. Let \(f\) be the features that link the observation \(x\) to the class \(c\). A feature in this case is a function \(f_i(x,c)\) with a bounded real value. Also, let \(X\) denote the collection of \textit{texts}. The aim of \textit{MaxEnt} is to restrict the model distribution to have the same expected value for each such feature as is seen in the training data \(X\).
This means that the learned conditional distribution \(p(c \mid x)\) must satisfy the following property,
\begin{equation}\label{eqn:property}
\frac{1}{|X|} \sum_{x \in X} f_i(x, c(x)) = \sum_x p(x) \sum_c p(c \mid x)f_i(x,c)
\end{equation}
Equation~\ref{eqn:property} reduces to the following form, as we are not interested in modeling the collection \(X\) here.
\begin{equation}\label{eqn:newproperty}
\frac{1}{|X|} \sum_{x \in X} f_i(x, c(x)) = \frac{1}{|X|} \sum_{x \in X} \sum_c p(c \mid x)f_i(x,c)
\end{equation}
Feature identification is very important in this case. Using the training data, the expected values of the features are used as constraints on the model distribution. The class label for which most of these constraints are satisfied is the class of the given input \textit{x}.
\subsection{SVM}
A basic \textit{SVM} is a non-probabilistic binary linear classifier which, given an input, predicts which of the two possible classes forms the output.
\section{Usage in SA}
We have covered a lot of the basics needed to easily understand some of the techniques used for SA. We start with a basic bag of words model and then move on to more advanced techniques which incorporate the attributes of the \textit{text}. A discourse-based technique is discussed, followed by a technique which makes use of the minimum cut of a graph. These are followed by an unsupervised method which makes use of a search engine called \textit{AltaVista} to determine the semantic orientation of the \textit{text}. The last method we discuss is a semi-supervised method which aims to perform a ternary classification of sentences.
\subsection{Bag of Words}
In a Bag of Words model, the feature vector is just a unigram model which is used to represent the presence or absence of a word. Let us consider an example sentence, \textit{``I hate to play football''}, and suppose the vocabulary \textit{V} is \(\left\{I, You, like, hate, to, for, play, dance, football, cricket\right\}\). In this case, the feature vector will be \textit{(1,0,0,1,1,0,1,0,1,0)}. We can see that here every word is a feature. In addition, the model makes use of a list of positive and negative words. If a word is positive then the value corresponding to that feature is +1, and if it is negative the value is -1. Thus, for the example in our case, the feature vector after making use of this dictionary becomes \textit{(1,0,0,-1,1,0,1,0,1,0)}. This feature vector is nothing but a representation of the input. If the sum of the values contributed by the opinion words is positive then the sentence has positive polarity, and if it is less than zero then it has negative polarity. For our example, it turns out that the sentence is negative. The accuracy of such a system, though, is not very good, around 65 \%. On the other hand, if this input is fed to a default classifier like \textit{NB}, \textit{SVM} or \textit{MaxEnt}, then a considerable increase in accuracy is observed \citep*{go2009twitter}.
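\par To make the representation concrete, the following short Python sketch builds the unigram feature vector and the keyword-based polarity decision for the example sentence above. The vocabulary and the positive/negative word lists are illustrative assumptions made here only for the sketch; they are not taken from the cited work or from any particular lexicon.
\begin{verbatim}
# Minimal sketch of the lexicon-augmented bag-of-words representation.
# Vocabulary, example sentence and word lists are illustrative only.
vocabulary = ["I", "You", "like", "hate", "to", "for",
              "play", "dance", "football", "cricket"]
positive_words = {"like"}
negative_words = {"hate"}

def feature_vector(sentence):
    tokens = set(sentence.split())
    vector = []
    for word in vocabulary:
        if word not in tokens:
            vector.append(0)        # word absent
        elif word in negative_words:
            vector.append(-1)       # present, negative opinion word
        else:
            vector.append(1)        # present (positive or neutral word)
    return vector

def keyword_polarity(sentence):
    # Only the opinion words from the lexicon contribute to the decision.
    score = sum(-1 if w in negative_words else 1
                for w in sentence.split()
                if w in positive_words or w in negative_words)
    if score > 0:
        return "positive"
    return "negative" if score < 0 else "neutral"

print(feature_vector("I hate to play football"))    # [1, 0, 0, -1, 1, 0, 1, 0, 1, 0]
print(keyword_polarity("I hate to play football"))  # negative
\end{verbatim}
The same feature vectors can be fed unchanged to the \textit{NB}, \textit{SVM} or \textit{MaxEnt} classifiers discussed above.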
In \citep*{go2009twitter}, they conducted various experiments and obtained the results shown in Table \ref{table:classifierAccuracy}.
\begin{table}
\caption{Classifier Accuracy}
\begin{center}
\begin{tabular}{| c | c | c | c | c | }
\hline
Features & Keywords & Naive Bayes & MaxEnt & SVM \\
\hline
Unigram & 65.2 & 81.3 & 80.5 & 82.2 \\
\hline
Bigram & N/A & 81.6 & 79.1 & 78.8 \\
\hline
Unigram + Bigram & N/A & 82.7 & 83.0 & 81.6 \\
\hline
Unigram + POS & N/A & 79.9 & 79.9 & 81.9 \\
\hline
\end{tabular}
\end{center}
\label{table:classifierAccuracy}
\end{table}
\subsection{Adding Discourse information}
\par Discourse elements have a sentiment-changing impact on sentences. Let us consider an example,
\begin{center} \textit{``The screenplay was good but I didn't like the movie''}\end{center}
\par The feature vectors discussed in the previous section won't be able to encode the information in such sentences. Using the Bag of Words model can result in classification to a completely opposite polarity. To detect the polarity of such a sentence, detecting the discourse elements and determining their effect is very important. Many approaches for this exist, but all of them are for structured data, and on most micro-blogging sites the content is unstructured. Unstructured data is the main reason that many sentiment analysis tools make use of the bag-of-words model.
\par To solve this problem, it is important to categorize the various types of discourse relations and find out the semantic operators which play a major role. In \citep*{mukherjeesentiment}, discourse relations have been categorized and many examples have been given to emphasize specific relations. Also, the semantic operations influencing the polarity have been explained. The algorithm then takes all these attributes into consideration to create a feature vector. A weight is assigned to each valence shifter taking into consideration its position w.r.t.\ the discourse element. Also, the polarity/sense of a particular word is flipped depending upon its position. The algorithm also makes an attempt to take into account the impact of modals, which should lower the weight in some cases. The feature vector thus consists of weight, polarity, flipping and modality values for each word in the sentence. Words having zero weight do not affect the polarity in any way and are thus ignored during calculation. The feature vector thus created can be used for calculating the sentiment behind the sentence. Two methods have been used: one is lexicon-based classification and the other is \textit{SVM} classification. In the \textit{SVM} classification, words with the same root are represented with a single vector. Also, special handling of \textit{emoticons} is present. \textit{WSD} is also used to determine the exact sense of each word. The results show that this algorithm outperforms all the other methods by a margin which is statistically significant. Also, since this approach is lightweight and extends the baseline bag-of-words model, the performance of the system is very good.
\subsection{Influence of Objective sentences on Classification}\label{section:subanalysis}
In \citep*{pang2004sentimental}, they have attempted to classify movie reviews as positive or negative. In this case the granularity of the \textit{text} is a \textit{document}. Movie reviews often contain a description of the plot. This description might contain polar sentences, but they have no relation whatsoever to the review of the movie.
These sentences don't help in describing how good or bad the movie is. Consider the following example sentence from the review of a recent movie, \textit{``The action follows Jean Valjean, a former convict, as he flees Javert, a fanatical police officer, through a version of 19th-century France peopled with various grotesques, victims and tarnished saints.''} As we can see, this sentence has a number of words with negative polarity. It will be classified as a negative sentence, which might lead to the review as a whole being classified as negative. But we know that this sentence is from the description of the plot and is not an opinion/review about the movie. To solve this problem, we need to identify which sentences are objective and discard them. Objective sentences in this scenario are those sentences which describe the plot or contain some factual information. The approach followed in \citep*{pang2004sentimental} has three steps:
\begin{enumerate}
\item Label the sentences in the document subjective or objective
\item The objective sentences are discarded. This extraction is based on a minimum cut formulation which integrates inter-sentence contextual information with the Bag of Words model
\item A standard machine learning approach is applied to the extract for polarity classification
\end{enumerate}
\subsubsection*{Subjectivity Detection}
This is the first step in their approach. They try to identify subjective sentences. For this, they have made use of cut-based classification. The coherence of sentences is also taken into consideration. Coherence means that the subjectivity status of two sentences close to each other may be the same.
\textit{``I really loved the screenplay of this one. Award Winning Directors are often good at making such movies''}
As we can see from this example, two coherent sentences should ideally be classified under the same class.
\subsubsection*{Cut-based classification}
Let \(x_1,\dots,x_n\) be the sentences in the \textit{document}. Also, let \(C_1,\dots,C_k\) be the classes into which the sentences are to be classified. There are two important sources of information which can aid the classification.
\begin{enumerate}
\item Individual scores: \(ind_j(x_i)\) - the preference of each \(x_i\) for being in class \(C_j\)
\item Association scores: \(assoc(x_i , x_j)\) - an estimate of how important it is that \(x_i\) and \(x_j\) are in the same class
\end{enumerate}
The cut-based classification thus penalizes putting tightly associated items into different classes. Taking this into consideration, the objective is to minimize the cost given below.
\begin{equation}
\sum_{x\in C_1} ind_2(x) + \sum_{x\in C_2} ind_1(x) + \sum_{x_i \in C_1, x_k \in C_2} assoc(x_i,x_k)
\end{equation}
We can see that this problem is intractable, as there are \(2^n\) possible partitions of the \(x_i\)'s. The minimization problem can be solved by building an undirected graph \(G\) with vertices \(\{ v_1, v_2, v_3, \dots, v_n, s, t \}\). The last two are the source and the sink and represent the two classes. The graph consists of the following edges:
\begin{enumerate}
\item \(n\) edges \((s, v_i )\) with weight \(ind_1 (x_i )\)
\item \(n\) edges \((t, v_i )\) with weight \(ind_2(x_i)\)
\item \({n \choose 2}\) edges \((v_i , v_k )\), each with weight \(assoc(x_i , x_k )\)
\end{enumerate}
A cut \((S, T)\) of \(G\) is a partition of its nodes into sets \(S = \{s\} \cup S'\) and \(T = \{t\} \cup T'\), where \(S'\) does not contain \(s\) and \(T'\) does not contain \(t\).
Its cost \(cost(S, T )\) is the sum of the weights of all edges crossing from \(S\) to \(T\). A minimum cut of \(G\) is one of minimum cost.
\subsubsection*{Polarity classification}
Using the cut-based classification, we get the subjective sentences in the movie review. This extract is then fed to default polarity classifiers, which in this case are \textit{NB} and \textit{SVM}. The feature vector used for them is unigram based.
\subsubsection*{Significance}
A cleaner document can be obtained by extracting only the subjective information. The accuracy of the polarity classification improves, as the classifiers don't get irrelevant data. The performance of the system also improves, as it has less text to work on.
\subsection{Unsupervised semantic orientation}
A completely unsupervised approach to SA has been used in \citep*{turney2002thumbs}. It is aimed at the classification of reviews of products, automobiles, movies, \textit{etc}. The approach used has three steps.
\begin{enumerate}
\item Find phrases in the review containing adjectives and adverbs
\item Find the semantic orientation of such phrases
\item Take the average of the semantic orientations. If it is positive then \textit{Thumbs up}, else \textit{Thumbs down}
\end{enumerate}
\subsubsection*{Step 1}
Extract phrases containing adjectives or adverbs. Adjectives are good indicators of subjectivity, but adjectives in isolation have insufficient context to determine semantic orientation. For example, \textit{“unpredictable”} when applied to \textit{“steering”} in a car review has negative semantic orientation. On the other hand, \textit{“unpredictable plot”} in a movie review has positive orientation. Therefore, the algorithm uses two consecutive words, where one is an adjective/adverb and the other provides context.
\subsubsection*{Step 2}
\subsubsection*{Point-wise Mutual Information (PMI)}
The Point-wise Mutual Information between two words \(word_1\) and \(word_2\) is defined as
\begin{equation}
PMI(word_1,word_2) = \log_2 \Bigg[ \frac{p(word_1 \wedge word_2)}{p(word_1)p(word_2)} \Bigg]
\end{equation}
where \(p(word_1 \wedge word_2 )\) is the probability that \(word_1\) and \(word_2\) co-occur.
\subsubsection*{Semantic Orientation}
The semantic orientation of a phrase is given by,
\begin{equation} \label{eqn:SO}
SO(phrase) = PMI(phrase, excellent) - PMI(phrase,poor)
\end{equation}
The \textit{PMI} values are estimated by issuing queries to a search engine, which explains the \textit{IR} in \textit{PMI-IR}. The number of hits is used to estimate the probability values. The search engine used in this case was \textit{AltaVista}. After some algebraic simplifications, and using the fact that \(p(phrase \wedge excellent)\) can be estimated from \textit{hits(phrase NEAR excellent)}, equation \ref{eqn:SO} reduces to the following form:
\begin{equation}
SO(phrase) = \log_2 \Bigg[ \frac{\textit{hits(phrase NEAR excellent)hits(poor)}}{\textit{hits(phrase NEAR poor)hits(excellent)}} \Bigg]
\end{equation}
Here, \textit{hits(query)} stands for the number of hits returned by the query.
\subsubsection*{Step 3}
In this step, the average of the semantic orientations of all the phrases is taken.
\begin{itemize}
\item If the average is positive then Thumbs Up.
\item If the average is negative then Thumbs Down.
\end{itemize}
\subsubsection*{Observations}
After the experiments were conducted, the following observations were made.
\begin{itemize}
\item Movie reviews are hard to classify because a good movie might contain unpleasant scenes. Descriptions of such scenes might decrease the semantic orientation.
\item Accuracy did not increase just by accounting for this bias using a constant value. Just as positive reviews have descriptions of unpleasant scenes, negative reviews might contain descriptions of pleasant scenes.
\end{itemize}
\subsubsection*{Limitations}
This approach has some limitations, as can be seen from the architecture.
\begin{itemize}
\item It queries a search engine to calculate the semantic orientation of each phrase.
\item Every phrase is given equal importance. There should be a weighted sum.
\item The performance in the case of movie reviews is not good because the approach does not take into account the fact that the whole is not necessarily a sum of its parts, as pointed out in Section~\ref{section:subanalysis}.
\end{itemize}
\subsection{Semi-supervised}
A semi-supervised approach for detecting term orientation was used in \citep*{esuli2006determining}, where they have tried to perform a ternary classification of terms as objective, positive, or negative. The motivation behind this study is that most works which determine term orientation assume that we already know whether a term is subjective or not, i.e., that a lexical resource of subjective/objective terms is available. This is not the case. Semi-supervised learning was discussed briefly in Section \ref{subsection:semisupervised}. It can be depicted as shown in Figure 2.1.
\includegraphics[width=\textwidth]{semisupervised}
\begin{center} Figure 2.1 Training Data in semi-supervised learning \end{center}
The unlabeled data has to be labeled, and it should also be used for training. The labeled data is usually called the seed set. In \citep*{esuli2006determining}, they have made use of the synonymy-antonymy relations of WordNet to create the training data.
\includegraphics[width=\textwidth]{synonymyantonymyrelation}
\begin{center} Figure 2.2 Using the synonymy-antonymy relation for labeling unlabeled data \end{center}
As we can see, we start with very small seed sets in this case, each containing only one element. The exact procedure is shown in Figure 2.3.
\includegraphics[width=\textwidth,height=0.6\textwidth]{procedure}
\begin{center} Figure 2.3 Procedure for labeling unlabeled data using the synonymy-antonymy relation \end{center}
\par After getting all the training data, a textual representation for each term is generated by collating all the WordNet glosses for that term. A cosine-normalized \textit{TF-IDF} representation is used as the feature vector. The results obtained by the approaches used in this paper were not that accurate, and they show that algorithms used for term orientation perform badly when applied to ternary classification and need improvement.
\section*{SUMMARY}
In this chapter we introduced \textit{Sentiment Analysis}. The motivation behind this work was discussed. A psychological viewpoint of \textit{SA} was explained, and a formal problem definition of sentiment analysis was given. This was followed by depicting the various dimensions of a problem in \textit{SA}. Prevalent challenges in sentiment analysis were discussed briefly and some major applications were listed. We then started with the basics of Machine Learning and explained the various approaches and techniques used in ML. We explained what a feature vector is. Moving forward, we tried to understand the different models used in ML, and works using all these techniques were explained. In the next chapter we will discuss information retrieval in brief and then focus on corpus models, mainly \textit{LDA}.
\clearpage
{ "alphanum_fraction": 0.7730514682, "avg_line_length": 57.0073260073, "ext": "tex", "hexsha": "ea54cb5984480275256d07fc1ad7ccf0ae9da2b0", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "3b1b8b0b3b33d3728f000a4260aa2e264df39079", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "nikolodien/Paper-submissions", "max_forks_repo_path": "Report/tex_files/Chapters/sa.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "3b1b8b0b3b33d3728f000a4260aa2e264df39079", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "nikolodien/Paper-submissions", "max_issues_repo_path": "Report/tex_files/Chapters/sa.tex", "max_line_length": 189, "max_stars_count": null, "max_stars_repo_head_hexsha": "3b1b8b0b3b33d3728f000a4260aa2e264df39079", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "nikolodien/Paper-submissions", "max_stars_repo_path": "Report/tex_files/Chapters/sa.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 7596, "size": 31126 }
\subsection{The reflexive construction}\label{refl} Yakkha does not have reflexive pronouns. The reflexive is constructed by a complex predicate with the V2 \emph{-ca} \rede{eat}. It indicates that the A and P argument of the predicate have identical reference. The resulting verb gets detransitivized with regard to case and person marking, as shown in \Next. This construction can only express complete coreferentiality, so that propositions like \rede{I saved us} can neither be expressed by the verbal morphology nor by the reflexive derivation in Yakkha. The various other functions of this V2 are treated in \sectref{V2-eat}. Apart from reflexive constructions, it also occurs in many lexicalized predicates with classical middle semantics, such as grooming and social interactions. Some of the labile verbs that are discussed in \sectref{labile} also show reflexive semantics when they are inflected intransitively, without attaching the reflexive marker. \exg. nda (aphai) moŋ-ca-me-ka=na\\ \sc{2sg} (self) beat-\sc{V2.eat-npst-2=nmlz.sg}\\ \rede{You beat yourself.} The examples below show that the reflexive can also apply to a quantified noun phrase \Next[a], to a question pronoun \Next[b], and to negated propositions \Next[c]. There are no dedicated negative pronouns in Yakkha; negation is constructed by a question pronoun with the additive focus clitic \emph{=ca} and verbal negation markers. In irrealis (question and negation) contexts, both singular and nonsingular inflection is possible, depending on the potential referents that the speaker has in mind (or what the speaker assumes the addressee has in mind). \ex. \ag.ghak sar n-so-ca-ya\\ all teacher {\sc 3pl}-look-{\sc V2.eat-pst}\\ \rede{All teachers looked at themselves.} \bg.\label{uchik}isa u-chik ekt-a-ca-ya=na \\ who {\sc 3sg.poss}-hate make\_break{\sc [3sg]-pst-V2.eat-pst=nmlz.sg} \\ \rede{Who is angry at himself?} \bg.isa=ca n-so-ca-ya-n=ha=ci \\ who{\sc =add} {\sc neg}-look-{\sc V2.eat-pst-neg=nmlz.nsg=nsg} \\ \rede{No one looked at themselves.} In three-argument verbs, there are two potential candidates for coreference with A. Whether A controls G or T is a matter of the original frame of the verb. For double object verbs, coreference with T is ungrammatical (see \Next[a]), while coreference with G is fine (see \Next[b]).\footnote{It seems crosslinguistically unexpected that the coreference of A and G is accepted, while the coreference of A and T is ungrammatical. Kazenin states the implicational universal that \rede{[...] if a language allows verbal marking of indirect reflexives, it allows verbal marking of direct reflexives as well.} \citep[918]{Kazenin2001_Verbal}. } It is not possible for T and G to be coreferential in the reflexive derivation, i.e. to express propositions like \rede{I showed him to himself (in the mirror)}. \ex. \ag. *ka ama (aphai phoʈo=be) soʔmen-ca-me-ŋ=na\\ \sc{1sg[erg]} mother (self photo\sc{=loc}) show-\sc{V2.eat-npst-1sg=nmlz.sg} \\ Intended: \rede{I show myself to mother (on the photo).} \bg. ka (aphai) cokleʈ pin-ca-me-ŋ=na\\ \sc{1sg[erg]} (self) sweet give-\sc{V2.eat-npst-1sg=nmlz.sg}\\ \rede{I give myself a sweet.} As for three-argument verbs of the indirective frame, A can be coreferential with the argument in the nominative. This is illustrated by the verb \emph{thumma} \rede{tie to}. In \Next[a] the frame is shown for comparison. Example \Next[b] shows the reflexive, where the locative G argument is retained and the nominative T argument is coreferential with A, and thus unexpressed.
The A argument changes its case marking from ergative to nominative. \ex. \ag.a-ppa=ŋa on siŋ=be thund-u=na\\ {\sc 1sg.poss}-father{\sc =erg} horse tree{\sc =loc} tie{\sc -3.P[pst]=nmlz.nsg} \\ \rede{My father tied the horse to the tree.} \bg.a-nuncha siŋ=be thun-ca-meʔ=na\\ {\sc 1sg.poss}-younger\_sibling tree\sc{=loc} tie-\sc{V2.eat[3sg]-npst=nmlz.sg}\\ \rede{My brother ties himself to a tree.} Reflexivization is also possible with verbs of the experiencer-as-possessor frame (cf. \sectref{exp} and \sectref{nv-comp-poss}), as shown above in example \ref{uchik}. In complex sentences, e.g. in embedded complement clauses, the reflexive V2 can mark the main verb, although the reflexive semantics actually apply to the predicate in the embedded clause (see \Next).\footnote{The complement-taking predicate \emph{miʔma} means \rede{want} when it takes infinitival complements; its meaning in other constructions is \rede{hope, like, think} (see also Chapter \ref{compl}).} This is, however, only possible in the type of complement construction that embeds infinitives, where the embedded and the main clause S/A argument are necessarily coreferential.\footnote{Note that the ergative on A is retained in this construction, which suggests that the A argument belongs to the embedded transitive clause (a case of backward control).} \exg. uŋ=ŋa phoʈo cok-ma min-ca-meʔ=na\\ {\sc 3sg=erg} photo make{\sc -inf} want-{\sc V2.eat[3sg]-npst=nmlz.sg} \\ \rede{She wants to take a photo of herself.} \subsection{The reciprocal construction}\label{refl3} The reciprocal is constructed by attaching the suffix \emph{-khusa} to the stem of the lexical verb and employing the verb \emph{cama} \rede{eat} as auxiliary (see \Next[a]). The lexical verb and the auxiliary have to be adjacent, but the degree of morphological fusion is lower than in the reflexive construction and complex predication in general. Inflectional prefixes attach to the auxiliary, not to the lexical verb. As the reciprocal expresses mutual actions, it is characterized by at least two participants that both simultaneously have the role of actor and undergoer. The reciprocal participants are fused into one noun phrase. The construction only applies to transitive verbs, and it always formally detransitivizes the predicate, by assigning the nominative case to the A arguments and by inflecting the auxiliary intransitively, even when the lexical verb is a three-argument verb, as in \Next[b]: here, the G argument is coreferential with A and hence it is omitted, while the T remains on the surface, retaining the case marking of its frame of argument realization (unmarked nominative in the double object frame). Contexts where reciprocals of double object verbs have coreferential A and T arguments are hard to imagine, and those proposed were rejected (see ungrammatical \Next[c]). Inherently reciprocal verbs such as \emph{tupma} \rede{meet}, \emph{tuma} \rede{fight}, \emph{khima} \rede{quarrel} and \emph{cuŋma} \rede{wrestle} are intransitive in Yakkha; they do not permit the reciprocal operation. \ex. \ag. kanciŋ [...] sok-khusa=se ca-ya-ŋ-ci-ŋ\\ \sc{1du} [...] look-\sc{recip=restr} eat\sc{.aux-pst-excl-du-excl}\\ \rede{We (dual, excl) just looked at each other.} (A=P) \source{40\_leg\_08.070} \bg. kanciŋ phuŋ pi-khusa ca-me-ci=ha\\ \sc{1du} flower give-\sc{recip} eat\sc{.aux-npst-1du=nmlz.nsg}\\ \rede{We (dual, incl) give flowers to each other.} (A=G) \bg. 
*kanciŋ ama(=be) soʔmek-khusa ca-me-ci=ha\\ \sc{1du} mother(\sc{=loc}) show-\sc{recip} eat\sc{.aux-npst-[1]du=nmlz.nsg}\\ Intended: \rede{We showed each other to mother (e.g. on a photo).} (*A=T) The antecedent of the coreferential argument always has to be the agent, as with the reciprocal of \emph{nis} \rede{see, know}, yielding \rede{introduce, get to see/know each other} in \Next[a]. Coreferential T and G are possible, however, when the causative marker \emph{-met} is attached to the auxiliary, so that the reciprocal construction serves as input to a causative construction (see \Next[b]). The arguments that are fused into one noun phrase are the A and P arguments of the reciprocal construction, and simultaneously they are T and G arguments of the causative construction \emph{nikhusa cameʔma} \rede{introduce to each other}, which shows transitive person marking and ergative case marking on A. The causative verb \emph{nimeʔma}, without the reciprocal, also exists; it is a three-argument verb with the meaning \rede{introduce (X to Y)}. \ex. \ag. kanciŋ ni-khusa ca-me-ci=ha\\ \sc{1du} see/know-{\sc recip} eat{\sc .aux-npst-[1]du=nmlz.ns}\\ \rede{We will get to see/know each other.} (A=P) \bg. uŋ=ŋa uŋci ni-khusa ca-met-u-ci=ha\\ \sc{3sg=erg} \sc{3nsg} see/know-\sc{recip} eat\sc{.aux-caus-3.P[pst]-nsg.P=nmlz.nsg}\\ \rede{He introduced them (to each other).} ([[A=P.{\sc recip}], G=T.{\sc caus}]) In the indirective frame (characterized by locative or ablative marking on the G argument, see \sectref{three-arg-frame}), the reciprocal construction can express coreference of A and T or A and G, regardless of the case and agreement properties of the arguments in the corresponding non-reciprocal predicate. The possibilities are restricted only by the verbal semantics, i.e. whether the T or the G argument is animate/human and thus eligible for being coreferential with A. In \Next[a], the A argument is coreferential with T, while in \Next[b], A is coreferential with G. \ex. \ag. uŋci hoŋma=be luŋ-khusa ca-ya-ci=ha\\ \sc{3nsg} river{\sc =loc} drown-{\sc recip} eat{\sc .aux-pst-[3]du=nmlz.nsg}\\ \rede{They (dual) drowned each other in the river.} (A=T) \bg.uŋci yaŋ khu-khusa ca-me-ci=ha\\ \sc{3nsg} money steal{\sc -recip} eat{\sc .aux-npst-[3]du=nmlz.nsg}\\ \rede{They steal money from each other.} (A=G) In the secundative frame (characterized by instrumental marking on the T argument, see \sectref{three-arg-frame}), animate or human T arguments are hardly conceivable, and thus, only instances with coreferential A and G could be attested, as shown in \Next. \exg.ibebe n-juŋ-a-ma, ikhiŋ=ga tabek=ŋa ce-ŋkhusa n-ja-ya=em, barcha-ŋa hok-khusa n-ja-ya=em, luŋkhwak=ŋa lep-khusa n-ja-ya, ikhiŋ=ga bhuiʈar=ŋa ap-khusa n-ja-ya.\\ anywhere {\sc 3pl-}fight{\sc -pst-prf} so\_big{\sc =gen} khukuri\_knife{\sc =ins} cut{\sc -recip} {\sc 3pl-}eat{\sc .aux-pst=alt} spear{\sc =ins} pierce{\sc -recip} {\sc 3pl-}eat{\sc .aux-pst=alt} stone{\sc =ins} throw{\sc -recip} {\sc 3pl-}eat{\sc .aux-pst} so\_big{\sc =gen} catapult{\sc =ins} shoot{\sc -recip} {\sc 3pl-}eat{\sc .aux-pst=alt}\\ \rede{They fought so much, with knifes so big, whether they cut each other with knives, whether they stabbed each other with lances, they threw stones at each other, they shot each other with a really big catapult.} \source{39\_nrr\_08.21--2} Derived verbs can also serve as input to the reciprocal construction, as shown for the benefactive in \Next[a] and for the causative in \Next[b]. \ex. 
\ag.kanciŋ ʈopi pham-bi-khusa ca-me-ci=ha\\ \sc{1du} cap knit{\sc -V2.give-recip} eat{\sc .aux-npst-[1]du=nmlz.nsg} \\ \rede{We knit caps for each other.} \bg.kaniŋ cuwa=ŋa khoʔ-meʔ-khusa ca-i-wa\\ \sc{1pl} beer\sc{=ins} have\_enough\sc{-caus-recip} eat{\sc .aux-1pl-npst}\\ \rede{We serve each other beer.} (Lit. \rede{We make each other have enough beer.}) \subsection{The middle construction}\label{middle} Middle verbs are characterized by denoting an event that “affects the subject of the verb or his interests”, to take up the definition by \citet[373]{Lyons1969_Introduction}. Characteristic for a middle situation is the low elaboration of participants in an event \citep[3]{Kemmer1993_Middle}. Agent and patient have the same reference, just as in the reflexive. In the middle, however, agent and patient are less distinct conceptually, because many of the events do not presuppose a volitional agent. Volitionality is a crucial feature of a prototypical agent \citep{Hopperetal1980Transitivity, Foleyetal1984Functional}. Hence, the middle is semantically less transitive than a reflexive, but still more transitive than an intransitive verb \citep[73]{Kemmer1993_Middle}. The Yakkha middle is marked by \emph{-siʔ}, which behaves like a function verb, despite originating in a suffix, as comparison with other Tibeto-Burman languages shows (see \sectref{V2-mddl}). The distinctive semantic criterion of the middle marker \emph{-siʔ} in Yakkha is the low intentionality and volitionality on the part of the subject. The middle derivation detransitivizes the verbs (compare \Next[a] and \Next[b]). With a few verbs, \emph{-siʔ} may indicate a reciprocal reading, but, crucially, only when the action was performed unintentionally (see \NNext). \ex. \ag.ka bhitta=be kila likt-u-ŋ=na\\ {\sc 1sg[erg]} wall{\sc =loc} nail drive\_in-{\sc 3.P[pst]-1sg.A =nmlz.sg} \\ \rede{I drove a nail into the wall.} \bg.ka likt-a-sy-a-ŋ=na\\ {\sc 1sg} drive\_in{\sc -pst-mddl-pst-1sg=nmlz.sg} \\ \rede{I got stuck (in the mud, head first).} \ex. \ag.ka hen=ca a-ʈukhruk dailo=be lukt-i-ŋ=na.\\ {\sc 1sg[erg]} today{\sc =add} {\sc 1sg.poss}-head door{\sc =loc} knock{\sc -compl[3.P;pst]-1sg.A=nmlz.sg} \\ \rede{I knocked my head at the door even today.} \bg.lukt-a-sy-a-ŋ-ci-ŋ=ha\\ knock-{\sc pst-mddl-pst-excl-[1]du-excl=nmlz.nsg} \\ \rede{We (dual) bumped into each other.} The semantics of verbs that take the middle marker cover the situation types commonly associated with the category of middle crosslinguistically: grooming and body care, motion, change in body posture, reciprocal events, emotion, cognition and spontaneous events. The middle marker \emph{-siʔ} encodes grammatical functions as well as lexicalized meanings, just as the reflexive/autobenefactive V2 \emph{-ca} (see \sectref{V2-eat}). For more on \emph{-siʔ} see \sectref{V2-mddl}. \subsection{V2 stems signalling animate T arguments}\label{t-sap} Certain scenarios in three-argument verbs require additional marking in Yakkha. As the T argument of three-argument verbs is typically less topic-worthy, salient or lower on a referential hierarchy than the G argument, one could expect an increase in morphological complexity in the verb when the T argument is higher on the referential hierarchy or when the G argument is lower than expected, i.e., \rede{the construction which is more marked in terms of the direction of information flow should also be more marked formally} \citep[128]{Comrie1989Language}. Such a marking is comparable to inverse marking for agent and patient, as found e.g.
in Algonquian languages \citep{Zuniga2007_From}. According to \citet[90ff]{Haspelmath2007Ditransitive}\todo{Please do not use ff. Give page ranges instead}, such verbal marking has not been found for the relation of T and G in three-argument verbs yet. Yakkha, too, does not have one dedicated marker for \rede{inverse} scenarios of T and G. But there is a tendency for animate or human T (and P) arguments to require a serial verb construction, and thus more complexity in the verb. Several V2 stems can be found in this function, most prominently \emph{-khet \ti -het} \rede{carry off}, \emph{-end} \rede{insert}, \emph{-raʔ} \rede{bring} and \emph{-haks} \rede{send}. As there are several V2s with different semantics, it is not their only function to indicate referentially high T arguments. They can even be found with inanimate T arguments. The crucial point is that certain scenarios cannot be expressed without using them, as for instance in example \Next. The stealing of things is expressed by a simple verb stem (see \Next[a]), while stealing a person cannot be expressed with a simple verb. Instead, the complex construction with the V2 \emph{-het} \rede{carry off} is used, implying caused motion away from a point of reference (see \Next[b]). If, instead, the V2 \emph{-haks} \rede{send} is applied to the lexical stem \emph{khus} \rede{steal}, the meaning changes to \rede{rescue} (see \Next[c]). The simple stem \emph{khus}, however, cannot express events with human T arguments. \ex. \ag. pasal=bhaŋ yaŋ khus-uks-u=ha\\ shop{\sc =abl} money steal{\sc -prf-3.P[pst]=nmlz.nc} \\ \rede{(He) has stolen money from the shop.} \bg. ka ijaŋ a-paŋ=bhaŋ khus-het-a-ŋ-ga=na?\\ {\sc 1sg} why {\sc 1SG.poss-}house{\sc =abl} steal{\sc -V2.carry.off-pst-1.P-2.A=nmlz.sg} \\ \rede{Why did you steal me from my home?} \bg. kiba=bhaŋ khus-haks-a-ŋ-ga=na\\ tiger{\sc =abl} steal{\sc -V2.send-pst-1.P-2.A=nmlz.sg} \\ \rede{You saved me from the tiger.} Some examples from natural texts are provided in \Next. \ex.\ag.nhaŋa nnakha yapmi ta-khuwa=ci ikt-haks-u=ci,\\ and\_then those person come{\sc -nmlz=nsg} chase{\sc -V2.send-3.P[pst]-nsg.P}\\ \rede{And then she chased away those people who were coming.} \source{14\_nrr\_02.034} \bg. akka kamniwak=ci hip-paŋ tikt-u-ra-wa-ŋ-ci-ŋ\\ {\sc 1sg.poss=gen} friend{\sc =nsg} two{\sc -clf.hum} guide{\sc -3.P-V2.bring-npst-1sg.A-3nsg.P-1sg.A}\\ \rede{I will bring along two of my friends.} \source{14\_nrr\_02.023} A very typical example is also the verb \emph{pinnhaŋma} \rede{send off} shown in \Next, which has already acquired a fixed meaning \rede{marry off (one's daughter)}. \exg. m-ba=ŋa nda ka=be pin-nhaŋ-me-ŋ=na=bu=i?\\ {\sc 2sg.poss}-father{\sc =erg} {\sc 2sg} {\sc 1sg=loc} give{\sc -V2.send-npst-1sg.P=nmlz.sg=rep=q}\\ \rede{(Did they say that) your father will give you to me (in marriage)?} It can be concluded that the higher complexity and greater semantic specification of an event via serialization is necessary in, but not restricted to events with referentially high T (and occasionally also P) arguments. \subsection{Historical excursus: Stem augments}\label{stemchange} Yakkha verbal stems can be divided into unaugmented and augmented roots (see also \sectref{stem}). Both open ((C)V) and closed ((C)VC) stems can be extended by the coronal augments \emph{-s} and \emph{-t}. 
These augments can be related to transitivizing suffixes in Proto-Tibeto-Burman, often with \emph{-s} coding a causative and \emph{-t} coding a directive or a benefactive derivation (see \citet[457]{Matisoff2003Handbook}, \citealt[160]{Driem1989_Reflexes}). Synchronically, however, the augmentation does not constitute a productive pattern. Some reflexes of this old system can, however, still be found in correspondences such as in \tabref{stem-aug}, albeit only for a small fraction of the verbal lexicon. Complete stem triads (consisting of an unaugmented, an \emph{-s}-augmented and a \emph{-t}-augmented root) are exceedingly rare, and synchronically, many intransitive verbs with augmented stems exist as well, which clearly shows that a regular correspondence between augmentation and transitivization is not given synchronically.\footnote{Comparing the stems in Yakkha with other Kiranti languages, the form and meaning of the augmented stems do not correspond across individual languages. Unaugmented stems in one Kiranti language may have augments in another language, and augments may differ for cognate roots, which adds support to the reconstruction of these augments as non-integral part of the verbal stem, i.e., as suffixes.} The stem alternations do not necessarily entail an increase in the number of arguments; sometimes just the properties of the arguments change, along with the case and person marking. For instance, \emph{haks} and \emph{hakt} both mean \rede{send}, but the goal of \emph{haks} is in the locative case and referentially unrestricted, while \emph{hakt} takes a human goal in the nominative, which also points to the former use of the augment /-t / as a benefactive marker. We have seen above in \sectref{benefactive} that there is also a suffix \emph{-t} in the benefactive derivation, which is probably also related to these old suffixes. It is only employed as a secondary marker, accompanying the primary benefactive marker, the V2 \emph{-piʔ}. \begin{table}[htp] \begin{center} \resizebox{\textwidth}{!}{ \begin{tabular}{lll} \lsptoprule (C)V(C) &(C)V(C)-s&(C)V(C)-t\\ \midrule \emph{ap} \rede{come} (same level, close) & &\emph{apt} \rede{bring}\\ & \emph{haks} \rede{send somewhere}& \emph{hakt} \rede{send to someone}\\ \emph{keʔ} \rede{come up} & &\emph{ket} \rede{bring up}\\ \emph{kheʔ} \rede{go} & &\emph{khet} \rede{carry off}\\ \emph{khuʔ} \rede{carry} & \emph{khus} \rede{steal}&\emph{khut} \rede{bring}\\ \emph{luʔ} \rede{tell, say} & \emph{lus} \rede{deafen, roar}&\emph{lut} \rede{tell for s.o. (story, song)}\\ &\emph{maks} \rede{wonder, look around}&\emph{makt} \rede{see in dream} \\ \emph{si} \rede{die} &\emph{sis} \rede{kill}&\\ \emph{ta} \rede{come} (general) &\emph{tas} \rede{arrive at}&\emph{taʔ} \rede{bring to}\\ & \emph{uks} \rede{come down}& \emph{ukt} \rede{bring down}\\ \emph{yuŋ} \rede{sit} &\emph{yuks} \rede{put}&\emph{yukt} \rede{put for s.o.}\\ \lspbottomrule \end{tabular} } \caption{Stem augmentation and transitivity correspondences}\label{stem-aug} \end{center} \end{table} The stem \emph{tup} \rede{meet} also undergoes the stem alternation. While the unaugmented stem is inherently reciprocal and is thus inflected intransitively (and thus, necessarily, takes nonsingular arguments), the stem \emph{tups} is transitively inflected and takes two arguments that cannot have identical reference. \ex. \ag. 
kanciŋ tub-a-ŋ-ci-ŋ=ha\\ {\sc 1du} meet-{\sc pst-excl-du-excl=nmlz.nsg}\\ \rede{We met.} \bg.ka ŋ-gamnibak n-dups-u-ŋa-n=na\\ {\sc 1sg[erg]} {\sc 2sg.poss-}friend {\sc neg-}meet{\sc -3.P[pst]-1sg.A-neg=nmlz.sg}\\ \rede{I did not meet/find your friend.}
{ "alphanum_fraction": 0.7422849639, "avg_line_length": 95.6143497758, "ext": "tex", "hexsha": "43f7dad5d551712380146f834fdeb6530d153db2", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "37a7473097d2c8ed7787bfda95096b940d2db6c5", "max_forks_repo_licenses": [ "CC-BY-4.0" ], "max_forks_repo_name": "langsci/66", "max_forks_repo_path": "chapters/10b_TransitivityOperations_b.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "37a7473097d2c8ed7787bfda95096b940d2db6c5", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "CC-BY-4.0" ], "max_issues_repo_name": "langsci/66", "max_issues_repo_path": "chapters/10b_TransitivityOperations_b.tex", "max_line_length": 1647, "max_stars_count": null, "max_stars_repo_head_hexsha": "37a7473097d2c8ed7787bfda95096b940d2db6c5", "max_stars_repo_licenses": [ "CC-BY-4.0" ], "max_stars_repo_name": "langsci/66", "max_stars_repo_path": "chapters/10b_TransitivityOperations_b.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 6700, "size": 21322 }
\chapter{Experiments and Results}\label{ch4:experiments-results}
In this chapter, we present some quantitative and qualitative evaluations of the variants of the recreated single-view MPI model retrained on different combinations of the MannequinChallenge and RealEstate10K datasets. We use the pretrained weights of the single-view MPI model as the benchmark and compare the abilities of all model variants at hand to generate novel views. We adopt some of the quantitative metrics from Tucker and Snavely's single-view MPI paper~\cite{single_view_mpi} --- PSNR, SSIM~\cite{wang_image_2004}, and LPIPS~\cite{zhang_unreasonable_2018} --- to give numeric values to the similarities between MPI-rendered video frames and the corresponding ground truth target frames the rendering process attempts to replicate.
The model variants used to compute the metrics stated above are characterized by the following hyperparameters/metadata:
\begin{itemize}
\item Depth loss weight, as explained in subsection~\ref{subsec:base-papers}.
\item The number of disparity map channels specified in the \texttt{tf.function} input signature for the bilinear sampling function in our training script (Section~\ref{sec:code-sources}),\\\texttt{sample\_disparities(disparity,points)}, involving the predicted disparity and the input visible points.
\item The lower bound on the number of visible points required per frame. Videos with even one frame having the number of visible points below this threshold would be removed from training.
\item The choice of datasets used to train on --- MannequinChallenge, RealEstate10K, or both.
\item Whether multiple GPU workers were engaged or not.
\end{itemize}
Even seemingly innocuous hyperparameter values, such as the number of disparity map channels specified, could, we believe, have easily held sway over training progress. Pitting these variants against each other in terms of the three computed metrics helped us select the best variant to simulate one half of a video chat with.
We manually sifted through the in-built test set of the MannequinChallenge dataset to handpick a set of 333 videos with 12,595 frames in total. These ORB-SLAM2-curated sequences had video-chat-relevant features. They mostly focused on the heads and torsos of people rather than wide shots of entire bodies. The number of people in the frames was mostly limited to one or two, as opposed to multiple people being featured. Moreover, although not a strict requirement, the head pose of people in these frames was roughly or even very loosely aligned with the camera. There was hardly anybody in any frame who seemed to look directly at the camera, as would be expected in an actual video chat scenario. We put these cherry-picked frames in the \texttt{test-yes/} bin. We also curated \texttt{test-maybe/} (300 videos with 12,831 frames) and \texttt{test-no/} (24 videos with 728 frames) bins. They consisted of the rest of the MannequinChallenge test set, with sequences either having no relevance to typical video chat settings (like there being hardly anyone in the frames) in the case of \texttt{test-no/}, or falling heavily in the gray areas between \texttt{test-yes/} and \texttt{test-no/} in the case of \texttt{test-maybe/}. We even occasionally interspersed the \texttt{test-yes/} and \texttt{test-maybe/} bins with videos containing sequences that portrayed people facing directly away from the camera. This was just so we could really challenge the model variant being tested.
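\par As a point of reference, the following is a minimal sketch of how the three per-pair numbers can be computed for one (target, rendered) frame pair. It reflects our assumptions rather than the exact evaluation code: \texttt{tf.image.psnr} and \texttt{tf.image.ssim} are used for PSNR and SSIM, and the PyTorch-based \texttt{lpips} package is used for LPIPS.
\begin{verbatim}
# Sketch only: compute PSNR, SSIM and LPIPS for one image pair.
import numpy as np
import tensorflow as tf
import torch
import lpips                      # pip install lpips

def pair_metrics(target_u8, rendered_u8, lpips_net):
    """target_u8, rendered_u8: HxWx3 uint8 numpy arrays of equal size."""
    t = tf.convert_to_tensor(target_u8.astype(np.float32))
    r = tf.convert_to_tensor(rendered_u8.astype(np.float32))
    psnr = float(tf.image.psnr(t, r, max_val=255.0))
    ssim = float(tf.image.ssim(t, r, max_val=255.0))

    # LPIPS expects NCHW float tensors scaled to [-1, 1].
    to_torch = lambda x: torch.from_numpy(
        x.astype(np.float32) / 127.5 - 1.0).permute(2, 0, 1).unsqueeze(0)
    with torch.no_grad():
        dist = lpips_net(to_torch(target_u8), to_torch(rendered_u8)).item()
    return psnr, ssim, dist

lpips_net = lpips.LPIPS(net='alex')   # AlexNet-backed LPIPS variant
# The per-pair values are then averaged over all tested pairs of a variant.
\end{verbatim}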
Of the various aspects of the code that we modelled from the textual descriptions and relevant code snippets obtained from both the single-view and stereo MPI papers, such as \texttt{generator\_wandb.py}, \texttt{data\_loader.py}, \texttt{train\_wandb.py}, and \texttt{test.py}, the scripts relevant to the experiments in this section are \texttt{test.py} and\\\texttt{generator\_wandb.py} (Section~\ref{sec:code-sources}).
For testing, the generator first aggregates all video names from the directory input to it and, for each of these, picks various \texttt{reference\_image} and \texttt{target\_image} pairs which are 5 frames apart.
\texttt{reference\_image} is the frame from which \texttt{test.py} infers the MPI representation of the scene, and \texttt{target\_image} is a view of the same scene from a different angle.
The possibility that, when the camera moves from one scene to another in the same video, \texttt{reference\_image} may depict a scene different from the one captured by \texttt{target\_image} is expected to be extremely low, as both datasets have been curated by similar ORB-SLAM2 and COLMAP processes.
In such hypothetical cases, \texttt{target\_image} will be erroneously rendered by the \texttt{mpi.render} function as the corresponding \texttt{rendered\_image}.
But since we take the mean of the computed metrics over hundreds of \texttt{test.py}-processed (\texttt{reference\_image}, \texttt{target\_image}) pairs, we believe a variant's mean metrics will not be thrown off much and can still be used to judge its performance satisfactorily.
Each of the three metrics is calculated between \texttt{target\_image} and \texttt{rendered\_image}, with the target situated 5 frames away from the reference along the camera trajectory of the respective clip (a schematic sketch of this evaluation loop is given below).
We did not repeat the same test process for frames 10 apart, which would just have been done to show (as in the single-view MPI paper) that the longer the baseline between reference/source and target views, the lower the accuracy of the rendered image.
On the same note, we have also not calculated the metrics internally for all processed \texttt{(reference\_image, target\_image)} pairs, which would just have been done to catch the hypothetical anomalies of the complete scene changes mentioned before.
We also took an interesting little detour in our project when we attempted to parallelize training across multiple GPUs, which we believed would allow us to increase the batch size\footnote{currently limited to 4 pairs of reference and target images and their respective camera poses and intrinsics, along with the 3D points of the reference image} and thereby let larger and larger parts of our 60000+ training-ready sequences with associated point clouds be used for learning by our recreated model.
This would have assisted the model in better avoiding local minima and maxima.
But since TensorFlow's conversion procedure for turning standard single-GPU scripts into multi-GPU ones is still an evolving process requiring careful attention to resource allocation issues among the various replicas of the parallelizable aspects of the model\footnote{such as the dataset generator, the loss functions aggregator, etc.} spread across GPUs, our training got cut short after a good start by a resource exhaustion error at training step 178.
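For concreteness, the schematic sketch referred to above outlines the shape of this evaluation loop.
It is an illustrative reconstruction rather than a verbatim excerpt from our \texttt{test.py}: \texttt{predict\_mpi} and \texttt{render\_view} are hypothetical stand-ins for the actual MPI inference and \texttt{mpi.render} calls, the LPIPS scorer is passed in as a callable so as not to tie the sketch to a particular library version, and the scikit-image metric functions are merely one possible choice of reference implementations (the \texttt{channel\_axis} argument assumes a recent scikit-image release).

\begin{verbatim}
# Illustrative sketch of the per-pair evaluation described above; names are
# hypothetical stand-ins, not verbatim code from test.py.
import numpy as np
from skimage.metrics import peak_signal_noise_ratio, structural_similarity

def evaluate_pairs(pairs, predict_mpi, render_view, lpips_fn):
    """pairs yields (reference, target, pose, intrinsics) tuples 5 frames apart;
    images are assumed to be float arrays scaled to [0, 1]."""
    psnr_vals, ssim_vals, lpips_vals = [], [], []
    for reference, target, pose, intrinsics in pairs:
        layers = predict_mpi(reference)                   # single-view MPI inference
        rendered = render_view(layers, pose, intrinsics)  # re-render the target view
        psnr_vals.append(peak_signal_noise_ratio(target, rendered, data_range=1.0))
        ssim_vals.append(structural_similarity(target, rendered,
                                               channel_axis=-1, data_range=1.0))
        lpips_vals.append(float(lpips_fn(target, rendered)))  # injected LPIPS scorer
    return np.mean(psnr_vals), np.mean(ssim_vals), np.mean(lpips_vals)
\end{verbatim}

Averaging such per-pair values over hundreds of (\texttt{reference\_image}, \texttt{target\_image}) pairs is what produces the per-variant numbers reported later in this chapter.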
Nevertheless, despite this early stop, we computed all three metrics for this additional model variant as well --- retrained on MannequinChallenge data using \texttt{tf.distribute.MirroredStrategy} and capable of harnessing the power of multiple GPUs.
The rest of this chapter presents the results of the experiments done with the various model variants and the baseline pretrained model.
We then cap it all off by presenting the results of incorporating OpenFace 2.2 into the inference pipeline.
As of this writing, our generator is only able to pick random pairs of reference and target frames from the 333 \texttt{test-yes/} videos.
Sequential pair-picking would avoid possible repetition of selected pairs and allow for an exhaustive coverage of the test set.
Given that even the smaller of the two datasets has 100,000+ frames, and that we have not been able to resolve the issue of the synthesized disparity maps becoming smudgier and smudgier until they turn completely gray/monochromatic even before any of the variants hit 14,000 training steps, it is not very likely that the model will see the same frame twice.
So perhaps computing evaluation metrics with training data can double as doing the same with validation data, even though we have not set aside a separate validation split.
As for the metrics, an LPIPS value of 0 indicates either a perfect match between the images being compared or that they are one and the same.
By contrast, an SSIM value of 1 indicates a perfect match.
Both these metrics range from 0 to 1.
PSNR values, measured in decibels (dB), do not generally have an upper limit, but values of 20~dB and higher are considered acceptable.
In calibrating our implementations of these metrics, when we compared an image with itself, we found the mean LPIPS, SSIM and PSNR values over 300 images to be close to 0, 1, and 30, respectively.
\begin{figure}[!h]
\includegraphics[width=1\columnwidth]{figures/model-variants-metrics.png}
\caption{Model Variants' Mean PSNR, SSIM, and LPIPS Evaluation Values over 300 Testing Instances}
\label{fig:model-variants-metrics}
\end{figure}
We can get a sense of how the variants stack up against one another from figure~\ref{fig:model-variants-metrics}.
Perceptual similarity comes the closest to the way humans judge the picture quality of an image.
Hence, we chose the variant northern-monkey-4 for the final step of simulating a video chat.
These interesting names are automatically allotted by wandb.ai at the start of any training run.
If the run is relatively successful, we use the final model produced by it as one of our variants and evaluate its performance.
All our variants have been trained to the point where the loss, after having come down all the way from 1188, drops below 1 and stagnates.
This has always occurred before 25,000 training steps for all our variants (Figure~\ref{fig:mean-loss}).
It goes to show that, had we been entirely successful in our implementation of the model, we would have been able to train for well over 100,000 steps, similarly to Tucker and Snavely~\cite{single_view_mpi}.
\begin{figure}[!h]
\includegraphics[width=0.75\columnwidth]{figures/mean-loss.png}
\caption{Typical Mean Loss Chart for Any of Our Training Runs}
\label{fig:mean-loss}
{\small wandb.ai somehow always shows twice the number of actual training steps completed on our server.
Hence all our variants' training stagnates at 30,000+ steps and not at the 60,000+ steps shown in this wandb.ai-logged loss chart.}
\end{figure}
\begin{figure}[!h]
\begin{tabular}{cccc}
\subfloat[]{\includegraphics[width = 1.3in]{figures/baseline/000001_image_disparity.png}} &
\subfloat[PSNR $\uparrow$ Target vs Rendered = 12.345]{\includegraphics[width = 1.3in]{figures/baseline/000001_image_reference.png}} &
\subfloat[SSIM $\uparrow$ Target vs Rendered = 0.509]{\includegraphics[width = 1.3in]{figures/baseline/000001_image_target.png}} &
\subfloat[LPIPS $\downarrow$ Target vs Rendered = 0.520]{\includegraphics[width = 1.3in]{figures/baseline/000001_image_render.png}}\\
\subfloat[]{\includegraphics[width = 1.3in]{figures/gallant-eon-27/000001_image_disparity.png}} &
\subfloat[PSNR $\uparrow$ Target vs Rendered = 12.300]{\includegraphics[width = 1.3in]{figures/gallant-eon-27/000001_image_reference.png}} &
\subfloat[SSIM $\uparrow$ Target vs Rendered = 0.470]{\includegraphics[width = 1.3in]{figures/gallant-eon-27/000001_image_target.png}} &
\subfloat[LPIPS $\downarrow$ Target vs Rendered = 0.338]{\includegraphics[width = 1.3in]{figures/gallant-eon-27/000001_image_render.png}}\\
\subfloat[]{\includegraphics[width = 1.3in]{figures/giddy-microwave-29/000001_image_disparity.png}} &
\subfloat[PSNR $\uparrow$ Target vs Rendered = 12.282]{\includegraphics[width = 1.3in]{figures/giddy-microwave-29/000001_image_reference.png}} &
\subfloat[SSIM $\uparrow$ Target vs Rendered = 0.470]{\includegraphics[width = 1.3in]{figures/giddy-microwave-29/000001_image_target.png}} &
\subfloat[LPIPS $\downarrow$ Target vs Rendered = 0.338]{\includegraphics[width = 1.3in]{figures/giddy-microwave-29/000001_image_render.png}}
\end{tabular}
\caption{Baseline and MannequinChallenge+RealEstate10K-based Model Variants' Output Visualizations with a MannequinChallenge Target Frame}
\label{fig:output-visualizations-1}
{\small Variants from top to bottom: baseline, gallant-eon-27, giddy-microwave-29\\Outputs from left to right: disparity map, reference frame, target frame, rerendered target}
\end{figure}
What further validates our choice of northern-monkey-4 is the set of output visualizations for all relatively successful model variants shown in figures~\ref{fig:output-visualizations-1} and~\ref{fig:output-visualizations-2}.
These outputs further reveal that, even prior to all our fine-tuning, the pretrained model found it hard to synthesize the disparity for video-chat-relevant frames.
In the testing example used, the person has clearly moved closer to the camera, but the frame synthesized by the baseline model shows ``stack of cards'' effects.
This could also be the reason that, while the picture quality of the renderings seems to have been greatly improved by our fine-tuning (evident from the improved LPIPS values), the already nebulous disparity synthesis (when it comes to video chat frames) has degraded further.
It also stands to reason that depth/disparity is perhaps weighted more heavily by the SSIM metric than by the other two, owing to the stark decrease in SSIM values for the fine-tuned variants.
It is structural similarity after all, and we have already established that depth is part of the 3D structure of the scene.
Hence, our decision to retrain the baseline model in the first place has been further validated.
\begin{figure}[!h] \begin{tabular}{cccc} \subfloat[]{\includegraphics[width = 1.3in]{figures/northern-monkey-4/000001_image_disparity.png}} & \subfloat[PSNR $\uparrow$ Target vs Rendered = 12.315]{\includegraphics[width = 1.3in]{figures/northern-monkey-4/000001_image_reference.png}} & \subfloat[SSIM $\uparrow$ Target vs Rendered = 0.470]{\includegraphics[width = 1.3in]{figures/northern-monkey-4/000001_image_target.png}} & \subfloat[LPIPS $\downarrow$ Target vs Rendered = 0.337]{\includegraphics[width = 1.3in]{figures/northern-monkey-4/000001_image_render.png}}\\ \subfloat[]{\includegraphics[width = 1.3in]{figures/sunny-grass-5/000001_image_disparity.png}} & \subfloat[PSNR $\uparrow$ Target vs Rendered = 12.315]{\includegraphics[width = 1.3in]{figures/sunny-grass-5/000001_image_reference.png}} & \subfloat[SSIM $\uparrow$ Target vs Rendered = 0.470]{\includegraphics[width = 1.3in]{figures/sunny-grass-5/000001_image_target.png}} & \subfloat[LPIPS $\downarrow$ Target vs Rendered = 0.337]{\includegraphics[width = 1.3in]{figures/sunny-grass-5/000001_image_render.png}}\\ \subfloat[]{\includegraphics[width = 1.3in]{figures/fast-monkey-7/000001_image_disparity.png}} & \subfloat[PSNR $\uparrow$ Target vs Rendered = 12.284]{\includegraphics[width = 1.3in]{figures/fast-monkey-7/000001_image_reference.png}} & \subfloat[SSIM $\uparrow$ Target vs Rendered = 0.471]{\includegraphics[width = 1.3in]{figures/fast-monkey-7/000001_image_target.png}} & \subfloat[LPIPS $\downarrow$ Target vs Rendered = 0.339]{\includegraphics[width = 1.3in]{figures/fast-monkey-7/000001_image_render.png}}\\ \end{tabular} \caption{MannequinChallenge-based Model Variants' Output Visualizations with a MannequinChallenge Target Frame} \label{fig:output-visualizations-2} {\small Variants from top to bottom: northern-monkey-4, sunny-grass-5, fast-monkey-7\\Outputs from left to right: disparity map, reference frame, target frame, rerendered target} \end{figure} In reference to the qualitative results presented throughout this work, Tucker and Snavely used pointers such as the handling of occluded content, the production of undesirable artifacts at the edges of foreground objects, and so on to qualitatively compare the discrepancies in the results generated by each model variant. In addition to visually checking for these, we, like the authors, found that visually checking the disparity maps is also useful in verifying the quality of the MPIs produced. We encourage the reader to zoom into the electronic version of this thesis or take to the GitHub repository accompanying this work (Section~\ref{sec:code-sources}) for easier visual verification. % To cap it off with the help of OpenFace 2.2, we also include a few snapshots of how a rerendered frames vary with changes in head pose in figure~\ref{fig:rerendered-with-openface}. % As the girl looks to the left the opposite concurrent video frame moves to the right and vice versa % Please head over to the github repo for this project to observe videos of simultaneous changes simulating a two way video chat.
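To make the head-pose-driven re-rendering mentioned earlier in this chapter more concrete, the following is a minimal, hypothetical sketch of how a viewer's head pose could steer the virtual camera used to re-render the other party's frames from their MPI representation.
All names here are illustrative assumptions rather than our actual pipeline code: \texttt{get\_head\_pose} stands in for whatever pose source is used (for instance, yaw and pitch estimates from a face tracker such as OpenFace 2.2), \texttt{predict\_mpi} and \texttt{render\_view} again stand in for MPI inference and rendering, and the mapping from head rotation to camera offset, including its sign convention, is just one plausible choice.

\begin{verbatim}
# Hypothetical coupling of viewer head pose to novel-view rendering.
import numpy as np

def pose_to_camera(yaw_deg, pitch_deg, max_offset=0.05):
    """Map small head rotations to a small lateral/vertical camera offset."""
    tx = max_offset * np.clip(yaw_deg / 30.0, -1.0, 1.0)    # look left/right
    ty = max_offset * np.clip(pitch_deg / 30.0, -1.0, 1.0)  # look up/down
    pose = np.eye(4)      # camera transform; rotation left as identity here
    pose[0, 3] = -tx      # move the virtual camera opposite to the gaze shift
    pose[1, 3] = -ty
    return pose

def simulate_chat(frames, predict_mpi, render_view, get_head_pose, intrinsics):
    """Re-render each incoming frame from a head-pose-dependent viewpoint."""
    for frame in frames:
        layers = predict_mpi(frame)      # MPI for the current remote frame
        yaw, pitch = get_head_pose()     # local viewer's head pose
        yield render_view(layers, pose_to_camera(yaw, pitch), intrinsics)
\end{verbatim}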
{ "alphanum_fraction": 0.7848271036, "avg_line_length": 180.5157894737, "ext": "tex", "hexsha": "63497c08827e554ab86a97f422ddc1220a49e7da", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "c1161004f6f65f40143754d87a4663796a705a5d", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "au001/thesis-template", "max_forks_repo_path": "chapters/ch4-experiments-results/experiments-results.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "c1161004f6f65f40143754d87a4663796a705a5d", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "au001/thesis-template", "max_issues_repo_path": "chapters/ch4-experiments-results/experiments-results.tex", "max_line_length": 2362, "max_stars_count": null, "max_stars_repo_head_hexsha": "c1161004f6f65f40143754d87a4663796a705a5d", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "au001/thesis-template", "max_stars_repo_path": "chapters/ch4-experiments-results/experiments-results.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 4146, "size": 17149 }
\section{Appendix} \subsection{Acknowledgements} \begin{table}[H] \centering \begin{tabular}{|p{0.15\textwidth}|p{0.6\textwidth}|p{0.05\textwidth}|p{0.15\textwidth}|} \hline Name & Use & & Subsystem \\ \hline Rider & Primary IDE for development & \href{https://www.jetbrains.com/rider}{link} & DEV \\ \hline YouTrack & Issue tracking software & \href{https://www.jetbrains.com/youtrack}{link} & DEV \\ \hline VSCode & Editor used for developing smart contracts & \href{https://code.visualstudio.com}{link} & DEV \\ \hline RemixIDE & Web based IDE for developing, testing, deploying and interacting with smart contracts & \href{https://remix.ethereum.org/}{link} & DEV \\ \hline Azure DevOps & CD/CI pipeline for running tests and deploying to Azure & \href{https://azure.microsoft.com/en-us/services/devops}{link} & DEV \\ \hline Azure & Cloud based deployment & \href{https://azure.microsoft.com/}{link} & DEV \\ \hline Ethereum & Public open source blockchain software with smart contract support & \href{https://ethereum.org}{link} & BLOCK \\ \hline Solidity & Smart contract programming compiler & \href{https://docs.soliditylang.org}{link} & BLOCK \\ \hline Metamask & Browser extension based Ethereum wallet with over 30 million users & \href{https://metamask.io}{link} & BLOCK \\ \hline Hardhat & Smart contract development environment & \href{https://hardhat.org}{link} & BLOCK \\ \hline Waffle & Smart contract testing framework & \href{https://getwaffle.io}{link} & BLOCK \\ \hline ASP.NET & Web application/service framework for .NET & \href{https://dotnet.microsoft.com/en-us/apps/aspnet}{link} & BACK-END \\ \hline .NET & Cross platform development framework for C\# & \href{https://dotnet.microsoft.com/en-us/}{link} & BACK-END \\ \hline EFCore & Object database mapper for .NET & \href{https://docs.microsoft.com/en-us/ef/core/}{link} & BACK-END \\ \hline Nethereum & Ethereum interaction library for .NET & \href{https://nethereum.com/}{link} & BACK-END \\ \hline NUnit & .NET test runner & \href{https://nunit.org/}{link} & BACK-END \\ \hline AutoMapper & .NET object mapping library & \href{https://automapper.org/}{link} & BACK-END \\ \hline Cronos & Cron job handling library & \href{https://www.nuget.org/packages/Cronos/}{link} & BACK-END \\ \hline iTextSharp & PDF interaction and modification library used for digital signing & \href{https://www.nuget.org/packages/iTextSharp/}{link} & BACK-END \\ \hline TagLibSharp & Exif tag library used for digital signing & \href{https://github.com/mono/taglib-sharp}{link} & BACK-END \\ \hline ImageSharp & Image metadata library used for digital signing & \href{https://github.com/SixLabors/ImageSharp}{link} & BACK-END \\ \hline TypeScript & JavaScript superset providing strong typing & \href{https://www.typescriptlang.org/}{link} & FRONT-END \\ \hline Angular & JavaScript framework for development of single page dynamic web applications & \href{https://angular.io/}{link} & FRONT-END \\ \hline Clarity Design & CSS and JavaScript design framework & \href{https://clarity.design/}{link} & FRONT-END \\ \hline RxJs & Library for asynchronous JavaScript programming & \href{https://rxjs.dev/}{link} & FRONT-END \\ \hline Signalr & Web-socket platform & \href{https://www.npmjs.com/package/@microsoft/signalr}{link} & FRONT-END \\ \hline web3.js & Ethereum JavaScript API library & \href{https://www.npmjs.com/package/web3}{link} & FRONT-END \\ \hline \end{tabular} \end{table} \subsection{Operational diagrams} \begin{figure}[H] \caption{Copyright registration sequence} \centering 
\includegraphics[width=\textwidth,height=0.9\textheight,keepaspectratio]{images/operational/CopyrightRegistration} \end{figure} \begin{figure}[H] \caption{Ownership restructure sequence} \centering \includegraphics[width=\textwidth,height=\textheight,keepaspectratio]{images/operational/OwnershipRestructure} \end{figure} \begin{figure}[H] \caption{Dispute with payment expected recourse sequence} \centering \includegraphics[width=\textwidth,height=\textheight,keepaspectratio]{images/operational/PaymentDispute} \end{figure} \begin{figure}[H] \caption{User Authentication (Login/Create Account) sequence} \centering \includegraphics[width=\textwidth,height=\textheight,keepaspectratio]{images/operational/Authentication} \end{figure} \begin{figure}[H] \caption{Angular Guarding of pages sequence} \centering \includegraphics[width=\textwidth,height=\textheight,keepaspectratio]{images/operational/Guard} \end{figure} \begin{figure}[H] \caption{Blockchain event processing sequence} \centering \includegraphics[width=\textwidth,height=\textheight,keepaspectratio]{images/operational/EventProcessingQueue} \end{figure} \begin{figure}[H] \caption{Smart contract Register function flowchart} \centering \includegraphics[width=\textwidth,height=\textheight,keepaspectratio]{images/operational/Register} \end{figure} \begin{figure}[H] \caption{Resonance service (WebSocket service) sequence} \centering \includegraphics[width=\textwidth,height=\textheight,keepaspectratio]{images/operational/ResonanceService} \end{figure} \subsection{Design docs} \begin{figure}[H] \caption{Original index page wireframe} \centering \includegraphics[width=0.7\textwidth,height=\textheight,keepaspectratio]{images/appendix/design/docs/index} \end{figure} \begin{figure}[H] \caption{Original dashboard page wireframe} \centering \includegraphics[width=0.8\textwidth,height=\textheight,keepaspectratio]{images/appendix/design/docs/dash} \end{figure} \begin{figure}[H] \caption{Early system architecture diagram} \centering \includegraphics[width=0.7\textwidth,height=\textheight,keepaspectratio]{images/appendix/design/docs/arch} \end{figure} \begin{figure}[H] \caption{Copyright registration state flow diagram} \centering \includegraphics[width=0.7\textwidth,height=\textheight,keepaspectratio]{images/appendix/design/docs/reg} \end{figure} \begin{figure}[H] \caption{Ownership restructure state flow diagram} \centering \includegraphics[width=0.7\textwidth,height=\textheight,keepaspectratio]{images/appendix/design/docs/restruct} \end{figure} \begin{figure}[H] \caption{Original dispute handling flow diagram} \centering \includegraphics[width=0.7\textwidth,height=\textheight,keepaspectratio]{images/appendix/design/docs/dispute} \end{figure} \begin{figure}[H] \caption{Original blockchain event listening diagram} \centering \includegraphics[width=0.7\textwidth,height=\textheight,keepaspectratio]{images/appendix/design/docs/event} \end{figure} \subsection{Sprint reviews} Sprint reviews can also be found on the \href{https://github.com/mrharrisonbarker/crpl#readme}{README}. \includepdf[pages=-]{./images/appendix/SprintReviews.pdf} \subsection{User guide} The user guide can be found at \href{https://github.com/MrHarrisonBarker/CRPL/wiki}{https://github.com/MrHarrisonBarker/CRPL/wiki} If you want to run this software locally on your machine then I would strongly suggest following my instructions on the \href{https://github.com/MrHarrisonBarker/CRPL#readme}{README} for installation and running. 
\subsection{Test results} \label{sec:test-results} \subsubsection{Smart contract unit test results} \label{sec:test-results:smart} \includegraphics[width=0.7\textwidth,height=\textheight,keepaspectratio]{images/appendix/tests/hardhat-1} \vfill \includegraphics[width=0.7\textwidth,height=\textheight,keepaspectratio]{images/appendix/tests/hardhat-2} \subsubsection{Back-end unit test results} \label{sec:test-results:back} \includepdf[pages=-]{./images/appendix/tests/Back-end-Unit-results.pdf} \subsubsection{Front-end unit test results} \label{sec:test-results:front} \includepdf[pages=-]{./images/appendix/tests/angular-test-results.pdf}
{ "alphanum_fraction": 0.6110997338, "avg_line_length": 59.1878787879, "ext": "tex", "hexsha": "3db5b69b0cd1d208cd9c2205f6fb4c9d5a0e07bd", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "51fd422d98b1ac20b0fb25c52c68b1b95a5d36ad", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "MrHarrisonBarker/CRPL", "max_forks_repo_path": "Report/appendices.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "51fd422d98b1ac20b0fb25c52c68b1b95a5d36ad", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "MrHarrisonBarker/CRPL", "max_issues_repo_path": "Report/appendices.tex", "max_line_length": 212, "max_stars_count": null, "max_stars_repo_head_hexsha": "51fd422d98b1ac20b0fb25c52c68b1b95a5d36ad", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "MrHarrisonBarker/CRPL", "max_stars_repo_path": "Report/appendices.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 2213, "size": 9766 }
\documentclass{article}
\usepackage[utf8]{inputenc}
\usepackage{graphicx}
\graphicspath{{images/}}
\usepackage{subcaption}
\renewcommand{\familydefault}{\sfdefault}
\usepackage[a4paper]{geometry}
\usepackage{listings}
\lstset{language=SQL}
\usepackage{tcolorbox}
\newtcolorbox{keypointbox}
{
arc=0mm,
colback=red!20,
colframe=red!80,
leftrule=5pt,
toprule=0pt,
rightrule=0pt,
bottomrule=0pt
}
\setcounter{secnumdepth}{2}
\usepackage{amsmath, centernot}
\newcommand{\bigCI}{\mathrel{\text{\scalebox{1.07}{$\perp\mkern-10mu\perp$}}}}
\newcommand{\nbigCI}{\centernot{\bigCI}}
\usepackage{hyperref}
\usepackage{cleveref}
\title{Probabilistic Models and Inference}
\author{Alexander Schlögl}
\begin{document}
\maketitle
\tableofcontents
This is \textbf{my interpretation} of the lecture slides.
I tried to be very verbose and explain everything, all while removing irrelevant parts from the lecture.
Using this you should be able to pass the lecture easily.
{\large\textbf{However, I do not take responsibility for any bad results and will not accept blame from anyone.
This was a lot of work and I did it to save others (especially students of following semesters) from having to do this themselves.
Use this summary at your own risk.}}
If you have any feedback, feel free to create an issue on the \href{https://github.com/alxshine/lecture-notes}{git}.
I don't promise I will fix anything, but I will try.
\newpage
\section{Probability Theory}
\subsection{Basics}
The following are a few definitions and explanations for concepts needed in this lecture.
\subsubsection{Machine Learning}
The sole reason for this course is to train students in the basics needed for machine learning.
Why this course is held a semester \emph{after} the actual Advanced Machine Learning course I don't know.
Machine learning is the process of generating a classifying function $y(x)$ from a training set $\{x_1, ..., x_n\}$.
This classifier can then be used to determine the class a new input $x'$ belongs to.
The classifier is \emph{learned} from the training set during the \emph{training phase}.
It is then usually evaluated on its performance on a \emph{test set}.
Machine learning is very useful for finding complex classification functions, as it removes the need to develop a complex algorithm explicitly by approximating the underlying probability distribution of $X$.
The performance of the resulting classifier is heavily influenced by how closely the training set approximates the actual distribution of $X$.
A classifier's ability to classify new input after training is called its ability to \emph{generalize}.
The two main methods of machine learning are \emph{supervised}, where the training data is labelled with the corresponding classes, and \emph{unsupervised}, where the training data is unlabelled and the classifier splits the training set into different clusters.
As data taken from the real world is usually not split into training and test data, classifiers are often trained using cross validation.
With this method the available data is randomly split into training and test data, according to some desired ratio.
\subsubsection{Random Variables}
A random variable $X$ is sort of a shorthand for the set of results contained in a probability distribution.
The actual discrete probabilities are then given using the form $p(X = x_i)$ (remember that for a continuous case $p(X = x_i) = 0$).
In his lecture slides Antonio uses functions ($f(x)$) and random variables ($X$) sort of interchangeably.
This is correct from a mathematical standpoint, but might be confusing for students.
Just remember that they are essentially different terms for the same thing.
Even worse, I sometimes use $X$ as a random variable, and sometimes as a set.
This is also correct, but again might confuse some readers.
However, it should be pretty clear from context which I mean, and also there shouldn't be any cases where it makes any tangible difference.
\subsubsection{Expectation}
Expectation is exactly what the name says: the weighted average value one can expect when drawing from a random source.
It is the average of the results, weighted by their probabilities.
The formula for this is as follows: $E(f) = \sum_x p(x) f(x)$ for discrete cases and $E(f) = \int p(x) f(x) dx$ for continuous cases.
Given a finite number of points drawn from a distribution, the expectation can be approximated using the mean: $E(f) \approx \frac{1}{N} \sum_{n=1}^N f(x_n)$.
\subsubsection{Covariance}
The \emph{variance} of a function measures how ``broad'' it is (how much variability of values it has around its mean value).
It is calculated according to the formula $var[f] = E[(f(x) - E[f(x)])^2] = E[f(x)^2] - E[f(x)]^2$.
\emph{Covariance} expresses the extent to which distributions vary \emph{together}.
For two distributions it is given as $cov[x,y] = E_{x,y} [xy] - E[x]E[y]$.
\subsubsection{The Gaussian Distribution}
The Gaussian or \emph{Normal Distribution} is given by the formula
\begin{equation}
\mathcal{N}(x|\mu, \sigma^2) = \frac{1}{\sqrt{2\pi\sigma^2}} \exp \left[ -\frac{1}{2\sigma^2} (x-\mu)^2 \right]
\end{equation}
\subsection{Rules of Probabilities}
There are some rules you can use when working with probabilities.
Call them the ``rules of probability algebra'' if you will.
I will try to explain them using intuitive terms.
The one function we need for this is the count function $\#(X)$, which gives the number of elements contained in a set.
\subsubsection{Sum Rule}
The sum rule relates joint and marginal probabilities; we can derive it by estimating both from a set of random samples.
Imagine we draw $N$ samples from discrete random distributions $X$ and $Y$, counting them and writing the counts into a table.
The column index $i$ tells us which value $X$ has ($X = x_i$), and the row index $j$ does the same for $Y$ ($Y = y_j$).
Contained in the cell $n_{ij}$ is the count of how often $X=x_i, Y=y_j$ occurred.
This gives us the formula $P(X=x_i, Y=y_j) = \frac{n_{ij}}{N}$ for the joint probability of $X=x_i, Y=y_j$.
The \emph{marginal} probability of $X=x_i$ is $\frac{\#(X = x_i)}{N}$.
$\#(X = x_i)$ is the total count in column $i$, which is the sum of all cells in that column.
This gives us the sum rule:
\begin{equation}
P(X = x_i) = \frac{\#(X=x_i)}{N} = \frac{1}{N} \sum_j \#(X = x_i, Y = y_j) = \sum_j P(X=x_i, Y=y_j)
\end{equation}
\subsubsection{Product Rule}
Sticking with the example from above, we can also find a rule for calculating \emph{conditional probabilities}.
$P(Y=y_j)$ is the sum of counts in row $j$, divided by the total sum of the table.
The conditional probability $P(Y=y_j|X=x_i)$ is then $n_{ij}$, divided by the count in column $i$:
\begin{equation}
P(Y=y_j|X=x_i) = \frac{n_{ij}}{c_i} = \frac{n_{ij}}{\sum_{j'} n_{ij'}}
\end{equation}
This gives us the following three equations:
\begin{align}
P(Y=y_j|X=x_i) &= \frac{n_{ij}}{c_i}\\
P(X=x_i, Y=y_j) &= \frac{n_{ij}}{N}\\
P(X=x_i) &= \frac{c_i}{N}
\end{align}
resulting in the product rule
\begin{equation}
P(X=x_i, Y=y_j) = \frac{n_{ij}}{N} = \frac{n_{ij}}{c_i} \frac{c_i}{N} = P(Y=y_j | X=x_i) P(X=x_i)
\end{equation}
\begin{keypointbox}
As a quicker notation for $P(X=x_i)$, $P(X)$ is often used.
As $x_i$ is an arbitrary fixed value in $X$, this doesn't make any difference whatsoever anywhere.
\end{keypointbox}
\subsubsection{Bayes' Theorem}
From the product rule we know that $P(X,Y) = P(X|Y) P(Y)$.
This also works in the opposite direction: $P(X|Y) = \frac{P(Y,X)}{P(Y)}$.
If we now expand $P(Y,X)$ in the numerator using the product rule, we get Bayes' Theorem:
\begin{equation}
P(X|Y) = \frac{P(Y|X) P(X)}{P(Y)}
\end{equation}
\subsubsection{Likelihood}
Given a set of parameters of a probability distribution $\mathbf{w}$ and a set of observed events $\mathcal{D}$ we can calculate the probability of $\mathcal{D}$.
Using Bayes' theorem we can also calculate the most likely parameters $\mathbf{w}$ that gave rise to $\mathcal{D}$.
This is called the Maximum Likelihood method.
The formula is a simple application of Bayes' theorem, and looks as follows:
\begin{equation}
P(\mathbf{w}|\mathcal{D}) = \frac{P(\mathcal{D}|\mathbf{w})\, P(\mathbf{w})}{P(\mathcal{D})}
\end{equation}
Here $P(\mathcal{D}|\mathbf{w})$ is the \emph{likelihood} of the parameters $\mathbf{w}$.
Then we look for the set of parameters that has the highest likelihood.
As this is very hard to find, we often use other methods, but all use this as their basis.
Note also that $P(\mathcal{D})$ is identical for all parameter combinations, so we can just ignore it.
\subsection{Probability Densities}
As we also want to consider probabilities for continuous domains, we are interested in probability densities.
The probability of some $x \in \mathbf{R}$ falling in $(x, x+\delta x)$ is given by $p(x)\delta x$.
If $\delta x \rightarrow 0$ then $p$ is called the probability density over $x$.
Probability densities fulfill the following conditions:
\begin{align}
p(x \in (a,b)) &= \int_a^b p(x) dx\\
p(x) &\ge 0\\
\int_{-\infty}^{\infty} p(x) dx &= 1
\end{align}
The sum and product rules take the forms:
\begin{align}
p(x) &= \int p(x,y) dy\\
p(x, y) &= p(y|x) p(x)
\end{align}
\subsubsection{Independent Variables}
Iff the joint probability $P(X,Y)$ factorizes into the product of marginal probabilities $P(X)P(Y)$, then $X$ and $Y$ are said to be independent.
This also means that $P(Y|X) = P(Y)$.
\subsubsection{Conditional Independence}
Even if $X$ and $Y$ are not independent, they still might be independent given some $Z$.
This means that $P(X,Y | Z) = P(X|Z) P(Y|Z)$ and is written as:
\begin{equation}
X \bigCI Y \ |\ Z
\end{equation}
Note that this is directly related to independence, because then we would require $X \bigCI Y\ |\ \emptyset$, and $P(X,Y) = P(X)P(Y)$.
\section{Distribution Estimation}
As we can't find the actual underlying distribution just by observing events, we resort to using models for approximating it.
A lot of these models are graphical, because this makes the complex relationship between different random variables easier to understand.
Representing the system as a graph also allows us to use algorithms from graph theory.
In general the nodes (or vertices) represent random variables, and the links (edges) represent probabilistic relationships.
\subsection{Bayesian Networks}
Bayesian Networks represent relationships between probabilities using directed acyclic graphs (DAGs).
They can be used to make predictions for random variables and their relations given a set of observations.
Nodes that are not connected in the DAG are conditionally independent given suitable other nodes; this is made precise by D-separation below.
A DAG $X$ is a Bayesian network iff it satisfies the \emph{Local Markov Property}: each variable is conditionally independent of its non-descendants given its parent variables, in other words:
\begin{multline}
P(X_v = x_v | X_i = x_i \textup{ for each $X_i$ which is not a descendant of $X_v$}) \\
= P(X_v = x_v | X_j = x_j \textup{ for each $X_j$ which is a parent of $X_v$})
\end{multline}
\begin{keypointbox}
The set of parents of $X_v$ is a subset of non-descendants of $X_v$ because the graph is acyclic.
This means that we only need to know the parents of $X_v$ to know $P(X_v = x_v)$, and not the entirety of the other variables.
\end{keypointbox}
The basis for Bayesian networks is repeated application of the product rule for probabilities.
So, for the example of \Cref{im:Bayesnet}, the joint probability $P(G, S, R)$ is split into $P(G|S, R)P(S|R)P(R)$ ($G$ is for grass wet, $S$ is for sprinklers and $R$ is for rain).
\begin{figure}[h]
\center
\includegraphics[width=0.4\textwidth]{bayes-net.png}
\caption{By AnAj - http://en.wikipedia.org/wiki/File:SimpleBayesNet.svg, Public Domain, https://commons.wikimedia.org/w/index.php?curid=24448318}
\label{im:Bayesnet}
\end{figure}
If we now observe $G$ (i.e., we learn whether the grass is wet), then we can apply Bayes' theorem to calculate $P(S|G)$ and $P(R|G)$.
\subsubsection{Curve fitting using Bayesian Networks}
While Bayesian Networks cannot be used to fit a curve directly, they can make calculating the probability of the parameters (likelihood) given the observed data easier.
In order to build the network for this use case we simply recall that all data points are random variables depending on $\mathbf{w}$, which is our set of parameters.
This is shown in \Cref{im:dependency}.
\begin{figure}[h]
\center
\includegraphics[width=0.4\textwidth]{dependency.png}
\caption{Dependency of the data points on the parameters}
\label{im:dependency}
\end{figure}
In Bayesian Networks repeating variables are often combined using a blue box like the one visible in \Cref{im:parameters} to save space.
\Cref{im:parameters} also contains the variables the parameters themselves depend on, as well as the variance $\sigma^2$.
\begin{figure}[h]
\center
\includegraphics[width=0.6\textwidth]{bnet-parameters.png}
\caption{Calculating the likelihood of a given parameter set using a Bayesian network}
\label{im:parameters}
\end{figure}
\subsubsection{Conditional Independence}
Conditional Independence can be read directly from the structure of a Bayesian Network, using a process called D-separation.
For D-separation we utilize the fact that there are three possible ways two nodes can be directly related: Head-to-Head, Head-to-Tail and Tail-to-Tail.
The possibilities are shown in \Cref{im:relations}.
\begin{figure}[h]
\centering
\begin{subfigure}[b]{0.3\textwidth}
\includegraphics[width=\textwidth]{hth.png}
\caption{Head-to-Head}
\end{subfigure}
\begin{subfigure}[b]{0.3\textwidth}
\includegraphics[width=\textwidth]{htt.png}
\caption{Head-to-Tail}
\end{subfigure}
\begin{subfigure}[b]{0.3\textwidth}
\includegraphics[width=\textwidth]{ttt.png}
\caption{Tail-to-Tail}
\end{subfigure}
\caption{The possible relations in Bayesian networks}
\label{im:relations}
\end{figure}
Now, for actually determining whether or not two variables are conditionally independent we consider the path between them in the Bayesian Network.
They are conditionally independent iff every path between them is blocked.
Let's consider the case of a Tail-to-Tail connection, i.e.\ a common parent $c$ with links pointing to both $a$ and $b$.
If $c$ is unobserved, the path is unblocked, which can be shown by marginalizing over $c$:
\begin{equation}
p(a,b) = \sum_c p(a|c) p(b|c) p(c) \neq p(a)\, p(b) \text{ (in general)} \quad \rightarrow \quad a \nbigCI b\ |\ \emptyset
\end{equation}
As soon as $c$ is observed however, this path becomes blocked:
\begin{equation}
p(a,b|c) = \frac{p(a,b,c)}{p(c)} = \frac{p(a|c) p(b|c) p(c)}{p(c)} = p(a|c) p(b|c) \quad \rightarrow \quad a \bigCI b\ |\ c
\end{equation}
The case for Head-to-Tail relationships is very similar: the path also becomes blocked as $c$ is observed.
Head-to-Head relationships are the odd one out, as the path is blocked while $c$ is unobserved and becomes \emph{unblocked} as soon as $c$ (or one of its descendants) is observed.
\subsubsection{Markov Blanket or Markov Boundary}
The Markov Blanket for node $x_n$ is the set of all nodes that $x_n$ directly depends on: its parents, its children, and its children's other parents.
This means that fixing all nodes in the Markov Blanket of $x_n$ allows us to calculate the conditional distribution of $x_n$, and splits it from the rest of the graph.
By doing this repeatedly (especially in the case of directed relationships), we can fix all values by starting from our observations.
The possibility of easily seeing where this is applicable is what makes graphical models so powerful.
\subsection{Markov Random Fields}
\subsection{Mixture of Gaussians}
\subsection{Restricted Boltzmann Machines}
\subsection{Neural Networks}
\end{document}
{ "alphanum_fraction": 0.7462773912, "avg_line_length": 49.6096774194, "ext": "tex", "hexsha": "22785adef12e3330e1e749a92fc2024ced3aba86", "lang": "TeX", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2019-01-23T14:22:13.000Z", "max_forks_repo_forks_event_min_datetime": "2019-01-23T14:22:13.000Z", "max_forks_repo_head_hexsha": "c93c84943a2c1a88e86e824d8f686a9561ddf625", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "alxshine/dws_summary", "max_forks_repo_path": "pmi/summary.tex", "max_issues_count": 1, "max_issues_repo_head_hexsha": "c93c84943a2c1a88e86e824d8f686a9561ddf625", "max_issues_repo_issues_event_max_datetime": "2019-01-23T15:14:02.000Z", "max_issues_repo_issues_event_min_datetime": "2019-01-23T14:42:36.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "alxshine/dws_summary", "max_issues_repo_path": "pmi/summary.tex", "max_line_length": 262, "max_stars_count": 4, "max_stars_repo_head_hexsha": "c93c84943a2c1a88e86e824d8f686a9561ddf625", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "alxshine/dws_summary", "max_stars_repo_path": "pmi/summary.tex", "max_stars_repo_stars_event_max_datetime": "2019-01-23T14:49:41.000Z", "max_stars_repo_stars_event_min_datetime": "2018-01-24T09:12:48.000Z", "num_tokens": 4251, "size": 15379 }
\documentclass[11pt,a4paper]{report} \usepackage{isabelle,isabellesym} % further packages required for unusual symbols (see also % isabellesym.sty), use only when needed %\usepackage{amssymb} %for \<leadsto>, \<box>, \<diamond>, \<sqsupset>, \<mho>, \<Join>, %\<lhd>, \<lesssim>, \<greatersim>, \<lessapprox>, \<greaterapprox>, %\<triangleq>, \<yen>, \<lozenge> %\usepackage{eurosym} %for \<euro> %\usepackage[only,bigsqcap]{stmaryrd} %for \<Sqinter> %\usepackage{eufrak} %for \<AA> ... \<ZZ>, \<aa> ... \<zz> (also included in amssymb) %\usepackage{textcomp} %for \<onequarter>, \<onehalf>, \<threequarters>, \<degree>, \<cent>, %\<currency> % this should be the last package used \usepackage{pdfsetup} % urls in roman style, theory text in math-similar italics \urlstyle{rm} \isabellestyle{it} % for uniform font size %\renewcommand{\isastyle}{\isastyleminor} \begin{document} \title{CakeML} \author{Lars Hupel, Yu Zhang} \maketitle \begin{abstract} CakeML is a functional programming language with a proven-correct compiler and runtime system. This entry contains an unofficial version of the CakeML semantics that has been exported from the Lem specifications to Isabelle. Additionally, there are some hand-written theory files that adapt the exported code to Isabelle and port proofs from the HOL4 formalization, e.g.\ termination and equivalence proofs. \end{abstract} \tableofcontents \clearpage \section*{Contributors} The export script has been written by Lars Hupel. Hand-written theory files, including definitions and proofs, have been developed by Lars Hupel and Yu Zhang. Lem is a project by Peter Sewell et.al. Contributors can be found on its project page\footnote{\url{https://www.cl.cam.ac.uk/~pes20/lem/}} and on GitHub.\footnote{\url{https://github.com/rems-project/lem/graphs/contributors}} CakeML is a project with many developers and contributors that can be found on its project page\footnote{\url{https://cakeml.org/}} and on GitHub.\footnote{\url{https://github.com/CakeML/cakeml/graphs/contributors}} In particular, Fabian Immler and Johannes \AA{}man Pohjola have contributed Isabelle mappings for constants in the Lem specification of the CakeML semantics. % sane default for proof documents \parindent 0pt\parskip 0.5ex % generated text of all theories \input{session} % optional bibliography %\bibliographystyle{abbrv} %\bibliography{root} \end{document} %%% Local Variables: %%% mode: latex %%% TeX-master: t %%% End:
{ "alphanum_fraction": 0.7444933921, "avg_line_length": 31.2125, "ext": "tex", "hexsha": "3515a431df9c22f35c6617ac23a6cf73ae2ddd14", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "de967b58d29a18b168646dd00747172af6637f44", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "larsrh/isabelle-cakeml", "max_forks_repo_path": "thy/document/root.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "de967b58d29a18b168646dd00747172af6637f44", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "larsrh/isabelle-cakeml", "max_issues_repo_path": "thy/document/root.tex", "max_line_length": 215, "max_stars_count": null, "max_stars_repo_head_hexsha": "de967b58d29a18b168646dd00747172af6637f44", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "larsrh/isabelle-cakeml", "max_stars_repo_path": "thy/document/root.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 689, "size": 2497 }
%//////////////////////////////////////////////////////////////////////////////
%
% Copyright (c) 2012-2017 Daniel Adler <[email protected]>,
%                         Tassilo Philipp <[email protected]>
%
% Permission to use, copy, modify, and distribute this software for any
% purpose with or without fee is hereby granted, provided that the above
% copyright notice and this permission notice appear in all copies.
%
% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
%
%//////////////////////////////////////////////////////////////////////////////

\subsection{SPARC64 Calling Convention}

\paragraph{Overview}

The SPARC family of processors is based on the SPARC instruction set architecture, which comes in basically three revisions, V7, V8\cite{SPARCV8}\cite{SPARCSysV} and V9\cite{SPARCV9}\cite{SPARCV9SysV}.
The former two are 32-bit (see previous chapter) whereas the latter refers to the 64-bit SPARC architecture.
SPARC uses big endian byte order; however, V9 also supports little endian byte order, but for data access only, not instruction access.\\
\\
There are two proposals, one from Sun and one from Hal, which disagree on how to handle some aspects of this calling convention.\\

\paragraph{\product{dyncall} support}

\product{dyncall} fully supports the SPARC 64-bit instruction set (V9), for calls and callbacks.

\subsubsection{SPARC (64-bit) Calling Convention}

\begin{itemize}
\item 32 double precision floating point registers (d0,d2,d4,...,d62, usable as 16 quad precision ones q0,q4,q8,...,q60, and the first half of them is also usable as 32 single precision registers f0-f31)
\item 32 64-bit integer/pointer registers out of a bigger (vendor/model dependent) number that are accessible at a time (8 are global ones (g*), whereas the remaining 24 form a register window with 8 input (i*), 8 output (o*) and 8 local (l*) ones)
\item calling a function shifts the register window: the old output registers become the new input registers (old local and input ones are not accessible anymore)
\item stack and frame pointer are offset by a BIAS of 2047 (see official doc for reasons)
\end{itemize}

\begin{table}[h]
\begin{tabular*}{0.95\textwidth}{lll}
Name                          & Alias                          & Brief description\\
\hline
{\bf \%g0}                    & \%r0                           & Read-only, hardwired to 0 \\
{\bf \%g1-\%g7}               & \%r1-\%r7                      & Global \\
{\bf \%o0-\%o3 and \%i0-\%i3} & \%r8-\%r11 and \%r24-\%r27     & Output and input argument registers, return value \\
{\bf \%o4,\%o5 and \%i4,\%i5} & \%r12,\%r13 and \%r28,\%r29    & Output and input argument registers \\
{\bf \%o6 and \%i6}           & \%r14 and \%r30, \%sp and \%fp & Stack and frame pointer (NOTE, value is pointing to stack/frame minus a BIAS of 2047) \\
{\bf \%o7 and \%i7}           & \%r15 and \%r31                & Return address (caller writes to o7, callee uses i7) \\
{\bf \%l0-\%l7}               & \%r16-\%r23                    & preserve \\
{\bf \%d0,\%d2,\%d4,\%d6}     &                                & Floating point arguments, return value \\
{\bf \%d8,\%d10,...,\%d30}    &                                & Floating point arguments \\
{\bf \%d32,\%d36,...,\%d62}   &                                & scratch (but, according to Hal, \%d16,...,\%d46 are preserved) \\
\end{tabular*}
\caption{Register usage on sparc64 calling convention}
\end{table}
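As a small illustration of the BIAS (a hypothetical example, not a quote from the official documents): because \%sp holds the actual stack pointer value minus 2047, a callee addresses the first slot of its register save area, which sits at the stack-pointer end of its frame, as \%sp~+~2047, and every other frame location is likewise reached by adding the BIAS to its conceptual offset.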
\paragraph{Parameter passing}

\begin{itemize}
\item stack grows down
\item stack parameter order: right-to-left
\item caller cleans up the stack
\item stack frame is always aligned to 16 bytes
\item first 6 integers are passed in registers using \%o0-\%o5
\item first 8 quad precision floating point args (or 16 double precision, or 32 single precision) are passed in floating point registers (\%q0,\%q4,...,\%q28 or \%d0,\%d2,...,\%d30 or \%f0-\%f31, respectively)
\item for every other argument the stack is used
\item single precision floating point args are passed in odd \%f* registers, and are ``right aligned'' in their 8-byte space on the stack
\item for every argument passed, corresponding \%o*, \%f* register or stack space is skipped (e.g. passing a double as 3rd call argument, \%d4 is used and \%o2 is skipped)
\item all arguments \textless=\ 64 bit are passed as 64 bit values
\item minimum stack size is 128 bytes, b/c stack pointer must always point at enough space to store all \%i* and \%l* registers, used when running out of register windows
\item if needed, register spill area (for integer arguments passed via \%o0-\%o5) is adjacent to parameters
\item results are expected by caller to be returned in \%o0-\%o3 (after reg window restore, meaning callee writes to \%i0-\%i3) for integers, \%d0,\%d2,\%d4,\%d6 for floats
\item for structs/unions up to 32b, the fields are returned via the respective registers mentioned in the previous bullet point
\item for structs/unions larger than 32b, the caller allocates the space and a pointer to it is passed as hidden first parameter to the function called (meaning in \%o0)
\end{itemize}

A small worked example of these rules is given below, after the stack layout.

\paragraph{Stack layout}

Stack directly after function prolog:\\

\begin{figure}[h]
\begin{tabular}{5|3|1 1}
\hhline{~-~~}
                                   & \vdots                 &                                &                              \\
\hhline{~=~~}
local data (and padding)           & \hspace{4cm}           &                                & \mrrbrace{8}{caller's frame} \\
\hhline{~-~~}
\mrlbrace{6}{parameter area}       & argument x             & \mrrbrace{3}{stack parameters} & \\
                                   & \ldots                 &                                & \\
                                   & argument 6             &                                & \\
                                   & input argument 5 spill & \mrrbrace{3}{spill area}       & \\
                                   & \ldots                 &                                & \\
                                   & input argument 0 spill &                                & \\
\hhline{~-~~}
register save area (\%i* and \%l*) &                        &                                & \\
\hhline{~=~~}
local data (and padding)           &                        &                                & \mrrbrace{3}{current frame} \\
\hhline{~-~~}
parameter area                     &                        &                                & \\
\hhline{~-~~}
                                   & \vdots                 &                                & \\
\hhline{~-~~}
\end{tabular}
\\
\\
\\
\caption{Stack layout on sparc64 calling convention}
\end{figure}
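\paragraph{Worked example}

As a hypothetical illustration of the rules above (not an excerpt from the official proposals, so consult them for corner cases such as single precision and aggregate arguments): for a call to \texttt{void f(int a, double b, int c)}, argument slot~0 holds \texttt{a} and is passed in \texttt{\%o0}, slot~1 holds \texttt{b} and is passed in \texttt{\%d2} while \texttt{\%o1} is skipped, and slot~2 holds \texttt{c} and is passed in \texttt{\%o2} while the corresponding floating point register is skipped.
With only three argument slots there are no stack parameters, but the caller still provides the 128~byte minimum frame, and after the register window shift the callee sees \texttt{a} and \texttt{c} in \texttt{\%i0} and \texttt{\%i2}, respectively.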
{ "alphanum_fraction": 0.5520888419, "avg_line_length": 66.9380530973, "ext": "tex", "hexsha": "e56c1b9d491bad10f5de36535ca9928783ab1b30", "lang": "TeX", "max_forks_count": 14, "max_forks_repo_forks_event_max_datetime": "2022-03-27T14:56:56.000Z", "max_forks_repo_forks_event_min_datetime": "2021-01-08T05:05:19.000Z", "max_forks_repo_head_hexsha": "5a6ce78918932bbd236bfa29112f11907d782655", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "jeffpanici75/ryu", "max_forks_repo_path": "ext/dyncall-1.0/doc/manual/callconvs/callconv_sparc64.tex", "max_issues_count": 79, "max_issues_repo_head_hexsha": "5a6ce78918932bbd236bfa29112f11907d782655", "max_issues_repo_issues_event_max_datetime": "2020-11-17T13:40:06.000Z", "max_issues_repo_issues_event_min_datetime": "2018-08-01T11:50:45.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "jeffpanici75/ryu", "max_issues_repo_path": "ext/dyncall-1.0/doc/manual/callconvs/callconv_sparc64.tex", "max_line_length": 249, "max_stars_count": 35, "max_stars_repo_head_hexsha": "5a6ce78918932bbd236bfa29112f11907d782655", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "jeffpanici75/ryu", "max_stars_repo_path": "ext/dyncall-1.0/doc/manual/callconvs/callconv_sparc64.tex", "max_stars_repo_stars_event_max_datetime": "2022-02-28T18:41:05.000Z", "max_stars_repo_stars_event_min_datetime": "2018-02-01T20:53:13.000Z", "num_tokens": 1784, "size": 7564 }
% This chapter was modified on 1/12/05.

%\setcounter{chapter}{7}

\chapter{Law of Large Numbers}\label{chp 8}

\section[Discrete Random Variables]{Law of Large Numbers for Discrete Random Variables}
\label{sec 8.1}

We are now in a position to prove our first fundamental theorem of probability. We have seen that an intuitive way to view the probability of a certain outcome is as the frequency with which that outcome occurs in the long run, when the experiment is repeated a large number of times. We have also defined probability mathematically as a value of a distribution function for the random variable representing the experiment. The Law of Large Numbers, which is a theorem proved about the mathematical model of probability, shows that this model is consistent with the frequency interpretation of probability. This theorem is sometimes called the \emx {law of averages.} To find out what would happen if this law were not true, see the article by Robert M. Coates.\index{COATES, R. M.}\footnote{R.~M.~Coates, ``The Law," \emx {The World of Mathematics,} ed. James R. Newman (New York: Simon and Schuster, 1956).}

\subsection*{Chebyshev Inequality}

To discuss the Law of Large Numbers, we first need an important inequality called the \emx {Chebyshev Inequality.}

\begin{theorem}{\bf (Chebyshev Inequality)}\index{Chebyshev Inequality} Let $X$ be a discrete random variable with expected value $\mu = E(X)$, and let $\epsilon > 0$ be any positive real number. Then
$$ P(|X - \mu| \geq \epsilon) \leq \frac {V(X)}{\epsilon^2}\ . $$
\proof Let $m(x)$ denote the distribution function of $X$. Then the probability that $X$ differs from $\mu$ by at least $\epsilon$ is given by
$$P(|X - \mu| \geq \epsilon) = \sum_{|x - \mu| \geq \epsilon} m(x)\ .$$
We know that
$$V(X) = \sum_x (x - \mu)^2 m(x)\ ,$$
and this is clearly at least as large as
$$\sum_{|x - \mu| \geq \epsilon} (x - \mu)^2 m(x)\ ,$$
since all the summands are positive and we have restricted the range of summation in the second sum. But this last sum is at least
\begin{eqnarray*}
\sum_{|x - \mu| \geq \epsilon} \epsilon^2 m(x) &=& \epsilon^2 \sum_{|x - \mu| \geq \epsilon} m(x) \\
&=& \epsilon^2 P(|X - \mu| \geq \epsilon)\ .\\
\end{eqnarray*}
So,
$$ P(|X - \mu| \geq \epsilon) \leq \frac {V(X)}{\epsilon^2}\ . $$
\end{theorem}

Note that $X$ in the above theorem can be any discrete random variable, and $\epsilon$ any positive number.

\begin{example}
Let $X$ be any random variable with $E(X) = \mu$ and $V(X) = \sigma^2$. Then, if $\epsilon = k\sigma$, Chebyshev's Inequality states that
$$ P(|X - \mu| \geq k\sigma) \leq \frac {\sigma^2}{k^2\sigma^2} = \frac 1{k^2}\ . $$
Thus, for any random variable, the probability of a deviation from the mean of more than~$k$ standard deviations is ${} \leq 1/k^2$. If, for example, $k = 5$, $1/k^2 = .04$.
\end{example}

Chebyshev's Inequality is the best possible inequality in the sense that, for any $\epsilon > 0$, it is possible to give an example of a random variable for which Chebyshev's Inequality is in fact an equality. To see this, given $\epsilon > 0$, choose $X$ with distribution
$$ p_X = \pmatrix{ -\epsilon & +\epsilon \cr 1/2 & 1/2 \cr}\ . $$
Then $E(X) = 0$, $V(X) = \epsilon^2$, and
$$ P(|X - \mu| \geq \epsilon) = \frac {V(X)}{\epsilon^2} = 1\ . $$

We are now prepared to state and prove the Law of Large Numbers.
\subsection*{Law of Large Numbers} \begin{theorem}{\bf (Law of Large Numbers)}\index{Law of Large Numbers} Let $X_1$,~$X_2$, \dots,~$X_n$ be an independent trials process, with finite expected value $\mu = E(X_j)$ and finite variance $\sigma^2 = V(X_j)$. Let $S_n = X_1 + X_2 +\cdots+ X_n$. Then for any $\epsilon > 0$, $$ P\left( \left| \frac {S_n}n - \mu \right| \geq \epsilon \right) \to 0 $$ as $n \rightarrow \infty$. Equivalently, $$ P\left( \left| \frac {S_n}n - \mu \right| < \epsilon \right) \to 1 $$ as $n \rightarrow \infty$. \proof Since $X_1$,~$X_2$, \dots,~$X_n$ are independent and have the same distributions, we can apply Theorem~\ref{thm 6.9}. We obtain $$ V(S_n) = n\sigma^2\ , $$ and $$ V (\frac {S_n}n) = \frac {\sigma^2}n\ . $$ Also we know that $$ E (\frac {S_n}n) = \mu\ . $$ By Chebyshev's Inequality, for any $\epsilon > 0$, $$ P\left( \left| \frac {S_n}n - \mu \right| \geq \epsilon \right) \leq \frac {\sigma^2}{n\epsilon^2}\ . $$ Thus, for fixed $\epsilon$, $$ P\left( \left| \frac {S_n}n - \mu \right| \geq \epsilon \right) \to 0 $$ as $n \rightarrow \infty$, or equivalently, $$ P\left( \left| \frac {S_n}n - \mu \right| < \epsilon \right) \to 1 $$ as $n \rightarrow \infty$. \end{theorem} \subsection*{Law of Averages} Note that $S_n/n$ is an average of the individual outcomes, and one often calls the Law of Large Numbers the ``law of averages." It is a striking fact that we can start with a random experiment about which little can be predicted and, by taking averages, obtain an experiment in which the outcome can be predicted with a high degree of certainty. The Law of Large Numbers, as we have stated it, is often called the ``Weak Law of Large Numbers" to distinguish it from the ``Strong Law of Large Numbers" described in Exercise~\ref{exer 8.1.16}. Consider the important special case of Bernoulli trials with probability~$p$ for success. Let $X_j = 1$ if the $j$th outcome is a success and~0 if it is a failure. Then $S_n = X_1 + X_2 +\cdots+ X_n$ is the number of successes in $n$ trials and $\mu = E(X_1) = p$. The Law of Large Numbers states that for any $\epsilon > 0$ $$ P\left( \left| \frac {S_n}n - p \right| < \epsilon \right) \to 1 $$ as $n \rightarrow \infty$. The above statement says that, in a large number of repetitions of a Bernoulli experiment, we can expect the proportion of times the event will occur to be near $p$. This shows that our mathematical model of probability agrees with our frequency interpretation of probability. \subsection*{Coin Tossing} Let us consider the special case of tossing a coin $n$ times with $S_n$ the number of heads that turn up. Then the random variable $S_n/n$ represents the fraction of times heads turns up and will have values between 0~and~1. The Law of Large Numbers predicts that the outcomes for this random variable will, for large~$n$, be near 1/2. In Figure~\ref{fig 8.1}, we have plotted the distribution for this example for increasing values of ~$n$. We have marked the outcomes between .45~and~.55 by dots at the top of the spikes. We see that as $n$ increases the distribution gets more and more concentrated around~.5 and a larger and larger percentage of the total area is contained within the interval $(.45,.55)$, as predicted by the Law of Large Numbers. \putfig{5.0truein}{PSfig8-1}{Bernoulli trials distributions.}{fig 8.1} \subsection*{Die Rolling} \begin{example} Consider $n$ rolls of a die. Let $X_j$ be the outcome of the $j$th roll. Then $S_n = X_1 + X_2 +\cdots+ X_n$ is the sum of the first $n$ rolls. 
This is an independent trials process with $E(X_j) = 7/2$. Thus, by the Law of Large Numbers, for any $\epsilon > 0$ $$ P\left( \left| \frac {S_n}n - \frac 72 \right| \geq \epsilon \right) \to 0 $$ as $n \rightarrow \infty$. An equivalent way to state this is that, for any $\epsilon > 0$, $$ P\left( \left| \frac {S_n}n - \frac 72 \right| < \epsilon \right) \to 1 $$ as $n \rightarrow \infty$. \end{example} \subsection*{Numerical Comparisons} It should be emphasized that, although Chebyshev's Inequality proves the Law of Large Numbers, it is actually a very crude inequality for the probabilities involved. However, its strength lies in the fact that it is true for any random variable at all, and it allows us to prove a very powerful theorem. \par In the following example, we compare the estimates given by Chebyshev's Inequality with the actual values. \begin{example} Let $X_1$,~$X_2$, \dots,~$X_n$ be a Bernoulli trials process with probability~.3 for success and~.7 for failure. Let $X_j = 1$ if the $j$th outcome is a success and~0 otherwise. Then, $E(X_j) = .3$ and $V(X_j) = (.3)(.7) = .21$. If $$ A_n = \frac {S_n}n = \frac {X_1 + X_2 +\cdots+ X_n}n $$ is the \emx {average} of the $X_i$, then $E(A_n) = .3$ and $V(A_n) = V(S_n)/n^2 = .21/n$. Chebyshev's Inequality states that if, for example, $\epsilon = .1$, $$ P(|A_n - .3| \geq .1) \leq \frac {.21}{n(.1)^2} = \frac {21}n\ . $$ Thus, if $n = 100$, $$ P(|A_{100} - .3| \geq .1) \leq .21\ , $$ or if $n = 1000$, $$ P(|A_{1000} - .3| \geq .1) \leq .021\ . $$ These can be rewritten as \begin{eqnarray*} P(.2 < A_{100} < .4) &\geq& .79\ , \\ P(.2 < A_{1000} < .4) &\geq& .979\ . \end{eqnarray*} These values should be compared with the actual values, which are (to six decimal places) \begin{eqnarray*} P(.2 < A_{100} < .4) &\approx& .962549 \\ P(.2 < A_{1000} < .4) &\approx& 1\ .\\ \end{eqnarray*} The program {\bf Law}\index{Law (program)} can be used to carry out the above calculations in a systematic way. \end{example} \subsection*{Historical Remarks} The Law of Large Numbers was first proved by the Swiss mathematician James Bernoulli\index{BERNOULLI, J.|(} in the fourth part of his work \emx {Ars Conjectandi} published posthumously in~1713.\footnote{J. Bernoulli, \emx {The Art of Conjecturing IV,} trans.~Bing Sung, Technical Report No.~2, Dept.\ of Statistics, Harvard Univ., 1966} As often happens with a first proof, Bernoulli's proof was much more difficult than the proof we have presented using Chebyshev's inequality. Chebyshev developed his inequality to prove a general form of the Law of Large Numbers (see Exercise~\ref{exer 8.1.13}). The inequality itself appeared much earlier in a work by Bienaym\'e,\index{BIENAYM\'E, I.} and in discussing its history Maistrov\index{MAISTROV, L.} remarks that it was referred to as the Bienaym\'e-Chebyshev Inequality for a long time.\footnote{L. E. Maistrov, \emx {Probability Theory: A Historical Approach,} trans.\ and ed.~Samual Kotz, (New York: Academic Press, 1974), p.~202} In \emx {Ars Conjectandi} Bernoulli provides his reader with a long discussion of the meaning of his theorem with lots of examples. In modern notation he has an event that occurs with probability~$p$ but he does not know $p$. He wants to estimate $p$ by the fraction $\bar{p}$ of the times the event occurs when the experiment is repeated a number of times. He discusses in detail the problem of estimating, by this method, the proportion of white balls in an urn that contains an unknown number of white and black balls. 
He would do this by drawing a sequence of balls from the urn, replacing the ball drawn after each draw, and estimating the unknown proportion of white balls in the urn by the proportion of the balls drawn that are white. He shows that, by choosing $n$ large enough he can obtain any desired accuracy and reliability for the estimate. He also provides a lively discussion of the applicability of his theorem to estimating the probability of dying of a particular disease, of different kinds of weather occurring, and so forth. In speaking of the number of trials necessary for making a judgement, Bernoulli observes that the ``man on the street" believes the ``law of averages." \begin{quote} Further, it cannot escape anyone that for judging in this way about any event at all, it is not enough to use one or two trials, but rather a great number of trials is required. And sometimes the stupidest man---by some instinct of nature \emx {per se} and by no previous instruction (this is truly amazing)--- knows for sure that the more observations of this sort that are taken, the less the danger will be of straying from the mark.\footnote{Bernoulli, op.\ cit., p.~38.} \end{quote} \noindent But he goes on to say that he must contemplate another possibility. \begin{quote} Something futher must be contemplated here which perhaps no one has thought about till now. It certainly remains to be inquired whether after the number of observations has been increased, the probability is increased of attaining the true ratio between the number of cases in which some event can happen and in which it cannot happen, so that this probability finally exceeds any given degree of certainty; or whether the problem has, so to speak, its own asymptote---that is, whether some degree of certainty is given which one can never exceed.\footnote{ibid., p.~39.} \end{quote} \noindent Bernoulli recognized the importance of this theorem, writing: \begin{quote} Therefore, this is the problem which I now set forth and make known after I have already pondered over it for twenty years. Both its novelty and its very great usefullness, coupled with its just as great difficulty, can exceed in weight and value all the remaining chapters of this thesis.\footnote{ibid., p.~42.} \end{quote} \noindent Bernoulli concludes his long proof with the remark: \begin{quote} Whence, finally, this one thing seems to follow: that if observations of all events were to be continued throughout all eternity, (and hence the ultimate probability would tend toward perfect certainty), everything in the world would be perceived to happen in fixed ratios and according to a constant law of alternation, so that even in the most accidental and fortuitous occurrences we would be bound to recognize, as it were, a certain necessity and, so to speak, a certain fate. I do now know whether Plato wished to aim at this in his doctrine of the universal return of things, according to which he predicted that all things will return to their original state after countless ages have past.\footnote{ibid., pp.~65--66.} \end{quote}\index{BERNOULLI, J.|)} \exercises \begin{LJSItem} \i\label{exer 8.1.1} A fair coin is tossed 100 times. The expected number of heads is~50, and the standard deviation for the number of heads is $(100 \cdot 1/2 \cdot 1/2)^{1/2} = 5$. What does Chebyshev's Inequality tell you about the probability that the number of heads that turn up deviates from the expected number 50 by three or more standard deviations (i.e., by at least 15)? 
\i\label{exer 8.1.100} Write a program that uses the function $\mbox {binomial}(n,p,x)$ to compute the exact probability that you estimated in Exercise~\ref{exer 8.1.1}. Compare the two results. \i\label{exer 8.1.101} Write a program to toss a coin 10{,}000 times. Let $S_n$ be the number of heads in the first $n$ tosses. Have your program print out, after every 1000 tosses, $S_n - n/2$. On the basis of this simulation, is it correct to say that you can expect heads about half of the time when you toss a coin a large number of times? \i\label{exer 8.1.102} A 1-dollar bet on craps has an expected winning of $-.0141$. What does the Law of Large Numbers say about your winnings if you make a large number of 1-dollar bets at the craps table? Does it assure you that your losses will be small? Does it assure you that if $n$ is very large you will lose? \i\label{exer 8.1.103} Let $X$ be a random variable with $E(X) =0$ and $V(X) = 1$. What integer value~$k$ will assure us that $P(|X| \geq k) \leq .01$? \i\label{exer 8.1.6} Let $S_n$ be the number of successes in $n$ Bernoulli trials with probability~$p$ for success on each trial. Show, using Chebyshev's Inequality, that for any $\epsilon > 0$ $$ P\left( \left| \frac {S_n}n - p \right| \geq \epsilon \right) \leq \frac {p(1 - p)}{n\epsilon^2}\ . $$ \i\label{exer 8.1.7} Find the maximum possible value for $p(1 - p)$ if $0 < p < 1$. Using this result and Exercise~\ref{exer 8.1.6}, show that the estimate $$ P\left( \left| \frac {S_n}n - p \right| \geq \epsilon \right) \leq \frac 1{4n\epsilon^2} $$ is valid for any $p$. \i\label{exer 8.1.104} A fair coin is tossed a large number of times. Does the Law of Large Numbers assure us that, if $n$ is large enough, with $\mbox {probability} > .99$ the number of heads that turn up will not deviate from $n/2$ by more than 100? \i\label{exer 8.1.105} In Exercise~\ref{sec 6.2}.\ref{exer 6.2.16}, you showed that, for the hat check problem, the number $S_n$ of people who get their own hats back has $E(S_n) = V(S_n) = 1$. Using Chebyshev's Inequality, show that $P(S_n \geq 11) \leq .01$ for any $n \geq 11$. \i\label{exer 8.1.106} Let $X$ by any random variable which takes on values 0,~1,~2, \dots,~$n$ and has $E(X) = V(X) = 1$. Show that, for any positive integer $k$, $$ P(X \geq k + 1) \leq \frac 1{k^2}\ . $$ \i\label{exer 8.1.107} We have two coins: one is a fair coin and the other is a coin that produces heads with probability 3/4. One of the two coins is picked at random, and this coin is tossed $n$ times. Let $S_n$ be the number of heads that turns up in these $n$ tosses. Does the Law of Large Numbers allow us to predict the proportion of heads that will turn up in the long run? After we have observed a large number of tosses, can we tell which coin was chosen? How many tosses suffice to make us 95~percent sure? \i\label{exer 8.1.13} (Chebyshev\index{CHEBYSHEV, P. L.}\footnote{P. L. Chebyshev, ``On Mean Values," \emx {J.\ Math.\ Pure.\ Appl.,} vol.~12 (1867), pp.~177--184.}) Assume that $X_1$,~$X_2$, \dots,~$X_n$ are independent random variables with possibly different distributions and let $S_n$ be their sum. Let $m_k = E(X_k)$, $\sigma_k^2 = V(X_k)$, and $M_n = m_1 + m_2 +\cdots+ m_n$. Assume that $\sigma_k^2 < R$ for all~$k$. Prove that, for any $\epsilon > 0$, $$ P\left( \left| \frac {S_n}n - \frac {M_n}n \right| < \epsilon \right) \to 1 $$ as $n \rightarrow \infty$. \i\label{exer 8.1.108} A fair coin is tossed repeatedly. Before each toss, you are allowed to decide whether to bet on the outcome. 
Can you describe a betting system with infinitely many bets which will enable
you, in the long run, to win more than half of your bets?  (Note that we are
disallowing a betting system that says to bet until you are ahead, then quit.)
Write a computer program that implements this betting system.  As stated above,
your program must decide whether to bet on a particular outcome before that
outcome is determined.  For example, you might select only outcomes that come
after there have been three tails in a row.  See if you can get more than 50\%
heads by your ``system."

\istar\label{exer 8.1.109} Prove the following analogue of Chebyshev's
Inequality:
$$
P(|X - E(X)| \geq \epsilon) \leq \frac 1\epsilon E(|X - E(X)|)\ .
$$

\istar\label{exer 8.1.16} We have proved a theorem often called the ``Weak Law
of Large Numbers."  Most people's intuition and our computer simulations
suggest that, if we toss a coin a sequence of times, the proportion of heads
will really approach 1/2; that is, if $S_n$ is the number of heads in $n$
tosses, then we will have
$$
A_n = \frac {S_n}n \to \frac 12
$$
as $n \to \infty$.  Of course, we cannot be sure of this since we are not able
to toss the coin an infinite number of times, and, if we could, the coin could
come up heads every time.  However, the ``Strong Law of Large
Numbers,"\index{Strong Law of Large\\ Numbers} proved in more advanced courses,
states that
$$
P\left( \frac {S_n}n \to \frac 12 \right) = 1\ .
$$
Describe a sample space $\Omega$ that would make it possible for us to talk
about the event
$$
E = \left\{\, \omega : \frac {S_n}n \to \frac 12\, \right\}\ .
$$
Could we assign the equiprobable measure to this space? \choice{}{(See
Example~\ref{exam 2.2.12}.)}

\istar\label{exer 8.1.16.5} In this exercise, we shall construct an example of
a sequence of random variables that satisfies the weak law of large numbers,
but not the strong law.  The distribution of $X_i$ will have to depend on $i$,
because otherwise both laws would be satisfied.  (This problem was communicated
to us by David Maslen.)
\vskip .1in
Suppose we have an infinite sequence of mutually independent events
$A_1, A_2, \ldots$.  Let $a_i = P(A_i)$, and let $r$ be a positive integer.
\begin{enumerate}
\item Find an expression for the probability that none of the $A_i$ with $i>r$ occur.
\item Use the fact that $1-x \leq e^{-x}$ to show that
$$
P(\mbox{No\ $A_i$\ with\ $i > r$\ occurs}) \leq e^{-\sum_{i=r+1}^{\infty} a_i}\ .
$$
\item (The first Borel-Cantelli lemma) Prove that if $\sum_{i=1}^{\infty} a_i$ diverges, then
$$
P(\mbox{infinitely\ many\ $A_i$\ occur}) = 1.
$$
\vskip .1in
\noindent Now, let $X_i$ be a sequence of mutually independent random variables
such that for each positive integer $i \geq 2$,
$$
P(X_i = i) = \frac{1}{2i\log i}, \quad P(X_i = -i) = \frac{1}{2i\log i}, \quad
P(X_i =0) = 1 - \frac{1}{i \log i}.
$$
When $i=1$ we let $X_i=0$ with probability $1$.  As usual we let
$S_n = X_1 + \cdots + X_n$.  Note that the mean of each $X_i$ is $0$.
\item Find the variance of $S_n$.
\item Show that the sequence $\langle X_i \rangle$ satisfies the Weak Law of
Large Numbers, i.e. prove that for any $\epsilon > 0$
$$
P\biggl(\biggl|{\frac{S_n}{n}}\biggr| \geq \epsilon\biggr) \rightarrow 0\ ,
$$
as $n$ tends to infinity.
\vskip .1in
\noindent We now show that $\{ X_i \}$ does not satisfy the Strong Law of Large
Numbers.  Suppose that $S_n / n \rightarrow 0$.  Then because
$$
\frac{X_n}{n} = \frac{S_n}{n} - \frac{n-1}{n} \frac{S_{n-1}}{n-1}\ ,
$$
we know that $X_n / n \rightarrow 0$.
From the definition of limits, we conclude that the inequality $|X_i| \geq \frac{1}{2} i$ can only be true for finitely many $i$. \item Let $A_i$ be the event $|X_i| \geq \frac{1}{2} i$. Find $P(A_i)$. Show that $\sum_{i=1}^{\infty} P(A_i)$ diverges (use the Integral Test). \item Prove that $A_i$ occurs for infinitely many $i$. \item Prove that $$ P\biggl(\frac{S_n}{n} \rightarrow 0\biggr) = 0, $$ and hence that the Strong Law of Large Numbers fails for the sequence $\{ X_i \}$. \end{enumerate} \istar\label{exer 8.1.110} Let us toss a biased coin that comes up heads with probability~$p$ and assume the validity of the Strong Law of Large Numbers as described in Exercise~\ref{exer 8.1.16}. Then, with probability~1, $$ \frac {S_n}n \to p $$ as $n \to \infty$. If $f(x)$ is a continuous function on the unit interval, then we also have $$ f\left( \frac {S_n}n \right) \to f(p)\ . $$ Finally, we could hope that $$ E\left(f\left( \frac {S_n}n \right)\right) \to E(f(p)) = f(p)\ . $$ Show that, if all this is correct, as in fact it is, we would have proven that any continuous function on the unit interval is a limit of polynomial functions. This is a sketch of a probabilistic proof of an important theorem in mathematics called the \emx {Weierstrass approximation theorem.}\index{Weierstrass Approximation Theorem} \end{LJSItem} \choice{}{\section[Continuous Random Variables]{Law of Large Numbers for Continuous Random Variables} \label{sec 8.2} In the previous section we discussed in some detail the Law of Large Numbers for discrete probability distributions. This law has a natural analogue for continuous probability distributions, which we consider somewhat more briefly here. \subsection*{\bf Chebyshev Inequality} \hfill\break\index{Chebyshev Inequality} Just as in the discrete case, we begin our discussion with the Chebyshev Inequality. \begin{theorem}{\bf (Chebyshev Inequality)} \index{Chebyshev Inequality} Let $X$ be a continuous random variable with density function $f(x)$. Suppose $X$ has a finite expected value $\mu = E(X)$ and finite variance $\sigma^2 = V(X)$. Then for any positive number $\epsilon > 0$ we have $$ P(|X - \mu| \geq \epsilon) \leq \frac {\sigma^2}{\epsilon^2}\ . $$ \end{theorem} The proof is completely analogous to the proof in the discrete case, and we omit it. \par Note that this theorem says nothing if $\sigma^2 = V(X)$ is infinite. \begin{example} Let $X$ be any continuous random variable with $E(X) = \mu$ and $V(X) = \sigma^2$. Then, if $\epsilon = k\sigma = k$ standard deviations for some integer~$k$, then $$ P(|X - \mu| \geq k\sigma) \leq \frac {\sigma^2}{k^2\sigma^2} = \frac 1{k^2}\ , $$ just as in the discrete case. \end{example} \subsection*{Law of Large Numbers} With the Chebyshev Inequality we can now state and prove the Law of Large Numbers for the continuous case. \begin{theorem}{\bf (Law of Large Numbers)}\index{Law of Large Numbers} Let $X_1$,~$X_2$, \dots,~$X_n$ be an independent trials process with a continuous density function~$f$, finite expected value~$\mu$, and finite variance~$\sigma^2$. Let $S_n = X_1 + X_2 +\cdots+ X_n$ be the sum of the $X_i$. Then for any real number $\epsilon > 0$ we have $$ \lim_{n \to \infty} P\left( \left| \frac {S_n}n - \mu \right| \geq \epsilon \right) = 0\ , $$ or equivalently, $$ \lim_{n \to \infty} P\left( \left| \frac {S_n}n - \mu \right| < \epsilon \right) = 1\ . $$ \end{theorem} Note that this theorem is not necessarily true if $\sigma^2$ is infinite (see Example~\ref{exam 8.2.5}). 
As in the discrete case, the Law of Large Numbers says that the average value of $n$ independent trials tends to the expected value as $n \to \infty$, in the precise sense that, given $\epsilon > 0$, the probability that the average value and the expected value differ by more than $\epsilon$ tends to~0 as $n \to \infty$. Once again, we suppress the proof, as it is identical to the proof in the discrete case. \subsection*{Uniform Case} \begin{example} Suppose we choose at random $n$ numbers from the interval $[0,1]$ with uniform distribution. Then if $X_i$ describes the $i$th choice, we have \begin{eqnarray*} \mu & = & E(X_i) = \int_0^1 x\, dx = \frac 12\ , \\ \sigma^2 & = & V(X_i) = \int_0^1 x^2\, dx - \mu^2 \\ & = & \frac 13 - \frac 14 = \frac 1{12}\ . \end{eqnarray*} Hence, \begin{eqnarray*} E \left( \frac {S_n}n \right) & = & \frac 12\ , \\ V \left( \frac {S_n}n \right) & = & \frac 1{12n}\ , \end{eqnarray*} and for any $\epsilon > 0$, $$ P \left( \left| \frac {S_n}n - \frac 12 \right| \geq \epsilon \right) \leq \frac 1{12n \epsilon^2}\ . $$ This says that if we choose $n$ numbers at random from $[0,1]$, then the chances are better than $1 - 1/(12n\epsilon^2)$ that the difference $|S_n/n - 1/2|$ is less than~$\epsilon$. Note that $\epsilon$ plays the role of the amount of error we are willing to tolerate: If we choose $\epsilon = 0.1$, say, then the chances that $|S_n/n - 1/2|$ is less than~0.1 are better than $1 - 100/(12n)$. For $n = 100$, this is about .92, but if $n = 1000$, this is better than .99 and if $n = 10{,}000$, this is better than .999. \putfig{5.0truein}{PSfig8-2}{Illustration of Law of Large Numbers --- uniform case.}{fig 8.2} We can illustrate what the Law of Large Numbers says for this example graphically. The density for $A_n = S_n/n$ is determined by $$ f_{A_n}(x) = nf_{S_n}(nx)\ . $$ We have seen in Section~\ref{sec 7.2}, that we can compute the density $f_{S_n}(x)$ for the sum of $n$ uniform random variables. In Figure~\ref{fig 8.2} we have used this to plot the density for $A_n$ for various values of~$n$. We have shaded in the area for which $A_n$ would lie between .45~and~.55. We see that as we increase $n$, we obtain more and more of the total area inside the shaded region. The Law of Large Numbers tells us that we can obtain as much of the total area as we please inside the shaded region by choosing $n$ large enough (see also Figure~\ref{fig 8.1}). \end{example} \subsection*{Normal Case} \begin{example} Suppose we choose $n$ real numbers at random, using a normal distribution with mean~0 and variance~1. Then \begin{eqnarray*} \mu &=& E(X_i) = 0\ , \\ \sigma^2 &=& V(X_i) = 1\ . \end{eqnarray*} Hence, \begin{eqnarray*} E \left( \frac {S_n}n \right) &=& 0\ , \\ V \left( \frac {S_n}n \right) &=& \frac 1n\ , \end{eqnarray*} and, for any $\epsilon > 0$, $$ P\left( \left| \frac {S_n}n - 0 \right| \geq \epsilon \right) \leq \frac 1{n\epsilon^2}\ . $$ In this case it is possible to compare the Chebyshev estimate for $P(|S_n/n - \mu| \geq \epsilon)$ in the Law of Large Numbers with exact values, since we know the density function for $S_n/n$ exactly (see Example~\ref{exam 7.12}). The comparison is shown in Table~\ref{table 8.1}, for $\epsilon = .1$. 
The data in this table was produced by the program {\bf LawContinuous}.\index{LawContinuous (program)} \begin{table} \centering \begin{tabular}{r|r|r} $n$ & $P(|S_n/n| \ge .1)$ & Chebyshev \\ \hline 100 & .31731 & 1.00000 \\ 200 & .15730 & .50000 \\ 300 & .08326 & .33333 \\ 400 & .04550 & .25000 \\ 500 & .02535 & .20000 \\ 600 & .01431 & .16667 \\ 700 & .00815 & .14286 \\ 800 & .00468 & .12500 \\ 900 & .00270 & .11111 \\ 1000 & .00157 & .10000 \\ \hline \end{tabular} \caption{Chebyshev estimates.} \label{table 8.1} \end{table} We see here that the Chebyshev estimates are in general \emx {not} very accurate. \end{example} \subsection*{Monte Carlo Method} Here is a somewhat more interesting example. \begin{example} Let $g(x)$ be a continuous function defined for $x \in [0,1]$ with values in $[0,1]$. In Section~\ref{sec 2.1}, we showed how to estimate the area of the region under the graph of $g(x)$ by the Monte Carlo method, that is, by choosing a large number of random values for $x$~and~$y$ with uniform distribution and seeing what fraction of the points $P(x,y)$ fell inside the region under the graph (see Example~\ref{exam 2.1.2}). \par Here is a better way to estimate the same area (see Figure~\ref{fig 8.3}). Let us choose a large number of independent values $X_n$ at random from $[0,1]$ with uniform density, set $Y_n = g(X_n)$, and find the average value of the $Y_n$. Then this average is our estimate for the area. To see this, note that if the density function for~$X_n$ is uniform, \begin{eqnarray*} \mu & = & E(Y_n) = \int_0^1 g(x) f(x)\, dx \\ & = & \int_0^1 g(x)\, dx \\ & = & \mbox {average\ value\ of\ } g(x)\ , \end{eqnarray*} while the variance is $$ \sigma^2 = E((Y_n - \mu)^2) = \int_0^1 (g(x) - \mu)^2\, dx < 1\ , $$ since for all $x$ in $[0, 1]$, $g(x)$ is in $[0, 1]$, hence $\mu$ is in $[0, 1]$, and so $|g(x) - \mu| \le 1$. Now let $A_n = (1/n)(Y_1 + Y_2 +\cdots+ Y_n)$. Then by Chebyshev's Inequality, we have $$ P(|A_n - \mu| \geq \epsilon) \leq \frac {\sigma^2}{n\epsilon^2} < \frac 1{n\epsilon^2}\ . $$ \putfig{3truein}{PSfig8-3}{Area problem.}{fig 8.3} This says that to get within $\epsilon$ of the true value for $\mu = \int_0^1 g(x)\, dx$ with probability at least $p$, we should choose $n$ so that $1/n\epsilon^2 \leq 1 - p$ (i.e., so that $n \geq 1/\epsilon^2(1 - p)$). Note that this method tells us how large to take $n$ to get a desired accuracy. \end{example} The Law of Large Numbers requires that the variance $\sigma^2$ of the original underlying density be finite: $\sigma^2 < \infty$. In cases where this fails to hold, the Law of Large Numbers may fail, too. An example follows. \subsection*{Cauchy Case} \begin{example}\label{exam 8.2.5} Suppose we choose $n$ numbers from $(-\infty,+\infty)$ with a Cauchy density with parameter $a = 1$. We know that for the Cauchy density the expected value and variance are undefined (see Example~\ref{exam 6.23}). In this case, the density function for $$ A_n = \frac {S_n}n $$ is given by (see Example~\ref{exam 7.9}) $$ f_{A_n}(x) = \frac 1{\pi(1 + x^2)}\ , $$ that is, \emx {the density function for $A_n$ is the same for all $n$.} In this case, as $n$ increases, the density function does not change at all, and the Law of Large Numbers does not hold. \end{example} \exercises \begin{LJSItem} \i\label{exer 8.2.1} Let $X$ be a continuous random variable with mean $\mu = 10$ and variance $\sigma^2 = 100/3$. Using Chebyshev's Inequality, find an upper bound for the following probabilities. \begin{enumerate} \item $P(|X - 10| \geq 2)$. 
\item $P(|X - 10| \geq 5)$. \item $P(|X - 10| \geq 9)$. \item $P(|X - 10| \geq 20)$. \end{enumerate} \i\label{exer 8.2.2} Let $X$ be a continuous random variable with values unformly distributed over the interval $[0,20]$. \begin{enumerate} \item Find the mean and variance of $X$. \item Calculate $P(|X - 10| \geq 2)$, $P(|X - 10| \geq 5)$, $P(|X - 10| \geq 9)$, and $P(|X - 10| \geq 20)$ exactly. How do your answers compare with those of Exercise~\ref{exer 8.2.1}? How good is Chebyshev's Inequality in this case? \end{enumerate} \i\label{exer 8.2.3} Let $X$ be the random variable of Exercise~\ref{exer 8.2.2}. \begin{enumerate} \item Calculate the function $f(x) = P(|X - 10| \geq x)$. \item Now graph the function $f(x)$, and on the same axes, graph the Chebyshev function $g(x) = 100/(3x^2)$. Show that $f(x) \leq g(x)$ for all~$x > 0$, but that $g(x)$ is not a very good approximation for~$f(x)$. \end{enumerate} \i\label{exer 8.2.100} Let $X$ be a continuous random variable with values exponentially distributed over $[0,\infty)$ with parameter $\lambda = 0.1$. \begin{enumerate} \item Find the mean and variance of $X$. \item Using Chebyshev's Inequality, find an upper bound for the following probabilities: $P(|X - 10| \geq 2)$, $P(|X - 10| \geq 5)$, $P(|X - 10| \geq 9)$, and $P(|X - 10| \geq 20)$. \item Calculate these probabilities exactly, and compare with the bounds in (b). \end{enumerate} \i\label{exer 8.2.101} Let $X$ be a continuous random variable with values normally distributed over $(-\infty,+\infty)$ with mean $\mu = 0$ and variance $\sigma^2 = 1$. \begin{enumerate} \item Using Chebyshev's Inequality, find upper bounds for the following probabilities: $P(|X| \geq 1)$, $P(|X| \geq 2)$, and $P(|X| \geq 3)$. \item The area under the normal curve between $-1$~and~1 is .6827, between $-2$~and~2 is .9545, and between $-3$~and~3 it is .9973 (see the table in Appendix~A). Compare your bounds in (a) with these exact values. How good is Chebyshev's Inequality in this case? \end{enumerate} \i\label{exer 8.2.102} If $X$ is normally distributed, with mean~$\mu$ and variance~$\sigma^2$, find an upper bound for the following probabilities, using Chebyshev's Inequality. \begin{enumerate} \item $P(|X - \mu| \geq \sigma)$. \item $P(|X - \mu| \geq 2\sigma)$. \item $P(|X - \mu| \geq 3\sigma)$. \item $P(|X - \mu| \geq 4\sigma)$. \end{enumerate} \noindent Now find the exact value using the program {\bf NormalArea}\index{NormalArea (program)} or the normal table in Appendix~A, and compare. \i\label{exer 8.2.103} If $X$ is a random variable with mean~$\mu \ne 0$ and variance~$\sigma^2$, define the \emx {relative deviation} $D$ of $X$ from its mean by $$ D = \left| \frac {X - \mu}\mu \right|\ . $$ \begin{enumerate} \item Show that $P(D \geq a) \leq \sigma^2/(\mu^2a^2)$. \item If $X$ is the random variable of Exercise~\ref{exer 8.2.1}, find an upper bound for $P(D \geq .2)$, $P(D \geq .5)$, $P(D \geq .9)$, and $P(D \geq 2)$. \end{enumerate} \i\label{exer 8.2.104} Let $X$ be a continuous random variable and define the {\em standardized version} $X^*$ of~$X$ by: $$ X^* = \frac {X - \mu}\sigma\ . $$ \begin{enumerate} \item Show that $P(|X^*| \geq a) \leq 1/a^2$. \item If $X$ is the random variable of Exercise~\ref{exer 8.2.1}, find bounds for $P(|X^*| \geq 2)$, $P(|X^*| \geq 5)$, and $P(|X^*| \geq 9)$. \end{enumerate} \i\label{exer 8.2.105} \begin{enumerate} \item Suppose a number $X$ is chosen at random from $[0,20]$ with uniform probability. 
Find a lower bound for the probability that $X$ lies between 8~and~12, using Chebyshev's Inequality. \item Now suppose 20 real numbers are chosen independently from $[0,20]$ with uniform probability. Find a lower bound for the probability that their average lies between 8~and~12. \item Now suppose 100 real numbers are chosen independently from $[0,20]$. Find a lower bound for the probability that their average lies between 8~and~12. \end{enumerate} \i\label{exer 8.2.106} A student's score on a particular calculus final is a random variable with values of $[0,100]$, mean~70, and variance~25. \begin{enumerate} \item Find a lower bound for the probability that the student's score will fall between 65~and~75. \item If 100 students take the final, find a lower bound for the probability that the class average will fall between 65~and~75. \end{enumerate} \i\label{exer 8.2.107} The Pilsdorff beer company runs a fleet of trucks along the 100~mile road from Hangtown to Dry Gulch, and maintains a garage halfway in between. Each of the trucks is apt to break down at a point $X$~miles from Hangtown, where $X$ is a random variable uniformly distributed over $[0,100]$. \begin{enumerate} \item Find a lower bound for the probability $P(|X - 50| \leq 10)$. \item Suppose that in one bad week, 20 trucks break down. Find a lower bound for the probability $P(|A_{20} - 50| \leq 10)$, where $A_{20}$ is the average of the distances from Hangtown at the time of breakdown. \end{enumerate} \i\label{exer 8.2.12} A share of common stock in the Pilsdorff beer company has a price $Y_n$ on the $n$th business day of the year. Finn observes that the price change $X_n = Y_{n + 1} - Y_n$ appears to be a random variable with mean $\mu = 0$ and variance $\sigma^2 =1/4$. If $Y_1 = 30$, find a lower bound for the following probabilities, under the assumption that the $X_n$'s are mutually independent. \begin{enumerate} \item $P(25 \leq Y_2 \leq 35)$. \item $P(25 \leq Y_{11} \leq 35)$. \item $P(25 \leq Y_{101} \leq 35)$. \end{enumerate} \i\label{exer 8.2.108} Suppose one hundred numbers $X_1$,~$X_2$, \dots,~$X_{100}$ are chosen independently at random from $[0,20]$. Let $S = X_1 + X_2 +\cdots+ X_{100}$ be the sum, $A = S/100$ the average, and $S^* = (S - 1000)/(10/\sqrt3)$ the standardized sum. Find lower bounds for the probabilities \begin{enumerate} \item $P(|S - 1000| \leq 100)$. \item $P(|A - 10| \leq 1)$. \item $P(|S^*| \leq \sqrt3)$. \end{enumerate} \i\label{exer 8.2.14} Let $X$ be a continuous random variable normally distributed on $(-\infty,+\infty)$ with mean~0 and variance~1. Using the normal table provided in Appendix~A, or the program {\bf NormalArea}, find values for the function $f(x) = P(|X| \geq x)$ as $x$ increases from 0~to~4.0 in steps of~.25. Note that for $x \geq 0$ the table gives $ NA(0,x) = P(0 \leq X \leq x)$ and thus $P(|X| \geq x) = 2(.5 - NA(0,x)$. Plot by hand the graph of~$f(x)$ using these values, and the graph of the Chebyshev function $g(x) = 1/x^2$, and compare (see Exercise~\ref{exer 8.2.3}). \i\label{exer 8.2.109} Repeat Exercise~\ref{exer 8.2.14}, but this time with mean~10 and variance~3. Note that the table in Appendix~A presents values for a standard normal variable. Find the standardized version $X^*$ for~$X$, find values for $f^*(x) = P(|X^*| \geq x)$ as in Exercise~\ref{exer 8.2.14}, and then rescale these values for $f(x) = P(|X -10| \geq x)$. Graph and compare this function with the Chebyshev function $g(x) = 3/x^2$. 
\i\label{exer 8.2.110} Let $Z = X/Y$ where $X$~and~$Y$ have normal densities with mean~0 and standard deviation~1. Then it can be shown that $Z$ has a Cauchy density. \begin{enumerate} \item Write a program to illustrate this result by plotting a bar graph of 1000 samples obtained by forming the ratio of two standard normal outcomes. Compare your bar graph with the graph of the Cauchy density. Depending upon which computer language you use, you may or may not need to tell the computer how to simulate a normal random variable. A method for doing this was described in Section~\ref{sec 5.2}. \item We have seen that the Law of Large Numbers does not apply to the Cauchy density (see Example~\ref{exam 8.2.5}). Simulate a large number of experiments with Cauchy density and compute the average of your results. Do these averages seem to be approaching a limit? If so can you explain why this might be? \end{enumerate} \i\label{exer 8.2.111} Show that, if $X \geq 0$, then $P(X \geq a) \leq E(X)/a$. \i\label{exer 8.2.112} (Lamperti\footnote{Private communication.}) \index{LAMPERTI, J.} Let $X$ be a non-negative random variable. What is the best upper bound you can give for $P(X \geq a)$ if you know \begin{enumerate} \item $E(X) = 20$. \item $E(X) = 20$ and $V(X) = 25$. \item $E(X) = 20$, $V(X) = 25$, and $X$ is symmetric about its mean. \end{enumerate} \end{LJSItem}}
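\vskip .1in
\noindent As a closing numerical illustration of the ideas in this chapter, the
comparison between the Chebyshev estimate and the observed frequency of large
deviations (as in the example of Section~\ref{sec 8.1}) can be carried out with
a few lines of code.  The sketch below is written in Python and is only an
illustration; it is not the {\bf Law} or {\bf LawContinuous} programs referred
to in the text.
\begin{verbatim}
# Sketch: estimate how often the average A_n = S_n/n of n Bernoulli(p)
# trials deviates from p by at least eps, and compare with the
# Chebyshev bound p(1-p)/(n*eps^2).
import random

def deviation_frequency(n, p, eps, trials=10000):
    count = 0
    for _ in range(trials):
        s = sum(1 for _ in range(n) if random.random() < p)
        if abs(s / n - p) >= eps:
            count += 1
    return count / trials

n, p, eps = 100, 0.3, 0.1
print("observed frequency:", deviation_frequency(n, p, eps))
print("Chebyshev bound   :", p * (1 - p) / (n * eps * eps))
\end{verbatim}
\noindent For $n = 100$, $p = .3$, and $\epsilon = .1$ the observed frequency is
a few percent, while the Chebyshev bound is $.21$, in agreement with the
comparison made in Section~\ref{sec 8.1}.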
{ "alphanum_fraction": 0.6983356276, "avg_line_length": 42.6414087513, "ext": "tex", "hexsha": "e2a60717cde521ff635d9071646b40218de2c650", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "288c82a0cb94e6b9d702eb8803dc342052d411f9", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "kskyten/introduction-to-probability", "max_forks_repo_path": "tex/ch8.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "288c82a0cb94e6b9d702eb8803dc342052d411f9", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "kskyten/introduction-to-probability", "max_issues_repo_path": "tex/ch8.tex", "max_line_length": 99, "max_stars_count": null, "max_stars_repo_head_hexsha": "288c82a0cb94e6b9d702eb8803dc342052d411f9", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "kskyten/introduction-to-probability", "max_stars_repo_path": "tex/ch8.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 13259, "size": 39955 }
%###############################################################################

\section{Change the Test Case}
%-------------------------------------------------------------------------------
\noindent In this section, the configurations of the three experiments in DCMIP2016
are described in detail.
Each configuration set in the directory \texttt{scale-{\version}/scale-gm/test/case}
is ready to go. As a first step, please use these sets as they are.

\subsection{preparing directory}
Change to the directory of the target case.
If you want to run test case 162, change to
\texttt{scale-{\version}/scale-gm/test/case/DCMIP2016-12/}.
After that, make a directory. The directory gl05rl00z30pe40 may already exist;
here we assume it is created anew.
\begin{verbatim}
  $ mkdir gl05rl00z30pe40
  $ cd gl05rl00z30pe40
\end{verbatim}

\noindent Copy the Makefile and configuration file from another DCMIP2016 directory,
for example DCMIP2016-11, to the new directory.
\begin{verbatim}
  $ cp ../../DCMIP2016-11/gl05rl00z30pe10/Makefile ./
  $ cp ../../DCMIP2016-11/gl05rl00z30pe10/nhm_driver.cnf ./
\end{verbatim}

\subsection{Edit configuration file: nhm\_driver.cnf}

%--------------------
\vspace{0.5cm}
\noindent {\large{\sf edit for test case 161: moist baroclinic wave}}

(the symbol "\verb|<--|" indicates changed parameters)
\editboxtwo{
\verb|$ vi nhm_driver.cnf | & \\
\verb| * snip * | & \\
\\
\verb| &RUNCONFPARAM | & \\
\verb| RUNNAME = 'DCMIP2016-11', | & {\verb| <--|} \\
\verb| NDIFF_LOCATION = 'IN_LARGE_STEP2', | & \\
\verb| THUBURN_LIM = .true., | & \\
\verb| ATMOS_PHY_TYPE = 'SIMPLE', | & {\verb| <--|} \\
\verb| RAIN_TYPE = "WARM", | & \\
\verb| AF_TYPE = 'DCMIP', | & \\
\verb| / | & \\
\\
\verb| * snip * | & \\
\\
\verb| &DYCORETESTPARAM | & \\
\verb| init_type = 'Jablonowski-Moist', | & {\verb| <--|} \\
\verb| test_case = '1', | & {\verb| <--|}\\
\verb| chemtracer = .true., | & {\verb| <--|}\\
\verb| prs_rebuild = .false., | & {\verb| <--|}\\
\verb| / | & \\
\\
\verb| * snip * | & \\
\\
\verb| &FORCING_DCMIP_PARAM | & \\
\verb| SET_DCMIP2016_11 = .true., | & {\verb| <--|} \\
\verb| / | & \\
\\
\verb| * snip * | & \\
}

\noindent \textcolor{blue}{{\sf Note}}
\begin{itemize}
\item "RUNNAME" should be specified as "DCMIP2016-11".
\item "init\_type" should be specified as "Jablonowski-Moist".
\item "ATMOS\_PHY\_TYPE" specifies which kind of physics parameterizations is used
      ("SIMPLE", "NONE", or "" (sophisticated) can be used).
\item "test\_case" can be chosen from 1 to 6.\\
 case 1: perturbation: exponential / with moisture \\
 case 2: perturbation: stream function / with moisture \\
 case 3: perturbation: exponential / without moisture \\
 case 4: perturbation: stream function / without moisture \\
 case 5: no perturbation / with moisture \\
 case 6: no perturbation / without moisture
\item \verb|FORCING_DCMIP_PARAM| should be specified as "\verb|SET_DCMIP2016_11 = .true.|".
\item "step" in \verb|NMHISD| should be changed following the required history output
      interval described in the DCMIP2016 Test Case Document.
\item items of history output variables, which are specified by "NMHIST", should be added
      following the requirements in the DCMIP2016 Test Case Document.
\item "small\_planet\_factor" in PARAM\_CONST should be set to 1.
\end{itemize}

%--------------------
\vspace{0.5cm}
\noindent {\large {\sf edit for test case 162: ideal tropical cyclone}}

(the symbol "\verb|<--|" indicates changed parameters)
\editboxtwo{
\verb| $ vi nhm_driver.cnf| & \\
\verb| * snip *| & \\
\\
\verb| &RUNCONFPARAM | & \\
\verb| RUNNAME = 'DCMIP2016-12',| & {\verb|<--|} \\
\verb| NDIFF_LOCATION = 'IN_LARGE_STEP2',| & \\
\verb| THUBURN_LIM = .true.,| & \\
\verb| RAIN_TYPE = "WARM",| & \\
\verb| AF_TYPE = 'DCMIP',| & \\
\verb| /| & \\
\\
\verb| * snip *| & \\
\\
\verb| &DYCORETESTPARAM| & \\
\verb| init_type = 'Tropical-Cyclone',| & {\verb|<--|} \\
\verb| /| & \\
\\
\verb| * snip * | & \\
\\
\verb| &FORCING_DCMIP_PARAM | & \\
\verb| SET_DCMIP2016_12 = .true.,| & {\verb|<--|} \\
\verb| / | & \\
\\
\verb| * snip * | & \\
}

\noindent \textcolor{blue}{{\sf Note}}
\begin{itemize}
\item "RUNNAME" should be specified as "DCMIP2016-12".
\item "init\_type" should be specified as "Tropical-Cyclone".
\item \verb|FORCING_DCMIP_PARAM| should be specified as "\verb|SET_DCMIP2016_12 = .true.|".
\item "step" in \verb|NMHISD| should be changed following the required history output
      interval described in the DCMIP2016 Test Case Document.
\item items of history output variables, which are specified by "NMHIST", should be added
      following the requirements in the DCMIP2016 Test Case Document.
\item "small\_planet\_factor" in PARAM\_CONST should be set to 1.
\end{itemize}

%--------------------
\vspace{0.5cm}
\noindent {\large{\sf edit for test case 163: supercell}}

(the symbol "\verb|<--|" indicates changed parameters)
\editboxtwo{
\verb| $ vi nhm_driver.cnf | & \\
\verb| * snip * | & \\
\\
\verb| &RUNCONFPARAM| & \\
\verb| RUNNAME = 'DCMIP2016-13', | & {\verb|<--|}\\
\verb| NDIFF_LOCATION = 'IN_LARGE_STEP2',| & {\verb|<--|}\\
\verb| THUBURN_LIM = .true.,| & \\
\verb| RAIN_TYPE = "WARM",| & \\
\verb| AF_TYPE = 'DCMIP',| & \\
\verb| /| & \\
\\
\verb| * snip *| & \\
\\
\verb| &DYCORETESTPARAM| & \\
\verb| init_type = 'Supercell', | & {\verb|<--|}\\
\verb| test_case = '1', | & {\verb|<--|}\\
\verb| /| & \\
\\
\verb| * snip * | & \\
\\
\verb| &FORCING_DCMIP_PARAM| & \\
\verb| SET_DCMIP2016_13 = .true., | & {\verb|<--|}\\
\verb| /| & \\
\\
\verb| * snip *| & \\
}

\noindent \textcolor{blue}{{\sf Note}}
\begin{itemize}
\item "RUNNAME" should be specified as "DCMIP2016-13".
\item "init\_type" should be specified as "Supercell".
\item "test\_case" can be set to either 1 or 2.\\
 case 1: with initial perturbation \\
 case 2: without initial perturbation
\item \verb|FORCING_DCMIP_PARAM| should be specified as "\verb|SET_DCMIP2016_13 = .true.|".
\item "step" in \verb|NMHISD| should be changed following the required history output
      interval described in the DCMIP2016 Test Case Document.
\item items of history output variables, which are specified by "NMHIST", should be added
      following the requirements in the DCMIP2016 Test Case Document.
\item \textcolor{red}{"small\_planet\_factor" in PARAM\_CONST should be set to 120}.
\item \textcolor{red}{"earth\_angvel" in PARAM\_CONST should be set to 0}.
\end{itemize}

\noindent After the above edits, you can run the experiment in the same manner as in Section 1.4.
\begin{verbatim}
  $ make run
  $ sh run.sh
\end{verbatim}

\section{Change Physics Schemes}
%-------------------------------------------------------------------------------
\noindent Default settings for each test case in DCMIP2016 are provided in the
pre-existing configuration files. You can change these settings as you like.
Note that we have not yet checked all the combinations of physics schemes for all test cases.
\\
\noindent {\large{\sf use Large scale condensation instead of Kessler}}

\noindent The default setting for cloud microphysics is the Kessler scheme.
To use Large scale condensation (the Reed and Jablonowski (2012) precipitation scheme),
add "\verb|SET_DCMIP2016_LSC|" and set it to true.
An example for test case 161 is shown below.

%--------------------
(the symbol "\verb|<--|" indicates changed parameters)
\editboxtwo{
\verb| $ vi nhm_driver.cnf | & \\
\verb| * snip * | & \\
\\
\verb| &FORCING_DCMIP_PARAM | & \\
\verb| SET_DCMIP2016_11 = .true., | & \\
\verb| SET_DCMIP2016_LSC = .true., | & {\verb|<--|}\\
\verb| / | & \\
\\
\verb| * snip * | & \\
}

\noindent {\large{\sf no cloud physics}}

\noindent To run without any cloud physics, add "\verb|SET_DCMIP2016_DRY|" and set it to true.
An example for test case 161 is shown below.

(the symbol "\verb|<--|" indicates changed parameters)
\editboxtwo{
\verb| $ vi nhm_driver.cnf | & \\
\verb| * snip * | & \\
\\
\verb| &FORCING_DCMIP_PARAM | & \\
\verb| SET_DCMIP2016_11 = .true., | & \\
\verb| SET_DCMIP2016_DRY = .true., | & {\verb|<--|}\\
\verb| / | & \\
\\
\verb| * snip * | & \\
}

\noindent {\large{\sf use George Bryan PBL}}

\noindent The default PBL scheme is that of Reed and Jablonowski (2012).
To use the George Bryan PBL, add "\verb|SM_PBL_Bryan|" and set it to true.
This option is available only for the tropical cyclone case (162).
An example is shown below.

(the symbol "\verb|<--|" indicates changed parameters)
\editboxtwo{
\verb| $ vi nhm_driver.cnf | & \\
\verb| * snip * | & \\
\\
\verb| &FORCING_DCMIP_PARAM | & \\
\verb| SET_DCMIP2016_12 = .true., | & \\
\verb| SM_PBL_Bryan = .true., | & {\verb|<--|}\\
\verb| / | & \\
\\
\verb| * snip * | & \\
}

\noindent {\large{\sf no physics}}

\noindent To run without any physics scheme, specify "NONE" for the parameter AF\_TYPE in RUNCONFPARAM.
An example for test case 161 is shown below.

(the symbol "\verb|<--|" indicates changed parameters)
\editboxtwo{
\verb| $ vi nhm_driver.cnf | & \\
\verb| * snip *| & \\
\\
\verb| &RUNCONFPARAM| & \\
\verb| RUNNAME = 'DCMIP2016-11',| & \\
\verb| NDIFF_LOCATION = 'IN_LARGE_STEP2',| & \\
\verb| THUBURN_LIM = .true.,| & \\
\verb| RAIN_TYPE = "WARM",| & \\
\verb| AF_TYPE = 'NONE',| & {\verb|<--|} \\
\verb| /| & \\
\\
\verb| * snip *| & \\
}

\section{Use Sophisticated Physics Schemes}
%-------------------------------------------------------------------------------
\noindent You can also use more sophisticated physics schemes, like those used in SCALE-RM.
\\
\editboxtwo{
\verb| &TIMEPARAM| & \\
\verb| & DTL = 600.D0,| & \\
\verb| & DTL_rd = 1800.D0,| & {\verb|<--|}\\
\verb| & LSTEP_MAX = 50,| & \\
\verb| & INTEG_TYPE = "RK3",| & \\
\verb| & start_date = 0000,1,1,0,0,0| & \\
\verb| /| & \\
\verb| * snip *| & \\
\verb| $ vi nhm_driver.cnf | & \\
\verb| * snip * | & \\
\\
\verb| &RUNCONFPARAM| & \\
\verb| RUNNAME = 'DCMIP2016-13', | & \\
\verb| NDIFF_LOCATION = 'IN_LARGE_STEP2',| & \\
\verb| ATMOS_PHY_TYPE = '',| & {\verb|<--|}\\
\verb| ATMOS_PHY_MP_TYPE = 'TOMITA08',| & {\verb|<--|}\\
\verb| ATMOS_PHY_SF_TYPE = 'BULK',| & {\verb|<--|}\\
\verb| ATMOS_PHY_BL_TYPE = 'MYNN',| & {\verb|<--|}\\
\verb| ATMOS_PHY_RD_TYPE = 'MSTRNX',| & {\verb|<--|}\\
\verb| THUBURN_LIM = .true.,| & \\
\verb| RAIN_TYPE = "WARM",| & \\
\verb| AF_TYPE = 'DCMIP',| & \\
\verb| /| & \\
\\
\verb| * snip *| & \\
}

\begin{itemize}
\item "DTL\_rd" defines the interval of time integration. If not specified, DTL\_rd=DTL is assumed.
\item If "ATMOS\_PHY\_TYPE" is neither set to ``SIMPLE'' nor ``NONE'', sophisticated parameterizations are used.
\item ``TOMITA08'' has been tested. However, other microphysics schemes listed in
      Table V.5.1 could also be used.
\item The ``BULK'' scheme can be used as the surface flux parameterization.
\item The ``MYNN'' (Mellor-Yamada, Nakanishi-Niino) boundary layer scheme can be used.
\item The ``mstrnX'' radiation scheme can be used.
\end{itemize}

\section{Increase MPI processes}
%------------------------------------------------------------------------------
\noindent To reduce the elapsed time of the model execution, we can increase the
number of MPI processes.
For example, here we edit the settings to use 40 MPI processes with g-level 5 in test case 161.
To increase the number of MPI processes to 40, the r-level should be raised from 0 to 1,
because the upper limit on the number of processes at r-level 0 is 10.

\subsection{preparing directory}
%------------------------------------------------------------------------------
We assume we are working in \texttt{scale-{\version}/scale-gm/test/case/DCMIP2016-11/}.
\begin{verbatim}
  $ mkdir gl05rl01z30pe40     <-- r-level is 1
  $ cd gl05rl01z30pe40/
\end{verbatim}

\noindent Copy the Makefile and configuration file to the new directory.
\begin{verbatim}
  $ cp ../gl05rl00z30pe10/Makefile ./
  $ cp ../gl05rl00z30pe10/nhm_driver.cnf ./
\end{verbatim}

\subsection{Edit Makefile}
%------------------------------------------------------------------------------
(the symbol "\verb|<--|" indicates changed parameters) \\
Edit the parameters on lines 17 to 21.
\editboxtwo{
\verb| $ vi Makefile |& \\
\verb| glevel = 5 |& \\
\verb| rlevel = 1 |& {\verb|<--|}\\
\verb| nmpi = 40 |& {\verb|<--|}\\
\verb| zlayer = 30 |& \\
\verb| vgrid = vgrid30_stretch_30km_dcmip2016.dat |& \\
}

\subsection{Edit configuration file: nhm\_driver.cnf}
%------------------------------------------------------------------------------
(the symbol "\verb|<--|" indicates changed parameters)
\editboxtwo{
\verb| $ vi nhm_driver.cnf |&\\
\verb| * snip * |&\\
\verb| &ADMPARAM |&\\
\verb| glevel = 5, |&\\
\verb| rlevel = 1, |& {\verb|<--|} \\
\verb| vlayer = 30, |&\\
\verb| rgnmngfname = "rl01-prc40.info", |& {\verb|<--|} \\
\verb| / |&\\
\\
\verb| &GRDPARAM |&\\
\verb| hgrid_io_mode = "ADVANCED", |&\\
\verb| hgrid_fname = "boundary_GL05RL01", |& {\verb|<--|} \\
\verb| VGRID_fname = "vgrid30_stretch_30km_dcmip2016.dat", |&\\
\verb| vgrid_scheme = "LINEAR", |&\\
\verb| topo_fname = "NONE", |&\\
\verb| / |&\\
\\
\verb| * snip * |&\\
\\
\verb| &RESTARTPARAM |&\\
\verb| input_io_mode = 'IDEAL', |&\\
\verb| output_io_mode = 'ADVANCED', |&\\
\verb| output_basename = 'restart_all_GL05RL01z30', |& {\verb|<--|} \\
\verb| restart_layername = 'ZSALL32_DCMIP16', |&\\
\verb| / |&\\
}

\noindent After the above edits, you can run the experiment in the same manner as in Section 1.4.
\begin{verbatim}
  $ make run
  $ sh run.sh
\end{verbatim}

\section{Change grid spacing}
%------------------------------------------------------------------------------
\noindent This is an example of changing the grid spacing to that of g-level 6
(approximately 120~km) with 40 MPI processes in test case 161.
When the horizontal grid spacing is changed, some additional settings should also be
changed, for example, the interval of time integration (DTL), the maximum number of
time steps (LSTEP\_MAX), the numerical filter parameters, and the output interval of
the history data.
\subsection{preparing directory}
%------------------------------------------------------------------------------
We assume we are working in \texttt{scale-{\version}/scale-gm/test/case/DCMIP2016-11/}.
\begin{verbatim}
  $ mkdir gl06rl01z30pe40     <-- g-level is 6, and r-level is 1
  $ cd gl06rl01z30pe40/
\end{verbatim}

\noindent Copy the Makefile and configuration file to the new directory.
\begin{verbatim}
  $ cp ../gl05rl00z30pe10/Makefile ./
  $ cp ../gl05rl00z30pe10/nhm_driver.cnf ./
\end{verbatim}

\subsection{Edit Makefile}
%------------------------------------------------------------------------------
(the symbol "\verb|<--|" indicates changed parameters) \\
Edit the parameters on lines 17 to 21.
\editboxtwo{
\verb| $ vi Makefile | & \\
\verb| glevel = 6 | & {\verb|<--|} \\
\verb| rlevel = 1 | & {\verb|<--|} \\
\verb| nmpi = 40 | & {\verb|<--|} \\
\verb| zlayer = 30 | & \\
\verb| vgrid = vgrid30_stretch_30km_dcmip2016.dat | & \\
}

\subsection{Edit configuration file: nhm\_driver.cnf}
%------------------------------------------------------------------------------
\noindent A guideline for changing the interval of time integration (DTL) is \\
{\sf to halve DTL for each increase of the g-level by one}.

\noindent A guideline for changing the numerical filter parameters is \\
{\sf to reduce the coefficient values to 1/8 for each increase of the g-level by one}.
For example, DTL is changed from 600~s at g-level 5 to 300~s at g-level 6 in the
configuration below; a small script for computing such scaled values is sketched at
the end of this section. \\
\noindent (the symbol "\verb|<--|" indicates changed parameters)
\editboxtwo{
\verb| $ vi nhm_driver.cnf | & \\
\verb| * snip * | & \\
\verb| &ADMPARAM | & \\
\verb| glevel = 6, | & {\verb|<--|}\\
\verb| rlevel = 1, | & \\
\verb| vlayer = 30, | & \\
\verb| rgnmngfname = "rl01-prc40.info", | & {\verb|<--|}\\
\verb| / | & \\
\\
\verb| &GRDPARAM | & \\
\verb| hgrid_io_mode = "ADVANCED", | & \\
\verb| hgrid_fname = "boundary_GL06RL01", | & {\verb|<--|}\\
\verb| VGRID_fname = "vgrid30_stretch_30km_dcmip2016.dat", | & \\
\verb| vgrid_scheme = "LINEAR", | & \\
\verb| topo_fname = "NONE", | & \\
\verb| / | & \\
\\
\verb| &TIMEPARAM | & \\
\verb| DTL = 300.D0, | & {\verb|<--|}\\
\verb| INTEG_TYPE = "RK3", | & \\
\verb| LSTEP_MAX = 4320, | & {\verb|<--|}\\
\verb| start_date = 0000,1,1,0,0,0 | & \\
\verb| / | & \\
\\
\verb| * snip * | & \\
\\
\verb| &RESTARTPARAM | & \\
\verb| input_io_mode = 'IDEAL', | & \\
\verb| output_io_mode = 'ADVANCED', | & \\
\verb| output_basename = 'restart_all_GL06RL01z30', | & {\verb|<--|}\\
\verb| restart_layername = 'ZSALL32_DCMIP16', | & \\
\verb| / | & \\
\\
\verb| * snip * | & \\
\\
\verb| &NUMFILTERPARAM | & \\
\verb| lap_order_hdiff = 2, | & \\
\verb| hdiff_type = 'NONLINEAR1', | & \\
\verb| Kh_coef_maxlim = 1.500D+16, | & {\verb|<--|}\\
\verb| Kh_coef_minlim = 1.500D+15, | & {\verb|<--|}\\
\verb| ZD_hdiff_nl = 20000.D0, | & \\
\verb| divdamp_type = 'DIRECT', | & \\
\verb| lap_order_divdamp = 2, | & \\
\verb| alpha_d = 1.50D15, | & {\verb|<--|}\\
\verb| gamma_h_lap1 = 0.0D0, | & \\
\verb| ZD = 40000.D0, | & \\
\verb| alpha_r = 0.0D0, | & \\
\verb| / | & \\
\\
\verb| * snip * | & \\
\\
\verb| &NMHISD | & \\
\verb| output_io_mode = 'ADVANCED' , | & \\
\verb| histall_fname = 'history' , | & \\
\verb| hist3D_layername = 'ZSDEF30_DCMIP16', | & \\
\verb| NO_VINTRPL = .false. , | & \\
\verb| output_type = 'SNAPSHOT' , | & \\
\verb| step = 288 , | & {\verb|<--|}\\
\verb| doout_step0 = .true. , | & \\
\verb| / | & \\
}

\noindent After the above edits, you can run the experiment in the same manner as in Section 1.4.
\begin{verbatim}
  $ make run
  $ sh run.sh
\end{verbatim}
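\vspace{0.5cm}
\noindent The guideline scalings above can also be computed with a short script.
The following Python sketch is only an illustration and is not part of SCALE-GM;
the helper name is arbitrary, and the starting value of \verb|alpha_d| is an assumed
g-level 5 value chosen so that dividing it by 8 reproduces the g-level 6 value shown above.
\begin{verbatim}
# Illustrative sketch (not part of SCALE-GM): scale DTL and numerical
# filter coefficients when raising the g-level, following the guidelines
# in this section (halve DTL per g-level; take 1/8 of the coefficients).

def suggested_settings(glevel_from, glevel_to, dtl, coefs):
    """Return the scaled DTL and filter coefficients for glevel_to."""
    steps = glevel_to - glevel_from
    dtl_new = dtl / (2.0 ** steps)
    coefs_new = {name: value / (8.0 ** steps) for name, value in coefs.items()}
    return dtl_new, coefs_new

dtl_new, coefs_new = suggested_settings(
    glevel_from=5, glevel_to=6, dtl=600.0,
    coefs={"alpha_d": 1.2e16},  # assumed g-level 5 value
)
print(dtl_new)    # 300.0  -> DTL in the g-level 6 configuration above
print(coefs_new)  # {'alpha_d': 1.5e+15}  -> alpha_d above
\end{verbatim}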
{ "alphanum_fraction": 0.5422901244, "avg_line_length": 36.6288461538, "ext": "tex", "hexsha": "9177bb58707a4ffbf491c4a8724feefe4cb2a759", "lang": "TeX", "max_forks_count": 2, "max_forks_repo_forks_event_max_datetime": "2021-07-28T22:20:41.000Z", "max_forks_repo_forks_event_min_datetime": "2020-07-10T10:39:45.000Z", "max_forks_repo_head_hexsha": "ca4b476ad55cb728b2009f0427ce3f7161ecfcf7", "max_forks_repo_licenses": [ "BSD-2-Clause" ], "max_forks_repo_name": "slayoo/scale", "max_forks_repo_path": "doc/users-guide/en/gm_03conf.tex", "max_issues_count": 1, "max_issues_repo_head_hexsha": "ca4b476ad55cb728b2009f0427ce3f7161ecfcf7", "max_issues_repo_issues_event_max_datetime": "2021-07-30T05:08:47.000Z", "max_issues_repo_issues_event_min_datetime": "2021-07-29T03:38:05.000Z", "max_issues_repo_licenses": [ "BSD-2-Clause" ], "max_issues_repo_name": "slayoo/scale", "max_issues_repo_path": "doc/users-guide/en/gm_03conf.tex", "max_line_length": 116, "max_stars_count": 7, "max_stars_repo_head_hexsha": "ca4b476ad55cb728b2009f0427ce3f7161ecfcf7", "max_stars_repo_licenses": [ "BSD-2-Clause" ], "max_stars_repo_name": "slayoo/scale", "max_stars_repo_path": "doc/users-guide/en/gm_03conf.tex", "max_stars_repo_stars_event_max_datetime": "2022-01-13T05:29:55.000Z", "max_stars_repo_stars_event_min_datetime": "2020-06-14T11:12:31.000Z", "num_tokens": 6190, "size": 19047 }
\documentclass[12pt,twocolumn,tighten]{aastex62}
%\pdfoutput=1 %for arXiv submission

\usepackage{amsmath,amstext,amssymb}
\usepackage[T1]{fontenc}
\usepackage{apjfonts}
\usepackage[figure,figure*]{hypcap}
\usepackage{graphics,graphicx}
\usepackage{hyperref}
\usepackage{comment}

\renewcommand*{\sectionautorefname}{Section} %for \autoref
\renewcommand*{\subsectionautorefname}{Section} %for \autoref

%% Reintroduced the \received and \accepted commands from AASTeX v5.2.
%% Add "Submitted to " argument.
%\received{---}
%\revised{---}
%\accepted{---}
%\submitjournal{}

\shortauthors{Bouma et al.}
\shorttitle{Vetting Report Description Document}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% BEGIN CUSTOM SHORT-CUT COMMANDS

\newcommand{\stscilink}{\url{archive.stsci.edu/prepds/cdips}}
\newcommand{\stscivetlink}{\url{archive.stsci.edu/prepds/cdips/vetting}}

% END CUSTOM SHORT-CUT COMMANDS
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%%%%%
%\NewPageAfterKeywords

\begin{document}

\title{
  Cluster Difference Imaging Photometric Survey.
  Vetting Report Description Document
}

\correspondingauthor{L. G. Bouma}
\email{[email protected]}

\author[0000-0002-0514-5538]{L. G. Bouma}
\affiliation{ Department of Astrophysical Sciences, Princeton University, 4 Ivy Lane, Princeton, NJ 08540, USA}
%
\author[0000-0001-8732-6166]{J. D. Hartman}
\affiliation{ Department of Astrophysical Sciences, Princeton University, 4 Ivy Lane, Princeton, NJ 08540, USA}
%
\author[0000-0002-0628-0088]{W. Bhatti}
\affiliation{ Department of Astrophysical Sciences, Princeton University, 4 Ivy Lane, Princeton, NJ 08540, USA}
%
\author[0000-0002-4265-047X]{J. N. Winn}
\affiliation{ Department of Astrophysical Sciences, Princeton University, 4 Ivy Lane, Princeton, NJ 08540, USA}
%
\author[0000-0001-7204-6727]{G. \'A. Bakos}
\affiliation{ Department of Astrophysical Sciences, Princeton University, 4 Ivy Lane, Princeton, NJ 08540, USA}

\begin{abstract}
To find planet candidates in clusters, we make vetting reports using our light-curves \citep{bouma_cdips_2019} and auxiliary data.
This document describes the CDIPS planet candidate vetting reports uploaded by \texttt{lbouma} to ExoFOP-TESS 2019-09-18 through 2019-11-25.
\\
\end{abstract}

%\keywords{
%  Transit photometry (1709),
%  Stellar ages (1581)
%}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\section{Vetting Report Description}
\label{appendix:vetreport}

The NASA and MIT teams \citep{jenkins_spoc_2010,huang_tess_2018} produce vetting reports to assess the quality of planet candidates identified through their transiting planet search pipelines.
One goal of the CDIPS project is to detect transiting planets with known ages.
Therefore, our vetting reports include information to help assess {\it (a)} whether the transiting planet candidate is real, and {\it (b)} whether the reported age is correct.
The code used to make these reports for the 2019-09-18 through 2019-11-25 planet candidates is available online\footnote{
	\url{https://github.com/lgbouma/cdips/tree/a83a30}}.
Figures~\ref{fig:pg1} to~\ref{fig:pg6} summarize the document constructed for these purposes.
The planet candidate shown in these figures (Gaia-DR2 \texttt{5541111035713815552} = TIC \texttt{110718787}) was chosen in part because it passed all the tests.
It was reported\footnote{\url{https://exofop.ipac.caltech.edu/tess/view_ctoi.php}} as a giant planet candidate in an open cluster on Sep 9, 2019.
Higher-resolution HATSouth photometry revealed about a month afterward that the dip signal comes from a neighboring eclipsing binary.
\subsection{Transit search summary}
\label{sec:pg1}

Figure~\ref{fig:pg1}.
Periodograms from TLS and phase-dispersion minimization, calculated with \texttt{astrobase.periodbase}, are shown in the top left and top center \citep{bhatti_astrobase_2018,hippke_TLS_2019,stellingwerf_period_1978}.
The top three peaks from each method are shown in the second and third rows; the raw light-curve is in the top-right.
A small finder chart from DSS is inset to the top left, with the 1.5-pixel radius aperture used to extract the light-curve in orange.

\subsection{Light-curve diagnostics}
\label{sec:pg2}

Figure~\ref{fig:pg2}.
Time-series of raw flux (\texttt{IRM2}), TFA-detrended flux (\texttt{TF2}), stellar-variability detrended flux, and the background are shown as a function of barycentric Julian date.
The overplotted dashed vertical lines are the ephemeris of the highest-power TLS peak from Figure~\ref{fig:pg1}.
An important visual check is whether the flux dips are correlated with changes in the background level -- in this case, they are not.
The standard deviation and TESS magnitude are quoted in the upper right.

The red line in the second-from-the-top plot is a spline fit, which in this case is an essential step for finding the eclipse signal.
The windowed spline is an optional feature: it is only fitted and removed if the star is found to be ``variable'' (the Lomb-Scargle peak period is found with false alarm probability below $10^{-5}$).
The spline is a robust penalized B-spline, which is a B-spline with knot-length automatically determined via cross-validation \citep{eilers_flexible_1996}.
The idea behind the cross-validation is that more knots lead to smaller residuals on the training data, but larger errors when tested on the entire dataset.
We used the \texttt{wotan} implementation, which is a wrapper to the \texttt{pyGAM} spline fitter, with $2\sigma$ clipping of outliers from the fit residuals at each iteration \citep{serven_pygam_2018_1476122,hippke_wotan_2019}.
The maximum number of spline knots was set to 50, which for each TESS sector ($\approx 25\,{\rm days}$) is commensurate with a $\approx0.5\,{\rm day}$ window.

\subsection{Transit diagnostics}
\label{sec:pg3}

Figure~\ref{fig:pg3}.
The plots show the maximally-detrended light-curve (top); the phase-folded light-curve centered over $\pm3$ transit durations of the primary transit (middle left); the secondary eclipse (middle right); the odd-numbered transits (lower left); and the even-numbered transits (lower right).

The stellar parameters ($T_{\rm eff}, R_\star, M_\star$) are taken from TICv8 when available \citep{stassun_TIC8_2019}.
%FIXME: if not, what happens?!?!
%Gaia-DR2 Apsis pipeline results when available
%\citep{andrae_apsis_2018}.
The first eight lines of text are parameters determined from the best-fitting TLS model.
The one exception is the planet radius, which uses the stellar radius as noted above.
The ``flux contamination'' (\texttt{TICCONT}) from neighboring stars is {\it never} taken into account, because transit depth dilution does not affect image subtraction analyses in the same manner as aperture-photometry reductions.

The significance of the odd-to-even asymmetry is quoted, but given the strong rotational variability in this object (Figure~\ref{fig:pg2}), the apparent odd-even asymmetry could have been caused by the detrending process.
To estimate the transit-to-occultation depth ratio $\delta_{\rm tra}/\delta_{\rm occ}$, the phase-folded light-curve is also fit by a sum of two Gaussians (in this case, the fit failed).
``AstExc'' refers to the Gaia-DR2 astrometric excess, which can indicate hints of astrometric binarity in the system. ``$d_{\rm geom}$'' is the geometric distance from \citet{bailer-jones_distances_2018}. ``$R_\star + M_\star \rightarrow T_{b0}$'' gives the duration of a zero-eccentricity central transit based on the TICv8 stellar radius and mass if available. If the mass is not available, a stellar mass is interpolated from the \citet{pecaut_intrinsic_2013} table, under the assumption that the star is a dwarf.
%\footnote{\url{http://www.pas.rochester.edu/~emamajek/EEM_dwarf_UBVIJHK_colors_Teff.txt}}.

\subsection{Light-curves for increasing aperture sizes}
\label{sec:pg4}

Figure~\ref{fig:pg4}. Apertures of radius 1, 1.5, and 2.25 pixels are shown from top to bottom. The blue line is the reference transit depth from the best-fitting TLS model. Changes in depth with increasing aperture size can indicate that the source of variability is off-center from the aperture, suggesting a photometric blend.

\subsection{Cluster membership assessment diagnostics}
\label{sec:pg5}

Figure~\ref{fig:pg5}. The star was considered a candidate cluster member by the source(s) listed under ``Reference'', in this case \citet{cantat-gaudin_gaia_2018}. The name used in their catalog in this case was \texttt{5599752663752776192}, a Gaia-DR2 identifier, which can be back-referenced to find that \citet{cantat-gaudin_gaia_2018} assigned this star a membership probability in Haffner 13 of just 10\%.

The base catalog for the plots is chiefly that of \citet{Kharchenko_et_al_2013}, due to its homogeneous parameter determination procedure (particularly for age). If a match to the \citet{Kharchenko_et_al_2013} catalog is found, then the remaining plots are populated. Top-left shows the parallax, with orange points sampled from the Gaia-DR2 posterior, black points the other cluster members in the Kharchenko catalog, and the blue line the claimed Kharchenko parallax for the cluster. A number of field contaminants in the Kharchenko catalog are visible in this case. Top-right are the Gaia proper motions, where the black points are cluster members from Kharchenko and the orange point is the target star. Bottom-left is the color-magnitude diagram, and bottom-right are the on-sky positions.

In the text, \texttt{N1sr2} is the number of $1\sigma$ cluster members reported by \citet{Kharchenko_et_al_2013} within the cluster angular radius; $\log t$ is the base-10 logarithm of the age in years; \texttt{type} matches the type codes provided by \citet{Kharchenko_et_al_2013}; \texttt{K13Note} gives the description of the cluster from \citet{Kharchenko_et_al_2013}, if available. Extra caution must be taken when interpreting this set of plots, since they can only show disagreement between the observed star's properties and those of the listed Kharchenko members (and the latter may be biased).

\subsection{Imaging variability diagnostics}
\label{sec:pg6}

Figure~\ref{fig:pg6}. This page helps diagnose which stars are producing the observed variability. Top-left and top-center are the mean out-of-transit (OOT) and mean in-transit calibrated images (separate from any of our image-subtraction analysis). The OOT images are based on the same number of exposures as the in-transit images and split evenly before and after each transit \citep[following][]{bryson_identification_2013,kostov_l9859_2019}.
The yellow star is the target; cyan dots are the flux-weighted centroid of the entire image for each transit event; small red crosses are WCS-projected locations of neighbor stars. Middle-left is the most important sub-panel: the difference between the OOT and in-transit mean images. If the variability shown in the background map (units: ADU) is off-target, the transit is typically not from the target star. Middle-center is the same, normalized by the uncertainty map. Lower left and lower center show the DSS field in linear and log scales at roughly the same pixel scale as the TESS image, with the 1, 1.5, and 2.25 pixel-radius apertures in blue, orange, and green respectively. The brightness of neighborhood stars is given on the far right. Note the slight coordinate rotation difference between DSS and TESS images; DSS images are aligned north-up, east-left; TESS images are oriented as closely as possible to this system without actually performing the rotation.

\section{Neighborhood plots}

The standard vetting report's neighborhood analysis is helpful, but insufficient for determination of cluster membership. A more thorough approach is to query Gaia-DR2 for nearby stars in position, parallax, and proper motion space, and let the data speak for itself regarding {\it (a)} the existence of the group, and {\it (b)} the target star's membership within the group.

For these plots, the ``neighborhood'' is defined as a group of at most $10^4$ randomly selected stars within:
\begin{align}
\langle \alpha \rangle \pm 5\sigma_\alpha, \\
\langle \delta \rangle \pm 5\sigma_\delta, \\
\langle \pi \rangle \pm 5\sigma_\pi,
\end{align}
where $(\alpha, \delta, \pi)$ are the right ascension, declination, and parallax. $\langle x \rangle$ denotes the mean over all stars within the claimed cluster, and $\sigma_x$ the standard deviation. The limiting $G$ magnitude for the ``neighborhood'' is set to 18 for \citet{cantat-gaudin_gaia_2018} groups, and 16 for \citet{Kharchenko_et_al_2013} groups. For the 2019-09-18 through 2019-11-25 deliveries, these plots were made with code available online\footnote{ \url{https://github.com/lgbouma/cdips_followup/tree/e4d9d}}.

\subsection{Neighborhood diagnostic}
\label{sec:nbhd}

Figure~\ref{fig:nbhd} shows the labelled quantities from the target star, the neighborhood, and the ``cluster members'' reported by either \citet{cantat-gaudin_gaia_2018} or \citet{Kharchenko_et_al_2013}. The top three subplots intentionally omit the labelled cluster members, in order to let the user make their own by-eye assessment of whether they see clusters in the neighborhood, and whether the target star is within those clusters.

\subsection{Neighborhood diagnostic, with overplot}
\label{sec:nbhd_extra}

Figure~\ref{fig:nbhd_extra}. Same as Figure~\ref{fig:nbhd}, but with overplotted cluster members on the upper three subplots.

\begin{figure*}[!h]
\begin{center}
\leavevmode
\includegraphics[width=0.8\textwidth]{gaiatwo0005599752663752776192-0007_page01.pdf}
\end{center}
\vspace{-0.5cm}
\caption{
{\bf Transit search summary.}
See \S~\ref{sec:pg1}.
\label{fig:pg1}
}
\end{figure*}

\begin{figure*}[!h]
\begin{center}
\leavevmode
\includegraphics[width=0.8\textwidth]{gaiatwo0005599752663752776192-0007_page02.pdf}
\end{center}
\vspace{-0.5cm}
\caption{
{\bf Light-curve diagnostics.}
See \S~\ref{sec:pg2}.
\label{fig:pg2} } \end{figure*} \begin{figure*}[!h] \begin{center} \leavevmode \includegraphics[width=0.8\textwidth]{gaiatwo0005599752663752776192-0007_page03.pdf} \end{center} \vspace{-0.5cm} \caption{ {\bf Transit diagnostics.} See \S~\ref{sec:pg3}. \label{fig:pg3} } \end{figure*} \begin{figure*}[!h] \begin{center} \leavevmode \includegraphics[width=0.8\textwidth]{gaiatwo0005599752663752776192-0007_page04.pdf} \end{center} \vspace{-0.5cm} \caption{ {\bf Light-curves for increasing aperture sizes.} See \S~\ref{sec:pg4}. \label{fig:pg4} } \end{figure*} \begin{figure*}[!h] \begin{center} \leavevmode \includegraphics[width=0.8\textwidth]{gaiatwo0005599752663752776192-0007_page05.pdf} \end{center} \vspace{-0.5cm} \caption{ {\bf Cluster membership assessment diagnostics.} See \S~\ref{sec:pg5}. \label{fig:pg5} } \end{figure*} \begin{figure*}[!h] \begin{center} \leavevmode \includegraphics[width=0.8\textwidth]{gaiatwo0005599752663752776192-0007_page06.pdf} \end{center} \vspace{-0.5cm} \caption{ {\bf Imaging variability diagnostics.} See \S~\ref{sec:pg6}. \label{fig:pg6} } \end{figure*} \begin{figure*}[!h] \begin{center} \leavevmode \includegraphics[width=0.85\textwidth]{TIC125192758O-lb20190918_NGC_2323_neighborhood.png} \end{center} \vspace{-0.5cm} \caption{ {\bf Neighborhood diagnostic.} See \S~\ref{sec:nbhd}. \label{fig:nbhd} } \end{figure*} \begin{figure*}[!h] \begin{center} \leavevmode \includegraphics[width=0.85\textwidth]{TIC125192758O-lb20190918_NGC_2323_neighborhood_extra.png} \end{center} \vspace{-0.5cm} \caption{ {\bf Neighborhood diagnostic with additional overplotted points.} See \S~\ref{sec:nbhd_extra}. \label{fig:nbhd_extra} } \end{figure*} %%%%%%%%%%% % BIBLIOGRAPHY % %%%%%%%%%%% \clearpage \newpage \bibliographystyle{yahapj} \bibliography{bibliography} \end{document}
{ "alphanum_fraction": 0.7667882294, "avg_line_length": 38.3228915663, "ext": "tex", "hexsha": "e87b872530e3824c89a50037744dde8c5677408d", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "187e15e620cd44160372dbfa9da989d38722c3e5", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "lgbouma/cdips", "max_forks_repo_path": "doc/vetting_report_documentation/20191127_sector_6_and_sector_7_doc_TeX/ms.tex", "max_issues_count": 3, "max_issues_repo_head_hexsha": "187e15e620cd44160372dbfa9da989d38722c3e5", "max_issues_repo_issues_event_max_datetime": "2021-08-18T17:55:10.000Z", "max_issues_repo_issues_event_min_datetime": "2019-08-17T20:33:23.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "lgbouma/cdips", "max_issues_repo_path": "doc/vetting_report_documentation/20191127_sector_6_and_sector_7_doc_TeX/ms.tex", "max_line_length": 98, "max_stars_count": 1, "max_stars_repo_head_hexsha": "187e15e620cd44160372dbfa9da989d38722c3e5", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "lgbouma/cdips", "max_stars_repo_path": "doc/vetting_report_documentation/20191127_sector_6_and_sector_7_doc_TeX/ms.tex", "max_stars_repo_stars_event_max_datetime": "2019-10-04T02:03:25.000Z", "max_stars_repo_stars_event_min_datetime": "2019-10-04T02:03:25.000Z", "num_tokens": 4659, "size": 15904 }
\PassOptionsToPackage{unicode=true}{hyperref} % options for packages loaded elsewhere \PassOptionsToPackage{hyphens}{url} % \documentclass[]{book} \usepackage{lmodern} \usepackage{amssymb,amsmath} \usepackage{ifxetex,ifluatex} \usepackage{fixltx2e} % provides \textsubscript \ifnum 0\ifxetex 1\fi\ifluatex 1\fi=0 % if pdftex \usepackage[T1]{fontenc} \usepackage[utf8]{inputenc} \usepackage{textcomp} % provides euro and other symbols \else % if luatex or xelatex \usepackage{unicode-math} \defaultfontfeatures{Ligatures=TeX,Scale=MatchLowercase} \fi % use upquote if available, for straight quotes in verbatim environments \IfFileExists{upquote.sty}{\usepackage{upquote}}{} % use microtype if available \IfFileExists{microtype.sty}{% \usepackage[]{microtype} \UseMicrotypeSet[protrusion]{basicmath} % disable protrusion for tt fonts }{} \IfFileExists{parskip.sty}{% \usepackage{parskip} }{% else \setlength{\parindent}{0pt} \setlength{\parskip}{6pt plus 2pt minus 1pt} } \usepackage{hyperref} \hypersetup{ pdftitle={Foundational Statistics - Bi 610 - Spring 2020}, pdfauthor={Clayton M. Small, William A. Cresko, and Andrew Muehleisen}, pdfborder={0 0 0}, breaklinks=true} \urlstyle{same} % don't use monospace font for urls \usepackage{color} \usepackage{fancyvrb} \newcommand{\VerbBar}{|} \newcommand{\VERB}{\Verb[commandchars=\\\{\}]} \DefineVerbatimEnvironment{Highlighting}{Verbatim}{commandchars=\\\{\}} % Add ',fontsize=\small' for more characters per line \usepackage{framed} \definecolor{shadecolor}{RGB}{248,248,248} \newenvironment{Shaded}{\begin{snugshade}}{\end{snugshade}} \newcommand{\AlertTok}[1]{\textcolor[rgb]{0.94,0.16,0.16}{#1}} \newcommand{\AnnotationTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}} \newcommand{\AttributeTok}[1]{\textcolor[rgb]{0.77,0.63,0.00}{#1}} \newcommand{\BaseNTok}[1]{\textcolor[rgb]{0.00,0.00,0.81}{#1}} \newcommand{\BuiltInTok}[1]{#1} \newcommand{\CharTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}} \newcommand{\CommentTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textit{#1}}} \newcommand{\CommentVarTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}} \newcommand{\ConstantTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}} \newcommand{\ControlFlowTok}[1]{\textcolor[rgb]{0.13,0.29,0.53}{\textbf{#1}}} \newcommand{\DataTypeTok}[1]{\textcolor[rgb]{0.13,0.29,0.53}{#1}} \newcommand{\DecValTok}[1]{\textcolor[rgb]{0.00,0.00,0.81}{#1}} \newcommand{\DocumentationTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}} \newcommand{\ErrorTok}[1]{\textcolor[rgb]{0.64,0.00,0.00}{\textbf{#1}}} \newcommand{\ExtensionTok}[1]{#1} \newcommand{\FloatTok}[1]{\textcolor[rgb]{0.00,0.00,0.81}{#1}} \newcommand{\FunctionTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}} \newcommand{\ImportTok}[1]{#1} \newcommand{\InformationTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}} \newcommand{\KeywordTok}[1]{\textcolor[rgb]{0.13,0.29,0.53}{\textbf{#1}}} \newcommand{\NormalTok}[1]{#1} \newcommand{\OperatorTok}[1]{\textcolor[rgb]{0.81,0.36,0.00}{\textbf{#1}}} \newcommand{\OtherTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{#1}} \newcommand{\PreprocessorTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textit{#1}}} \newcommand{\RegionMarkerTok}[1]{#1} \newcommand{\SpecialCharTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}} \newcommand{\SpecialStringTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}} \newcommand{\StringTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}} \newcommand{\VariableTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}} \newcommand{\VerbatimStringTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}} 
\newcommand{\WarningTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}} \usepackage{longtable,booktabs} % Fix footnotes in tables (requires footnote package) \IfFileExists{footnote.sty}{\usepackage{footnote}\makesavenoteenv{longtable}}{} \usepackage{graphicx,grffile} \makeatletter \def\maxwidth{\ifdim\Gin@nat@width>\linewidth\linewidth\else\Gin@nat@width\fi} \def\maxheight{\ifdim\Gin@nat@height>\textheight\textheight\else\Gin@nat@height\fi} \makeatother % Scale images if necessary, so that they will not overflow the page % margins by default, and it is still possible to overwrite the defaults % using explicit options in \includegraphics[width, height, ...]{} \setkeys{Gin}{width=\maxwidth,height=\maxheight,keepaspectratio} \setlength{\emergencystretch}{3em} % prevent overfull lines \providecommand{\tightlist}{% \setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}} \setcounter{secnumdepth}{5} % Redefines (sub)paragraphs to behave more like sections \ifx\paragraph\undefined\else \let\oldparagraph\paragraph \renewcommand{\paragraph}[1]{\oldparagraph{#1}\mbox{}} \fi \ifx\subparagraph\undefined\else \let\oldsubparagraph\subparagraph \renewcommand{\subparagraph}[1]{\oldsubparagraph{#1}\mbox{}} \fi % set default figure placement to htbp \makeatletter \def\fps@figure{htbp} \makeatother \usepackage{booktabs} \usepackage{amsthm} \makeatletter \def\thm@space@setup{% \thm@preskip=8pt plus 2pt minus 4pt \thm@postskip=\thm@preskip } \makeatother \usepackage[]{natbib} \bibliographystyle{apalike} \title{Foundational Statistics - Bi 610 - Spring 2020} \author{Clayton M. Small, William A. Cresko, and Andrew Muehleisen} \date{2020-09-25} \begin{document} \maketitle { \setcounter{tocdepth}{1} \tableofcontents } \hypertarget{course-overview}{% \chapter{Course Overview}\label{course-overview}} This is the complete set of \emph{course materials} for the \emph{Foundational Statistics Course} at the University of Oregon for the Spring of 2020. It is written in \textbf{Markdown} so that it can be easily updated. In this book you will find nearly all the information you will need to complete the course. \hypertarget{introduction-to-the-course}{% \chapter{Introduction to the course}\label{introduction-to-the-course}} This is the complete set of \emph{course materials} for the \emph{Foundational Statistics Course} at the University of Oregon for the Spring of 2020. It is written in \textbf{Markdown} so that it can be easily updated. In this book you will find nearly all the information you will need to complete the course. \hypertarget{instructors}{% \section{Instructors}\label{instructors}} Dr.~Clay Small, \href{mailto:[email protected]}{\nolinkurl{[email protected]}} Dr.~Bill Cresko, \href{mailto:[email protected]}{\nolinkurl{[email protected]}} \hypertarget{course-information}{% \section{Course Information}\label{course-information}} Virtual Office Hours: T-R 12 to 1:30 (Zoom) \hypertarget{software}{% \section{Software}\label{software}} \begin{itemize} \item Latest version of R \item Latest version of RStudio \end{itemize} \hypertarget{inclusion-and-accessibility}{% \section{Inclusion and Accessibility}\label{inclusion-and-accessibility}} Please tell us your preferred pronouns and/or name, especially if it differs from the class roster. We take seriously our responsibility to create inclusive learning environments. Please notify us if there are aspects of the instruction or design of this course that result in barriers to your participation! 
You are also encouraged to contact the Accessible Education Center in 164 Oregon Hall at 541-346-1155 or \href{mailto:[email protected]}{\nolinkurl{[email protected]}}. We are committed to making this course an inclusive and respectful learning space. Being respectful includes using preferred pronouns for your classmates. Your classmates come from a diverse set of backgrounds and experiences; please avoid assumptions or stereotypes, and aim for inclusivity. Let us know if there are classroom dynamics that impede your (or someone else's) full engagement. Because of the COVID-19 pandemic, this course is being delivered entirely remotely. We realize that this situation makes it difficult for some students to interact with the material, for a variety of reasons. We are committed to flexibility during this stressful time and emphasize that we will work with students to overcome difficult barriers as they arise. Please see this page for more information on campus resources, academic integrity, discrimination, and harassment (and reporting of it). \hypertarget{course-schedule}{% \chapter{Course Schedule}\label{course-schedule}} \hypertarget{weeks-1-2}{% \section{Weeks 1-2}\label{weeks-1-2}} \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \tightlist \item Data organization and management \begin{itemize} \tightlist \item best practices, reproducibility, etc. \end{itemize} \item Basic programming fundamentals for data curation \begin{itemize} \tightlist \item The Unix environment and fundamental commands \item Formatting and manipulating tabular text files from the terminal \end{itemize} \item Introduction to R and Rstudio \begin{itemize} \tightlist \item Installation/Updates \item R object types and assignment \end{itemize} \item Practice with R objects \begin{itemize} \tightlist \item vectors, matrices, data frames, etc. \end{itemize} \item Applying core programming fundamentals in R \begin{itemize} \tightlist \item vectorized operations \item replicate, apply family, ifelse, for loops, etc. \end{itemize} \end{enumerate} \hypertarget{week-3}{% \section{Week 3}\label{week-3}} \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \tightlist \item Plotting/visualizing data as a means of exploration \begin{itemize} \tightlist \item Different plot types \item Scale, transformations, etc. \end{itemize} \item Fundamentals of plotting in base R \begin{itemize} \tightlist \item par \item using palettes, points, sizes, etc. to convey information \item axes and labels \end{itemize} \item R markdown \end{enumerate} \hypertarget{week-4}{% \section{Week 4}\label{week-4}} \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \item Population parameters, samples, and sampling distributions \begin{itemize} \tightlist \item Central Limit Theorem and the normal dist. \item Mean and st. dev. \end{itemize} \item Probability and probability distributions \item Calculating summary statistics \begin{itemize} \tightlist \item Other common summary statistics (quantiles, etc.) 
\end{itemize}
\end{enumerate}

\hypertarget{week-5}{%
\section{Week 5}\label{week-5}}

\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\tightlist
\item
  Parameter estimation

  \begin{itemize}
  \tightlist
  \item
    Simulating data sets with known parameters
  \item
    Revisit probability distributions
  \end{itemize}
\item
  Uncertainty in estimation

  \begin{itemize}
  \tightlist
  \item
    Parametric and nonparametric approaches to uncertainty
  \end{itemize}
\end{enumerate}

\hypertarget{week-6}{%
\section{Week 6}\label{week-6}}

\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\tightlist
\item
  Experimental design

  \begin{itemize}
  \tightlist
  \item
    lexicon
  \item
    considering sources of variance
  \item
    types of variables (categorical, ordinal, ratio)
  \item
    confounding variables
  \end{itemize}
\item
  Frequentist hypothesis testing

  \begin{itemize}
  \tightlist
  \item
    error types
  \item
    p-values
  \item
    degrees of freedom
  \item
    statistical power
  \item
    multiple testing problem
  \end{itemize}
\end{enumerate}

\hypertarget{week-7}{%
\section{Week 7}\label{week-7}}

\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\tightlist
\item
  Comparing means between groups

  \begin{itemize}
  \tightlist
  \item
    Student's t-test
  \end{itemize}
\item
  Bootstrapping and randomization to compare means
\end{enumerate}

\hypertarget{week-8}{%
\section{Week 8}\label{week-8}}

\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\tightlist
\item
  Relationships between quantitative variables

  \begin{itemize}
  \tightlist
  \item
    correlation and covariance
  \end{itemize}
\item
  Simple linear regression

  \begin{itemize}
  \tightlist
  \item
    residuals and least squares
  \item
    fitting linear regression models
  \end{itemize}
\end{enumerate}

\hypertarget{week-9}{%
\section{Week 9}\label{week-9}}

\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\tightlist
\item
  Analysis of variance

  \begin{itemize}
  \tightlist
  \item
    Table components and test statistics
  \end{itemize}
\item
  General linear models in R

  \begin{itemize}
  \tightlist
  \item
    Model formulae
  \item
    Interpretation of summary output
  \end{itemize}
\item
  More complex ANOVA frameworks

  \begin{itemize}
  \tightlist
  \item
    Nested models
  \item
    Factorial models
  \end{itemize}
\end{enumerate}

\hypertarget{week-10}{%
\section{Week 10}\label{week-10}}

\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\tightlist
\item
  Frequency-based statistical tests

  \begin{itemize}
  \tightlist
  \item
    Chi-squared tests
  \item
    Contingency tables and tests of independence
  \end{itemize}
\item
  Brief introduction to generalized linear models (time permitting)

  \begin{itemize}
  \tightlist
  \item
    logistic regression
  \end{itemize}
\end{enumerate}

\hypertarget{background-material-for-the-course}{%
\chapter{Background material for the course}\label{background-material-for-the-course}}

\hypertarget{description-of-the-course}{%
\section{Description of the course}\label{description-of-the-course}}

This course is an introduction to data management, data visualization, and statistical inference. It is intended for early-stage graduate students with no background in statistics. No prior coursework (undergraduate or graduate) in statistics or programming is assumed. The primary objective of the course is to get students up to speed with respect to organization, manipulation, visualization, and analysis of data, using the R statistical language.
The emphasis on application is strong, with the goal of enabling students (after the course) to analyze their own data sets with confidence using reasonable approaches, and, when faced with more difficult analyses, to be able to communicate their inference objectives clearly to expert analysts. Students will learn to organize and analyze data sets in the form of RStudio projects, using R Markdown files to reproducibly capture and render code, visualizations, and analyses. In-class exercises will be delivered in the form of pre-formatted R Notebooks, which can be interactively executed by students without having to write all code from scratch. The course is designed to acquaint students primarily with univariate (single response variable) analysis. Multivariate analysis will be covered in the Advanced Biostatistics 2-course series offered during the Fall and Winter terms. Examples and assignments in class will include data sets primarily from the biological sciences, including studies of morphological and molecular traits, behaviors, ecological questions, and clinical studies. For specific statistical topics covered in class, please see the course goals and tentative schedule below.

\hypertarget{course-goals}{%
\section{Course goals:}\label{course-goals}}

\begin{itemize}
\tightlist
\item
  Properly organize and format primary data and metadata files for analysis
\item
  Learn programming fundamentals of the R statistical language, including objects, functions, iteration, and simulation.
\item
  Make publication-quality data visualizations, including scatterplots, boxplots, frequency distributions, mosaic plots, etc.
\item
  Understand Type I and Type II statistical error, including p-values and power analysis.
\item
  Understand ordinary least-squares regression and linear models in general
\item
  Learn the fundamentals of strong experimental design
\item
  Learn to apply general linear models to basic univariate analysis problems, including Analysis of Variance (ANOVA)
\item
  Learn nonparametric approaches to parameter estimation and statistical inference, including resampling (bootstrapping), permutation, and rank-based analysis.
\item
  Understand how to analyze binary response variables and frequency-based (e.g.~contingency table) data sets.
\end{itemize}

\hypertarget{introduction-to-r-and-rstudio}{%
\section{Introduction to R and RStudio}\label{introduction-to-r-and-rstudio}}

R is a core computational platform for statistical analysis. It was developed a number of years ago to create an open source environment for advanced computing in statistics and has since become the standard for statistical analysis in the field, replacing commercial packages like SAS and SPSS for the most part. Learning R is an essential part of becoming a scientist who is able to work at the cutting edge of statistical analysis -- or even to perform conventional statistical tests (e.g.~a t-test) in a standard way. An important part of R is that it is script-based, which makes it easy to create reproducible analysis pipelines, an emerging feature of the open data/open analysis movement in science. This is becoming an important component of publication and sharing of research results, so being able to engage fully with this effort is something that all young scientists should do.

RMarkdown is an extra layer placed on top of R that makes it easy to integrate text explanations of what is going on, native R code/scripts, and R output all in one document.
The final result can be put into a variety of forms, including webpages, pdf documents, Word documents, etc. Entire books are now written in RMarkdown and its relatives. It is a great way to make quick webpages, like this document, for instance. It is very easy to use and will be the format that I use to distribute your assignments to you and that you will use to turn in your assignments. R Projects are a simple way of designating a working directory in which to house files related to a given, well, project. Those files might include primary data and metadata files ready for reading into R, \texttt{.R} scripts, Rmarkdown files, and output such as Rmarkdown-rendered .html files or individual plots, for example. The nice thing about organizing your work with R Projects is that you can keep everything needed to reproduce an analysis in a single directory on your computer. You can open an R Project in RStudio by opening the project's index (\texttt{.RProj}) file, which will automatically set your working directory to that of the project and facilitate loading any saved environments, etc. In Chapter 6 we will begin working in R and RStudio, but you can get them installed now (in that order) on your computer, if you haven't already. Get the most recent \emph{released} R version by following this link: \url{https://www.r-project.org/} We will do our work using Rstudio, which is a powerful and convenient user interface for R, and can be downloaded from here for installation: \url{https://rstudio.com/products/rstudio/} \hypertarget{learning-resources}{% \subsection{Learning resources}\label{learning-resources}} There are tons of resources for learning R and RMarkdown on the internet. Here are just a few, but you will no doubt find your own favorites as you become routine R users. There is an organized group that is dedicated to training in R called DataCamp (\url{https://www.datacamp.com/}). They provide all of the basics for free. They actually have training for most data science platforms. RStudio provides links for training directly related to R and RMarkdown here: \url{https://education.rstudio.com/} There are also many, many R training videos on YouTube. Most of them are very well meaning but may not be as in-depth as you want. You can also go the old ``paper'' manual route by reading the materials provided by R itself: \url{https://cran.r-project.org/doc/manuals/r-release/R-intro.pdf} In reality, if you want to do almost anything in R, simply type in what you are interested in doing into Google and include ``in R'' and a whole bunch of links telling you exactly what to do will magically appear. Most of them appear as discussions on websites like StackOverflow and Stats.StackExchange. In that case, the first thing that you see is the question--usually someone doing it just a bit wrong--so you should scroll down to see the right way to do it in the answers. It is really an amazing resource that will speed you along in nearly every form of analysis that you are interested in. Please do not hesitate to contact us if you have questions or run into obstacles. The point of this class is to learn by doing, but our aim is that the doing should involve reasonable first efforts supplemented with help if needed. Also, many of your classmates have some experience with R, writing code, or statistics in general, so they are an excellent resource as well! 
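Once you have R and RStudio installed (and, later on, whenever you open an R Project), a quick way to confirm that everything is working is to run a few lines at the Console. The snippet below is just a sketch of that kind of check using base R functions; the exact output will of course differ on your machine.

\begin{Shaded}
\begin{Highlighting}[]
# which version of R am I running?
R.version.string

# where is my current working directory?
getwd()

# which files does R see in that directory?
list.files()
\end{Highlighting}
\end{Shaded}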
\hypertarget{organizing-and-manipulating-data-files}{%
\chapter{Organizing and manipulating data files}\label{organizing-and-manipulating-data-files}}

\hypertarget{introduction}{%
\section{Introduction}\label{introduction}}

Many of you will already be familiar with data file organization, editing, and formatting for analysis. If so, much of the following material may be review. If not, some of the following guidelines and tools should prove to be quite useful.

In biology, and many other fields, primary data are routinely stored as ``flat'' text files. The exact formatting depends on the type of data, of course, but often we are working with text files organized into rows and columns. Rows can naturally be defined by lines in a file, and columns can be defined by separators (also called delimiters) such as spaces, tabs, or commas, to name a few commonly used ones.

Fortunately there are some very powerful and simple-to-use (with a little practice) tools that can be invoked directly from a computer's command line, or included in written ``scripts'' that your computer's operating system can interpret upon you running them. These command line tools are now nearly ubiquitous on all personal computer platforms. Computers running a LINUX operating system allow direct access to these tools via the command line, as does the macOS operating system of Apple computers via the Terminal. Computers running Microsoft Windows 10 now also facilitate use of these conventional ``UNIX tools'' through a Windows Subsystem for Linux.

In the following sections, we provide a \emph{very brief} introduction to using some of these tools in order to organize your data files, parse them for information, and perform some basic text manipulations. Mastering these activities is not necessary for this course (in fact, many of the text manipulation tasks can be done in R!), but if you learn to adopt at least some of these skills you will become a better, more organized analyst, and it will help you become comfortable with the command line and programming in general.

\hypertarget{navigating-file-systems-from-the-command-line}{%
\section{Navigating file systems from the command line}\label{navigating-file-systems-from-the-command-line}}

\hypertarget{access-to-the-command-line}{%
\subsection{Access to the command line}\label{access-to-the-command-line}}

The first step to using command line tools is to get access to the command line! On Mac and Linux systems you can simply do this by finding and opening the \texttt{Terminal} application. On Windows 10 systems, you'll have to install a Linux Bash Shell if you haven't already. To do this you will need to follow the instructions here: \url{https://itsfoss.com/install-bash-on-windows/}

When you get to the point of choosing the Linux distribution to install, I recommend Ubuntu.

At this point you should have command line access through a terminal prompt, which should look something like my Mac Terminal below:

\includegraphics{/Users/csmall/github_repos/Found_Stat/images/MacTerminal.png}

You are now ready to navigate and explore files simply by typing!

\hypertarget{navigating-directories-and-files}{%
\subsection{Navigating directories and files}\label{navigating-directories-and-files}}

When you are at the command line, just think of your computer as you would if you were navigating using a graphical application (e.g.~Mac Finder or Windows Explorer).
You are always in a directory in your file system, and you can move to any other directory by typing the appropriate command and destination, then hitting Enter. The first crucial UNIX command to learn is \texttt{pwd}. This command stands for ``print working directory,'' and it will literally print the path of the directory you are currently in. Another important command is \texttt{ls}. This lists the files and directories (by default) in your working directory. If you specify a different directory, it will list the files and/or directories there.

Most UNIX commands (and indeed command-line programs in general) can be run with options. One way to invoke an option is to type a ``flag'' along with the command. In the case of \texttt{ls}, we can type \texttt{ls\ -l}, for example, which will print the output one item per line, in a detailed ``long'' format. We can also add another flag: \texttt{ls\ -lh} (equivalent to \texttt{ls\ -l\ -h}), which will print items line-by-line but also make sure the item sizes are ``human readable.''

If you ever have questions about how to use a UNIX program, including the flags and other options, you can type \texttt{man\ program\_name} and a wonderful help manual will appear. To exit and return to the command prompt, just hit ``q''. These \texttt{man} pages are extremely useful and should be your first go-to if you need information for a particular command. Please use these regularly!

The command \texttt{cd} will change your location from the current directory to another directory. Like many other programs (UNIX and otherwise) requiring you to input directory and file locations, with \texttt{cd} you can specify your desired location using either the \emph{absolute} or \emph{relative} path. An absolute path is the full ``address'' of a directory or file, starting from the root of your file system. An example of an absolute path to a directory in my file system is \texttt{/Users/csmall/Dropbox/sculpin\_project/images/}. Regardless of where my current working directory is in my file system, I can change to this \texttt{images/} directory using \texttt{cd} and the full path.

I can also use a relative path, which is a sort of ``shortcut,'' to specify the location of a directory or file. Let's say I am in \texttt{/Users/csmall/Dropbox/BiostatsFound\_S2020/} and I want to get to the \texttt{images/} directory above. I could type \texttt{cd\ ../sculpin\_project/images}, which uses a relative path to take me ``up'' one directory (as denoted by \texttt{../}) into \texttt{Dropbox/} and back ``down'' into \texttt{sculpin\_project/images}. In fact, \texttt{..} is a special file in every directory that just means ``the directory above.'' The special file \texttt{.} is the current directory. And to mention one final useful designation for navigation shortcuts, you can use the \texttt{\textasciitilde{}} to denote your home directory.

The schematic below should help you visualize how to think about file system navigation from the command line:

\includegraphics{/Users/csmall/github_repos/Found_Stat/images/Directory_example.jpeg}

And for another example, take a look at this series of navigation commands from my terminal and see if you can follow along:

\includegraphics{/Users/csmall/github_repos/Found_Stat/images/MacTerminal_2.png}

If you want to create a new directory, you can use the \texttt{mkdir} command, including the desired name of the new directory.
By default this will create the directory in your current working directory, but you can use absolute or relative paths to instead write the directory somewhere else. If you want to delete an empty directory, \texttt{rmdir} is the appropriate command. Now let's briefly cover some UNIX commands that are useful for managing files. Some of these apply to directories as well, which I will point out as we go. The command \texttt{touch} can be used to create a new, empty file, which you can add to using a plain text editor. Examples of popular plain text editors with advanced user interfaces are BBEdit and Atom. You can also use command line text editors, such as \texttt{nano}, \texttt{emacs}, and \texttt{vim}. Most UNIX/LINUX systems have \texttt{nano} installed by default. To copy or change the name and/or location of a file (or directory), use \texttt{cp} and \texttt{mv} commands, respectively. Note that by using absolute or relative paths, you can specify where you want the file or directory to end up. Be especially careful with these, however, because you will overwrite any existing file or directory if you specify the same name and location. Another command you should be extremely cautious with is \texttt{rm}, which removes (permanently deletes) a file. \texttt{rm\ -r} can be used to delete a non-empty directory AND all of its contents. In many cases you will want to look at files, or parts of them at least, from the command line. \texttt{cat} will print the entire contents of a file, but can also be used to combine (``concatenate'') multiple files in a line-wise manner. \texttt{less} and \texttt{more} will display specific lines of a file (starting with the first ones), with single- or multi-line ``scrolling,'' respectively, activated using the return or down-arrow keys. To leave the display, you need to hit the ``q'' key. \texttt{head} and \texttt{tail} will display the first or last, respectively, \emph{n} lines of the file, where \emph{n} is provided as a flag (e.g. \texttt{head\ -200\ file.tsv}). The ``word count'' command \texttt{wc} can quantify elements of a text file in various ways, but one common application is \texttt{wc\ -l}, which counts the number of lines in a file. An aside: If you are working from the command line and want to terminate a process (say you accidentally start a task that will take way too long), press Ctrl-C. 
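As an aside, and as a preview of the next chapter, several of these file-management and viewing tasks have close equivalents in base R. The snippet below is only a sketch (the file name \texttt{data.tsv} is made up), but it shows functions that mirror some of the commands above.

\begin{Shaded}
\begin{Highlighting}[]
# list the files in the working directory (similar to ls)
list.files()

# copy and then rename a file (similar to cp and mv); "data.tsv" is a made-up name
file.copy("data.tsv", "data_backup.tsv")
file.rename("data_backup.tsv", "data_old.tsv")

# read a text file and count its lines (similar to cat and wc -l)
my_lines <- readLines("data.tsv")
length(my_lines)

# look at just the first few lines (similar to head)
head(my_lines)
\end{Highlighting}
\end{Shaded}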
\hypertarget{a-quick-review-of-important-unix-commands-for-navigation-and-viewing}{% \subsubsection{A quick review of important UNIX commands for navigation and viewing}\label{a-quick-review-of-important-unix-commands-for-navigation-and-viewing}} \texttt{pwd} - prints working directory \texttt{ls} - lists contents of a directory \texttt{cd} - changes the working directory \texttt{mkdir} - creates a new directory \texttt{rmdir} - deletes an empty directory \texttt{touch} - creates an empty file \texttt{cp} - copies a file or directory \texttt{mv} - changes the name of a file or directory \texttt{rm} - deletes a file, or a directory and everything inside with \texttt{-r} \texttt{cat} - prints the entire file to the terminal, or concatenates and prints multiple files \texttt{less} - displays the first lines of a file, with scrolling line-by-line \texttt{head} - prints the first 10 lines (default) of a file \texttt{tail} - prints the last 10 lines (default) of a file \texttt{wc\ -l} - prints the number of lines in a file \hypertarget{useful-unix-commands-for-file-manipulation}{% \subsection{Useful UNIX commands for file manipulation}\label{useful-unix-commands-for-file-manipulation}} In many cases you will want to search for specific characters or combinations of characters, and do various things with that information. Maybe you want to isolate the lines of a file that contain the query, or perhaps you want to count how many lines contain the query. The tool \texttt{grep} is extremely useful in this regard. We don't have time for a comprehensive dive into the utilities of \texttt{grep}, but a few common applications are worth mentioning. Character patterns we search for using \texttt{grep} may or may not involve special characters that are not interpreted literally. Here we will discuss just a few common cases of \texttt{grep} searches and the special characters involved. Some examples of these special characters include \texttt{\^{}} (beginning of a line), \texttt{\$} (end of a line), \texttt{.} (any single character except a newline), \texttt{*} (zero or more instances of the preceding character), and \texttt{\textbackslash{}s} (any white space). The standard syntax for \texttt{grep} from the command line is \texttt{grep\ "expression"\ filename}. So, if you wanted to return all of the lines in the data file \texttt{zfish\_data.tsv} (assuming it is in the current directory) that begin with ``embryo\_10'', you could try \texttt{grep\ "\^{}embryo\_10"\ zfish\_data.tsv}. This search would also (unintentionally) find lines beginning with ``embryo\_100'' or ``embryo\_101'', etc., if they exist. So, you have to be careful, and learning the rules just takes practice. In this case \texttt{grep\ "\^{}embryo\_10\textbackslash{}s"\ zfish\_data.tsv} would achieve the desired result, assuming that there is a whitespace delimiter between fields (``columns'') in the data file. Useful flags for \texttt{grep} include \texttt{-c} (which counts the number of lines containing the query), \texttt{-v} (which returns the lines that \emph{do not} contain the query), and \texttt{-n} (which prints the line number for each line containing the query). I encourage you to look at many different \texttt{grep} use cases online as your demand for complex searches grows. The program \texttt{sed} has reasonably complex applications, but is commonly used as a sort of ``search and replace'' tool. 
The syntax for \texttt{sed} use is similar to \texttt{grep}, except that the query and replacement expressions are organized (with other information) using slashes. For ``search and replace'' functionality, that syntax looks like this: \texttt{sed\ \textquotesingle{}s/query/replacement/flag\textquotesingle{}\ filename}. One common option for the ``flag'' component is ``g'', meaning ``global'', which replaces all instances. If no flag designation is made, only the first instance on each line is replaced. Building on our toy example from above, \texttt{sed\ \textquotesingle{}s/\^{}embryo\_/larva\_/g\textquotesingle{}\ zfish\_data.tsv} would perform a global replacement and print the output to the terminal. To change the contents of the original file in place, adding the \texttt{-i} flag (\texttt{sed\ -i}) would do the trick, but this is riskier than redirecting the output to a new file.

\texttt{cut} is quite straightforward, and can be used to isolate individual fields (think of them like ``columns'') from a text file, provided the fields are consistently separated by a delimiter on each line. So, if I had a comma-separated file and I just wanted the first two columns I could type \texttt{cut\ -f1,2\ -d","\ filename}. Note that if you don't specify a delimiter using the \texttt{-d} flag, then it is assumed to be tab-delimited. If you want to bring together fields in separate files, \texttt{join} can be used to accomplish this. The two files should have equivalent rows, however, for this action to work properly.

If you want to sort text files alphanumerically, in a field-wise fashion, \texttt{sort} is quite useful. If a file contains a single field, minimal specification is required, aside from tuning numerical sorting. For example, if you want to sort numerically, use the \texttt{-n} flag, and if you want to sort from largest to smallest, add the \texttt{-r} flag. If you want to sort a multi-field file based on just one field, you can use the ``key'' flag. For instance, if you have a tab-delimited file and want to sort by the second field in reverse numerical order, \texttt{sort\ -k2,2\ -nr\ filename.tsv} would give you the desired result. Finally, if you want to eliminate lines with the same value for a given field, you can use the \texttt{-u} ``unique'' flag.

The UNIX program \texttt{awk} is an extremely powerful tool, and can itself be used essentially as a mini programming language. We will not get into the myriad uses of \texttt{awk} here, but the reference at the bottom of the chapter is a great resource if you want to learn more. \texttt{awk} is extremely efficient at parsing and capturing text files in a column-wise manner, with the ability to also evaluate logical statements applied to rows. The structure of \texttt{awk} commands is more complex than that of other UNIX programs we have discussed, but it is still very intuitive. One unique feature is that \texttt{awk} contains its own internal functions, which are typed inside curly braces. The ``print'' function can be used to extract fields, much like \texttt{cut}. For instance, \texttt{awk\ -F:\ \textquotesingle{}\{print\ \$1,\$6\}\textquotesingle{}\ filename.tsv} would print the first and sixth field from \texttt{filename.tsv}, assuming a ``:'' delimiter. With \texttt{awk}, fields are specified using the \texttt{\$} character. If you also want to select only specific rows from a set of columns (like those with a certain value), you can incorporate logical operators.
In the above example if we had wanted fields 1 and 6, but only those rows with a value of at least 610 in field 4, we could type the following \texttt{awk\ -F:\ \textquotesingle{}\$4\ \textgreater{}=\ 610\ \{print\ \$1,\$6\}\textquotesingle{}\ filename.tsv}. Again, this is just scratching the surface with \texttt{awk}, which boasts a great deal of potential for your text file manipulation needs. \hypertarget{a-quick-review-of-key-unix-commands-for-text-file-searching-and-manipulation}{% \subsubsection{A quick review of key UNIX commands for text file searching and manipulation}\label{a-quick-review-of-key-unix-commands-for-text-file-searching-and-manipulation}} \texttt{grep} - searches a file for characters and character combinations \texttt{sed} - stream edits characters and character combinations \texttt{cut} - isolates specific fields (``columns'') from a file using a delimiter \texttt{join} - combines fields (``columns'') from multiple files with equivalent rows \texttt{sort} - orders the rows in a file based on one or more fields \texttt{awk} - flexibly parses, evaluates, and selectively prints row- and column-wise \hypertarget{a-quick-word-on-pipes-and-carrots}{% \subsection{A quick word on pipes and carrots}\label{a-quick-word-on-pipes-and-carrots}} One very convenient feature of UNIX commands is that you can control the flow of input and output from one command to another using the \texttt{\textbar{}} (``pipe'') character. For instance, I may want to search an entire file for rows that begin with ``fish-1'', and then replace the ``-'' with ``\_''. To do this I could do something like \texttt{cat\ file.tsv\ \textbar{}\ grep\ "\^{}fish-1"\ \textbar{}\ sed\ \textquotesingle{}s/fish-1/fish\_1/g\textquotesingle{}} This, of course, would print the output to the terminal, but I could actually capture that output into a file using the \texttt{\textgreater{}} character. \texttt{cat\ filename\ \textbar{}\ grep\ "\^{}fish-1"\ \textbar{}\ sed\ \textquotesingle{}s/fish-1/fish\_1/g\textquotesingle{}\ \textgreater{}\ ./newfile.tsv} would write this new file to my current working directory. Furthermore, if you want to append lines of text to an existing file, the ``double sideways right-pointing carrot'' character \texttt{\textgreater{}\textgreater{}} can be used. The above lessons on UNIX commands for file manipulation truly just scratch the surface of what can be accomplished at the command line and in ``shell scripts.'' You certainly will have further questions and be hungry for more, but we simply don't have time during this course. But to work on your UNIX skills for now, check out \texttt{Ex1\_Unix\_Intro.html} (on Canvas). We need to move on to R now, but at the bottom of this chapter are some UNIX command resources I have found to be especially useful. \hypertarget{data-file-and-data-file-entry-dos-and-donts}{% \section{Data file and data file entry dos and don'ts}\label{data-file-and-data-file-entry-dos-and-donts}} Do store a copy of your data in a nonproprietary format, such as plain ASCII text (aka a flat file). This is especially important if you are using tools (like UNIX commands) to parse and manipulate the files. Formats like Microsoft Excel are not acceptable as input for many analysis tools, and not everyone has access to proprietary software. Do leave an un-edited copy of an original data file, even when main analyses require an edited version. Do use descriptive names for your data files and variables, and use them consistently! Do maintain effective metadata about the data. 
Do add new observations to a data file as rows. Do add new variables to a data file as columns. Don't include multiple data types in the same column. Don't use non-alphanumeric characters (other than the underscore) in file or directory names. Don't use spaces, tabs, commas, colons, semicolons, or other characters commonly used as field (column) delimiters in names of individual data entries. For example, don't use something like \texttt{March\ 8} as a value for date in a data set. Don't copy and paste data directly from rich-text-formatted files (like Microsoft Word) into primary data files. \hypertarget{exercises-associated-with-this-chapter}{% \section{Exercises associated with this chapter:}\label{exercises-associated-with-this-chapter}} \begin{itemize} \tightlist \item Exercise 1 (file: \texttt{Ex1\_Unix\_Intro.html}) \end{itemize} \hypertarget{additional-learning-resources}{% \section{Additional learning resources}\label{additional-learning-resources}} \begin{itemize} \item \url{http://mally.stanford.edu/~sr/computing/basic-unix.html} - A nice ``cheat sheet'' \item \url{http://korflab.ucdavis.edu/Unix_and_Perl/} - Outstanding tutorial by Keith Bradnam and Ian Korf \item \url{https://www.datacamp.com/courses/introduction-to-shell-for-data-science} - DataCamp tutorial \item \url{https://www.gnu.org/software/gawk/manual/gawk.html} - A comprehensive guide to \texttt{awk} \end{itemize} \hypertarget{an-introduction-to-the-r-language}{% \chapter{An Introduction to the R language}\label{an-introduction-to-the-r-language}} \hypertarget{background}{% \section{Background}\label{background}} \texttt{R} is a computer programming language and environment especially useful for graphic visualization and statistical analysis of data. It is an offshoot of a language developed in 1976 at Bell Laboratories called \texttt{S}. \texttt{R} is an interpreted language, meaning that every time code is run it must be translated to machine language by the \texttt{R} interpreter, as opposed to being compiled prior to running. \texttt{R} is the premier computational platform for statistical analysis thanks to its GNU open-source status and countless packages contributed by diverse members of the scientific community. \hypertarget{why-use-r}{% \section{\texorpdfstring{Why use \texttt{R}?}{Why use R?}}\label{why-use-r}} \begin{itemize} \tightlist \item Good general scripting tool for statistics and mathematics \item Powerful and flexible and free \item Runs on all computer platforms \item New packages released all the time \item Superb data management \& graphics capabilities \item Reproducibility - can keep your scripts to see exactly what was done \item Can embed your \texttt{R} analyses in dynamic, polished files using R markdown \item You can write your own functions \item Lots of online help available \item Can use a nice IDE such as \texttt{RStudio} \end{itemize} \hypertarget{important-r-terms-and-definitions}{% \section{\texorpdfstring{Important \texttt{R} terms and definitions}{Important R terms and definitions}}\label{important-r-terms-and-definitions}} \begin{figure} \centering \includegraphics{/Users/csmall/github_repos/Found_Stat/images/R_definitions_Logan.001.jpeg} \caption{Alt text} \end{figure} From Logan, M. 2010. \emph{Biostatistical Design and Analysis Using R} Operators are symbols in programming that have a specific meaning \begin{figure} \centering \includegraphics{/Users/csmall/github_repos/Found_Stat/images/R_definitions_Logan.002.jpeg} \caption{Alt text} \end{figure} From Logan, M. 2010. 
\emph{Biostatistical Design and Analysis Using R}

\hypertarget{getting-started-with-r-via-the-rstudio-environment}{%
\section{\texorpdfstring{Getting started with \texttt{R} via the RStudio Environment}{Getting started with R via the RStudio Environment}}\label{getting-started-with-r-via-the-rstudio-environment}}

To begin working with \texttt{R}, open RStudio. You should first see something that looks like this:

\includegraphics{/Users/csmall/github_repos/Found_Stat/images/MacTerminal_3.png}

To open a new script editor (where you will keep track of your code and notes), go to File \textgreater{} New File \textgreater{} R Script. Note that there are other options for file types, which we will be using in the future. For now, though, we want a plain script, which when saved will have the extension \texttt{.R}.

It is easy to run code directly from the script editor. For single lines of code, simply make sure your cursor is on that line, and hit Ctrl-Enter. For multiple lines, highlight the block of code you want to run and hit Ctrl-Enter. Now your display should look something like below (but without the red pane labels, of course):

\includegraphics{/Users/csmall/github_repos/Found_Stat/images/R_definitions_Logan.003.jpeg}

Note that you can also type commands directly from the command line using the \texttt{R} Console (lower left pane), and the \texttt{R} interpreter will run them when you press Enter. Any objects you define, and a summary of their values, will appear in the upper right pane, and the lower right pane differs in appearance depending on instructions you provide to \texttt{RStudio}. For instance, if you produce a plot, it will appear there by default.

Another extremely important feature of R functions (we'll get to them in a bit) is the help file. Recall from Chapter 5 our discussion of \texttt{man} pages for UNIX programs. Help files are the equivalent for \texttt{R} functions. They contain almost everything you need to know about a given function, and most of them even include an example at the bottom. These help files will appear in the lower right RStudio pane when you call them, for example when you run \texttt{help(function\_name)} at the \texttt{R} Console; a brief example appears below.

\hypertarget{r-programming-basics}{%
\subsection{R Programming Basics}\label{r-programming-basics}}

For the code examples below, it might be useful for you to start your own RStudio session, open a new \texttt{.R} file and type/run code while reading.
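Before working through the basics, here is a minimal illustration of the help system described above. The function \texttt{mean} is just a stand-in; any function name you are curious about works the same way.

\begin{Shaded}
\begin{Highlighting}[]
# open the help page for the mean() function
help("mean")

# the question mark is a shortcut for the same thing
?mean

# search across help files when you don't know the exact function name
help.search("standard deviation")
\end{Highlighting}
\end{Shaded}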
\begin{itemize} \tightlist \item Commands can be submitted through the terminal, console or scripts \item In your scripts, anything that follows \texttt{\#} symbol (aka hash) is just for humans \item Notice on these slides I'm evaluating the code chunks and showing output \item The output is shown here after the two \texttt{\#} symbols and the number of output items is in \texttt{{[}{]}} \item Also notice that \texttt{R} follows the normal priority of mathematical evaluation \end{itemize} \begin{Shaded} \begin{Highlighting}[] \DecValTok{4}\OperatorTok{*}\DecValTok{4} \end{Highlighting} \end{Shaded} \begin{verbatim} ## [1] 16 \end{verbatim} \begin{Shaded} \begin{Highlighting}[] \NormalTok{(}\DecValTok{4}\OperatorTok{+}\DecValTok{3}\OperatorTok{*}\DecValTok{2}\OperatorTok{^}\DecValTok{2}\NormalTok{)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## [1] 16 \end{verbatim} \hypertarget{a-note-on-r-markdown}{% \subsubsection{\texorpdfstring{A note on \texttt{R} Markdown}{A note on R Markdown}}\label{a-note-on-r-markdown}} This format provides a much better way to embed code and output, in an easily readable, reproducible manner. We will dive into \texttt{R} Markdown next week, so for now just be aware that it exists. \begin{itemize} \item \url{http://kbroman.org/knitr_knutshell/pages/Rmarkdown.html} \item You can insert \texttt{R} chunks into \texttt{Rmarkdown} documents \end{itemize} \hypertarget{assigning-variables}{% \subsubsection{Assigning Variables}\label{assigning-variables}} \begin{itemize} \item To ``store'' information for later use, like the arithmetic operation above, we can assign variables in \texttt{R}. \item Variables are assigned values using the \texttt{\textless{}-} operator. \item Variable names must begin with a letter, and should not contain spaces or \texttt{R} operators (see above) but other than that, just about anything goes. It is good practice to avoid periods in variable names, as they have other functionality in related programming languages, like Python. \item Do keep in mind that \texttt{R} is case sensitive. \end{itemize} \begin{Shaded} \begin{Highlighting}[] \NormalTok{x <-}\StringTok{ }\DecValTok{2} \NormalTok{x }\OperatorTok{*}\StringTok{ }\DecValTok{3} \end{Highlighting} \end{Shaded} \begin{verbatim} ## [1] 6 \end{verbatim} \begin{Shaded} \begin{Highlighting}[] \NormalTok{y <-}\StringTok{ }\NormalTok{x }\OperatorTok{*}\StringTok{ }\DecValTok{3} \NormalTok{y }\OperatorTok{-}\StringTok{ }\DecValTok{2} \end{Highlighting} \end{Shaded} \begin{verbatim} ## [1] 4 \end{verbatim} These do not work \begin{Shaded} \begin{Highlighting}[] \NormalTok{3y <-}\StringTok{ }\DecValTok{3} \DecValTok{3}\OperatorTok{*}\NormalTok{y <-}\StringTok{ }\DecValTok{3} \end{Highlighting} \end{Shaded} \hypertarget{arithmetic-operations-with-functions}{% \subsubsection{Arithmetic operations with functions}\label{arithmetic-operations-with-functions}} \begin{itemize} \item Arithmetic operations can be used with functions as well as numbers. \item Try the following, and then your own. \end{itemize} \begin{Shaded} \begin{Highlighting}[] \NormalTok{x}\OperatorTok{+}\DecValTok{2} \NormalTok{x}\OperatorTok{^}\DecValTok{2} \KeywordTok{log}\NormalTok{(x) }\OperatorTok{+}\StringTok{ }\KeywordTok{log}\NormalTok{(x}\OperatorTok{+}\DecValTok{1}\NormalTok{)} \end{Highlighting} \end{Shaded} \begin{itemize} \item Note that the last of these - \texttt{log()} - is a built in function of \texttt{R}, and therefore the argument for the function (in this case ``x'' or ``x+1'') needs to be put in parentheses. 
\item
  These parentheses will be important, and we'll come back to them later when we add other arguments after the object in the parentheses.
\item
  The outcome of calculations can be assigned to new variables as well, and the results can be checked using the \texttt{print()} function.
\end{itemize}

\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{y <-}\StringTok{ }\DecValTok{67}
\KeywordTok{print}\NormalTok{(y)}
\end{Highlighting}
\end{Shaded}

\begin{verbatim}
## [1] 67
\end{verbatim}

\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{x <-}\StringTok{ }\DecValTok{124}
\NormalTok{z <-}\StringTok{ }\NormalTok{(x}\OperatorTok{*}\NormalTok{y)}\OperatorTok{^}\DecValTok{2}
\KeywordTok{print}\NormalTok{(z)}
\end{Highlighting}
\end{Shaded}

\begin{verbatim}
## [1] 69022864
\end{verbatim}

\hypertarget{strings}{%
\subsubsection{Strings}\label{strings}}

\begin{itemize}
\item
  Assignments and operations can be performed on characters as well.
\item
  Note that characters need to be set off by quotation marks to differentiate them from numeric objects.
\item
  The \texttt{c()} function stands for `concatenate'.
\item
  Note that we are using the same variable names as we did previously, which means that we're overwriting our previous assignments.
\item
  A good general rule is to use new names for each variable, and make them short but still descriptive.
\end{itemize}

\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{x <-}\StringTok{ "I Love"}
\KeywordTok{print}\NormalTok{ (x)}
\end{Highlighting}
\end{Shaded}

\begin{verbatim}
## [1] "I Love"
\end{verbatim}

\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{y <-}\StringTok{ "Biostatistics"}
\KeywordTok{print}\NormalTok{ (y)}
\end{Highlighting}
\end{Shaded}

\begin{verbatim}
## [1] "Biostatistics"
\end{verbatim}

\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{z <-}\StringTok{ }\KeywordTok{c}\NormalTok{(x,y)}
\KeywordTok{print}\NormalTok{ (z)}
\end{Highlighting}
\end{Shaded}

\begin{verbatim}
## [1] "I Love"        "Biostatistics"
\end{verbatim}

The variable z is now a vector of character objects.

\hypertarget{factors}{%
\subsubsection{Factors}\label{factors}}

\begin{itemize}
\item
  Sometimes we would like to treat character objects as if they were categories for subsequent calculations.
\item
  These are called factors, and we can redefine our character object as one of class factor.
\item
  This might seem a bit strange, but it's important for statistical analyses where we might want to calculate the mean or variance for two different treatments. In that case the two different treatments would be coded as two different ``levels'' of a factor we designate in our metadata. This will become clear when we get into hypothesis testing in \texttt{R}.
\end{itemize}

\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{z_factor <-}\StringTok{ }\KeywordTok{as.factor}\NormalTok{(z)}
\KeywordTok{print}\NormalTok{(z_factor)}
\KeywordTok{class}\NormalTok{(z_factor)}
\end{Highlighting}
\end{Shaded}

Note that factor levels are reported alphabetically. I used the \texttt{class()} function to ask \texttt{R} what type of object ``z\_factor'' is. \texttt{class()} is one of the most important tools at your disposal. Often you can debug your code simply by changing the class of an object. Because functions are written to work with specific classes, changing the class of a given object is crucial in many cases.

\hypertarget{null-values}{%
\subsubsection{Null values}\label{null-values}}

\begin{itemize}
\item
  \texttt{R} (like many programming languages) has special values that mean `no value', or `null'.
  In \texttt{R}, the most common is \texttt{NA}, although there are others as well (\texttt{NULL} and \texttt{NaN}).
\item
  Typically, \texttt{NA} is used to indicate a lack of data for a given observation, or a missing value where there normally should be one.
\item
  Any instance of a blank entry in your data file will be read into R as an \texttt{NA}.
\item
  NA is technically a logical data type, and is \emph{not} equivalent to an empty string or the numeric 0. It is also a reserved word and can't be used as a variable name.
\end{itemize}

\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{class}\NormalTok{(}\OtherTok{NA}\NormalTok{)}
\end{Highlighting}
\end{Shaded}

\begin{verbatim}
## [1] "logical"
\end{verbatim}

Many functions in R (e.g. \texttt{mean()}) will not work by default if passed any NA values as an argument.
So if we want to determine the mean of a vector of numeric values, we need to ensure there are either no NA values in the vector, or specify an additional `argument' to the function telling it to ignore \texttt{NA}.
Additionally, \texttt{NA}, like other `null' values, is operated on by a number of unique functions in R.

\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{num <-}\StringTok{ }\KeywordTok{c}\NormalTok{(}\DecValTok{0}\NormalTok{,}\DecValTok{1}\NormalTok{,}\DecValTok{2}\NormalTok{,}\OtherTok{NA}\NormalTok{,}\DecValTok{4}\NormalTok{)}
\KeywordTok{mean}\NormalTok{(num)}
\end{Highlighting}
\end{Shaded}

\begin{verbatim}
## [1] NA
\end{verbatim}

\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{mean}\NormalTok{(num, }\DataTypeTok{na.rm =} \OtherTok{TRUE}\NormalTok{)}
\end{Highlighting}
\end{Shaded}

\begin{verbatim}
## [1] 1.75
\end{verbatim}

\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{is.na}\NormalTok{(num)}
\end{Highlighting}
\end{Shaded}

\begin{verbatim}
## [1] FALSE FALSE FALSE  TRUE FALSE
\end{verbatim}

\hypertarget{vectors}{%
\subsubsection{Vectors}\label{vectors}}

\begin{itemize}
\item
  In general R thinks in terms of vectors (a list of characters, factors, or numerical values) and it will benefit any R user to try to write programs with that in mind.
\item
  The simplest vectors in R are `atomic' vectors, meaning that they consist of only one data type.
\item
  R operations, and therefore functions, are vectorized.
\item
  This means an operation or function will be performed for each element in a vector.
\item
  Vectors can be assigned directly using the \texttt{c()} function and then entering the exact values.
\end{itemize}

\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{x <-}\StringTok{ }\KeywordTok{c}\NormalTok{(}\DecValTok{2}\NormalTok{,}\DecValTok{3}\NormalTok{,}\DecValTok{4}\NormalTok{,}\DecValTok{2}\NormalTok{,}\DecValTok{1}\NormalTok{,}\DecValTok{2}\NormalTok{,}\DecValTok{4}\NormalTok{,}\DecValTok{5}\NormalTok{,}\DecValTok{10}\NormalTok{,}\DecValTok{8}\NormalTok{,}\DecValTok{9}\NormalTok{)}
\KeywordTok{print}\NormalTok{(x)}
\end{Highlighting}
\end{Shaded}

\begin{verbatim}
##  [1]  2  3  4  2  1  2  4  5 10  8  9
\end{verbatim}

\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{x_plus <-}\StringTok{ }\NormalTok{x}\OperatorTok{+}\DecValTok{1}
\KeywordTok{print}\NormalTok{(x_plus)}
\end{Highlighting}
\end{Shaded}

\begin{verbatim}
##  [1]  3  4  5  3  2  3  5  6 11  9 10
\end{verbatim}

\begin{itemize}
\item
  Creating vectors of new data by entering it by hand can be a drag.
\item
  However, it is also very easy to use functions such as \texttt{seq()} and \texttt{sample()}.
\item
  Try the examples below. Can you figure out what the three arguments in the parentheses mean?
\item Within reason, try varying the arguments to see what happens \end{itemize} \begin{Shaded} \begin{Highlighting}[] \NormalTok{seq_}\DecValTok{1}\NormalTok{ <-}\StringTok{ }\KeywordTok{seq}\NormalTok{(}\FloatTok{0.0}\NormalTok{, }\FloatTok{10.0}\NormalTok{, }\DataTypeTok{by =} \FloatTok{0.1}\NormalTok{)} \KeywordTok{print}\NormalTok{(seq_}\DecValTok{1}\NormalTok{)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## [1] 0.0 0.1 0.2 0.3 0.4 0.5 0.6 0.7 0.8 0.9 1.0 1.1 1.2 1.3 1.4 ## [16] 1.5 1.6 1.7 1.8 1.9 2.0 2.1 2.2 2.3 2.4 2.5 2.6 2.7 2.8 2.9 ## [31] 3.0 3.1 3.2 3.3 3.4 3.5 3.6 3.7 3.8 3.9 4.0 4.1 4.2 4.3 4.4 ## [46] 4.5 4.6 4.7 4.8 4.9 5.0 5.1 5.2 5.3 5.4 5.5 5.6 5.7 5.8 5.9 ## [61] 6.0 6.1 6.2 6.3 6.4 6.5 6.6 6.7 6.8 6.9 7.0 7.1 7.2 7.3 7.4 ## [76] 7.5 7.6 7.7 7.8 7.9 8.0 8.1 8.2 8.3 8.4 8.5 8.6 8.7 8.8 8.9 ## [91] 9.0 9.1 9.2 9.3 9.4 9.5 9.6 9.7 9.8 9.9 10.0 \end{verbatim} \begin{Shaded} \begin{Highlighting}[] \NormalTok{seq_}\DecValTok{2}\NormalTok{ <-}\StringTok{ }\KeywordTok{seq}\NormalTok{(}\FloatTok{10.0}\NormalTok{, }\FloatTok{0.0}\NormalTok{, }\DataTypeTok{by =} \FloatTok{-0.1}\NormalTok{)} \KeywordTok{print}\NormalTok{(seq_}\DecValTok{2}\NormalTok{)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## [1] 10.0 9.9 9.8 9.7 9.6 9.5 9.4 9.3 9.2 9.1 9.0 8.9 8.8 8.7 8.6 ## [16] 8.5 8.4 8.3 8.2 8.1 8.0 7.9 7.8 7.7 7.6 7.5 7.4 7.3 7.2 7.1 ## [31] 7.0 6.9 6.8 6.7 6.6 6.5 6.4 6.3 6.2 6.1 6.0 5.9 5.8 5.7 5.6 ## [46] 5.5 5.4 5.3 5.2 5.1 5.0 4.9 4.8 4.7 4.6 4.5 4.4 4.3 4.2 4.1 ## [61] 4.0 3.9 3.8 3.7 3.6 3.5 3.4 3.3 3.2 3.1 3.0 2.9 2.8 2.7 2.6 ## [76] 2.5 2.4 2.3 2.2 2.1 2.0 1.9 1.8 1.7 1.6 1.5 1.4 1.3 1.2 1.1 ## [91] 1.0 0.9 0.8 0.7 0.6 0.5 0.4 0.3 0.2 0.1 0.0 \end{verbatim} \begin{Shaded} \begin{Highlighting}[] \NormalTok{seq_square <-}\StringTok{ }\NormalTok{(seq_}\DecValTok{2}\NormalTok{)}\OperatorTok{*}\NormalTok{(seq_}\DecValTok{2}\NormalTok{)} \KeywordTok{print}\NormalTok{(seq_square)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## [1] 100.00 98.01 96.04 94.09 92.16 90.25 88.36 86.49 84.64 82.81 ## [11] 81.00 79.21 77.44 75.69 73.96 72.25 70.56 68.89 67.24 65.61 ## [21] 64.00 62.41 60.84 59.29 57.76 56.25 54.76 53.29 51.84 50.41 ## [31] 49.00 47.61 46.24 44.89 43.56 42.25 40.96 39.69 38.44 37.21 ## [41] 36.00 34.81 33.64 32.49 31.36 30.25 29.16 28.09 27.04 26.01 ## [51] 25.00 24.01 23.04 22.09 21.16 20.25 19.36 18.49 17.64 16.81 ## [61] 16.00 15.21 14.44 13.69 12.96 12.25 11.56 10.89 10.24 9.61 ## [71] 9.00 8.41 7.84 7.29 6.76 6.25 5.76 5.29 4.84 4.41 ## [81] 4.00 3.61 3.24 2.89 2.56 2.25 1.96 1.69 1.44 1.21 ## [91] 1.00 0.81 0.64 0.49 0.36 0.25 0.16 0.09 0.04 0.01 ## [101] 0.00 \end{verbatim} \begin{Shaded} \begin{Highlighting}[] \NormalTok{seq_square_new <-}\StringTok{ }\NormalTok{(seq_}\DecValTok{2}\NormalTok{)}\OperatorTok{^}\DecValTok{2} \KeywordTok{print}\NormalTok{(seq_square_new)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## [1] 100.00 98.01 96.04 94.09 92.16 90.25 88.36 86.49 84.64 82.81 ## [11] 81.00 79.21 77.44 75.69 73.96 72.25 70.56 68.89 67.24 65.61 ## [21] 64.00 62.41 60.84 59.29 57.76 56.25 54.76 53.29 51.84 50.41 ## [31] 49.00 47.61 46.24 44.89 43.56 42.25 40.96 39.69 38.44 37.21 ## [41] 36.00 34.81 33.64 32.49 31.36 30.25 29.16 28.09 27.04 26.01 ## [51] 25.00 24.01 23.04 22.09 21.16 20.25 19.36 18.49 17.64 16.81 ## [61] 16.00 15.21 14.44 13.69 12.96 12.25 11.56 10.89 10.24 9.61 ## [71] 9.00 8.41 7.84 7.29 6.76 6.25 5.76 5.29 4.84 4.41 ## [81] 4.00 3.61 3.24 2.89 2.56 2.25 1.96 1.69 1.44 1.21 ## [91] 1.00 0.81 0.64 0.49 0.36 0.25 0.16 0.09 0.04 0.01 
## [101] 0.00 \end{verbatim} \begin{itemize} \item Here is a way to create your own data sets that are random samples. \item Again, on your own, play around with the arguments in the parentheses to see what happens. \end{itemize} \begin{Shaded} \begin{Highlighting}[] \NormalTok{x <-}\StringTok{ }\KeywordTok{rnorm}\NormalTok{ (}\DecValTok{10000}\NormalTok{, }\DecValTok{0}\NormalTok{, }\DecValTok{10}\NormalTok{)} \NormalTok{y <-}\StringTok{ }\KeywordTok{sample}\NormalTok{ (}\DecValTok{1}\OperatorTok{:}\DecValTok{10000}\NormalTok{, }\DecValTok{10000}\NormalTok{, }\DataTypeTok{replace =}\NormalTok{ T)} \NormalTok{xy <-}\StringTok{ }\KeywordTok{cbind}\NormalTok{(x,y)} \KeywordTok{plot}\NormalTok{(x,y) } \end{Highlighting} \end{Shaded} \includegraphics[width=1\linewidth]{foundational_statistics_files/figure-latex/Samples from distributions 1-1} \begin{itemize} \item You've probably figured out that ``y'' from the last example is a draw of numbers with equal probability (what we call a flat, or uniform distribution). \item What if you want to draw from a defined probability distribution, like the normal distribution? \item Again, play around with the arguments in the parentheses to see what happens. \end{itemize} \begin{Shaded} \begin{Highlighting}[] \NormalTok{x <-}\KeywordTok{rnorm}\NormalTok{(}\DecValTok{100}\NormalTok{, }\DecValTok{0}\NormalTok{, }\DecValTok{100}\NormalTok{)} \KeywordTok{print}\NormalTok{ (x)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## [1] 105.918483 -13.427842 -236.325752 82.240362 183.292047 -9.895720 ## [7] 15.284438 171.152376 -125.369431 -116.512568 42.973386 -61.631609 ## [13] -41.827666 73.963259 99.319089 44.484575 23.442551 49.735638 ## [19] 185.857463 -18.906007 -108.837792 11.166414 -13.582855 -248.842345 ## [25] -111.288970 -38.391417 -48.706890 -176.858282 -41.341516 -115.546084 ## [31] 89.521530 -69.666600 -78.618627 79.693080 -32.951424 94.069324 ## [37] 54.016034 135.619856 4.179547 -117.484767 -16.702104 17.219414 ## [43] -178.484648 -66.954464 57.744050 133.696483 -103.054699 -77.804300 ## [49] 4.603048 154.257896 -140.116148 -15.338389 50.054714 -12.860603 ## [55] -231.784365 -140.849051 154.214165 153.083460 120.307083 -69.539764 ## [61] 7.218369 4.508274 6.257644 -88.816536 14.027561 13.399696 ## [67] 64.183552 87.239639 67.874787 -40.331148 0.932919 100.249029 ## [73] 99.594719 42.419632 62.701498 14.999322 97.482240 -16.252798 ## [79] 19.287250 82.297462 100.301161 131.985991 44.626352 -173.733005 ## [85] -42.567424 -19.110627 33.482211 184.802727 54.068843 -122.722194 ## [91] 33.618243 -137.701093 101.180155 -115.183392 -108.732626 -2.250534 ## [97] 81.930371 -62.058659 19.572643 -3.868557 \end{verbatim} \begin{Shaded} \begin{Highlighting}[] \KeywordTok{hist}\NormalTok{(x, }\DataTypeTok{xlim =} \KeywordTok{c}\NormalTok{(}\OperatorTok{-}\DecValTok{50}\NormalTok{,}\DecValTok{50}\NormalTok{))} \end{Highlighting} \end{Shaded} \includegraphics[width=1\linewidth]{foundational_statistics_files/figure-latex/Samples from distributions 2-1} \begin{Shaded} \begin{Highlighting}[] \KeywordTok{hist}\NormalTok{(x, }\DataTypeTok{xlim =} \KeywordTok{c}\NormalTok{(}\OperatorTok{-}\DecValTok{500}\NormalTok{,}\DecValTok{500}\NormalTok{))} \end{Highlighting} \end{Shaded} \includegraphics[width=1\linewidth]{foundational_statistics_files/figure-latex/Samples from distributions 2-2} Can you figure out what the three rnorm() arguments represent? 
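For reference, they are the number of draws, the mean, and the standard deviation of the normal distribution being sampled. Below is a minimal sketch with the arguments named explicitly; the particular values are arbitrary choices for illustration.

\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{## 5 draws from a normal distribution with mean 0 and standard deviation 10}
\KeywordTok{rnorm}\NormalTok{(}\DataTypeTok{n =} \DecValTok{5}\NormalTok{, }\DataTypeTok{mean =} \DecValTok{0}\NormalTok{, }\DataTypeTok{sd =} \DecValTok{10}\NormalTok{)}
\end{Highlighting}
\end{Shaded}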
\hypertarget{basic-summary-statistics}{%
\subsubsection{Basic Summary Statistics}\label{basic-summary-statistics}}

We will get into the details regarding summary statistics later, but for now, check out several of the \texttt{R} functions that calculate them.

\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{mean}\NormalTok{(x)}
\KeywordTok{median}\NormalTok{(x)}
\KeywordTok{var}\NormalTok{(x)}
\KeywordTok{log}\NormalTok{(x)}
\KeywordTok{sqrt}\NormalTok{(x)}
\KeywordTok{sum}\NormalTok{(x)}
\KeywordTok{length}\NormalTok{(x)}
\KeywordTok{sample}\NormalTok{(x, }\DataTypeTok{replace =}\NormalTok{ T)}
\end{Highlighting}
\end{Shaded}

\begin{itemize}
\item
  Notice that the last function (\texttt{sample}) has an argument (\texttt{replace=T})
\item
  Arguments simply modify or direct the function in some way
\item
  There are many arguments for each function, some of which have default values
\end{itemize}

\hypertarget{getting-help-to-understand-functions}{%
\subsubsection{Getting help to understand functions}\label{getting-help-to-understand-functions}}

\begin{itemize}
\item
  Getting help on any function is very easy - just type a question mark and the name of the function.
\item
  There are functions for just about anything within \texttt{R} and it is easy enough to write your own functions if none already exist to do what you want to do.
\item
  In general, function calls have a simple structure: a function name, a set of parentheses, and an optional set of arguments you assign parameters to and send to the function.
\item
  Help pages exist for all functions that, at a minimum, explain what parameters exist for the function.
\item
  Help can be accessed in a few ways - try them:
\end{itemize}

\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{help}\NormalTok{(mean)}
\NormalTok{?mean}
\KeywordTok{example}\NormalTok{(mean)}
\KeywordTok{help.search}\NormalTok{(}\StringTok{"mean"}\NormalTok{)}
\KeywordTok{apropos}\NormalTok{(}\StringTok{"mean"}\NormalTok{)}
\KeywordTok{args}\NormalTok{(mean)}
\end{Highlighting}
\end{Shaded}

\hypertarget{exercises-associated-with-this-chapter-1}{%
\section{Exercises associated with this chapter:}\label{exercises-associated-with-this-chapter-1}}

\begin{itemize}
\tightlist
\item
  Exercise 2 (\texttt{rtutorial\_1} in \texttt{foundstats} R package)
\end{itemize}

\hypertarget{additional-learning-resources-1}{%
\section{Additional learning resources:}\label{additional-learning-resources-1}}

\begin{itemize}
\item
  Logan, M. 2010. Biostatistical Design and Analysis Using R. - A great intro to R for statistical analysis
\item
  \url{http://library.open.oregonstate.edu/computationalbiology/} - O'Neil, S.T. 2017. A Primer for Computational Biology
\end{itemize}

\hypertarget{more-r-functions-complex-objects-basic-plotting-and-rmarkdown}{%
\chapter{More R Functions, Complex Objects, Basic Plotting, and RMarkdown}\label{more-r-functions-complex-objects-basic-plotting-and-rmarkdown}}

\hypertarget{background-1}{%
\section{Background}\label{background-1}}

In this chapter we will cover a variety of topics, all of which will help you build your \texttt{R} programming skills and make you capable of dealing with data sets using \texttt{R}.
We will explore additional base \texttt{R} functions that are extremely useful for generating and manipulating vectors, combining vectors into multidimensional \texttt{R} objects, and working with those objects.
We will also cover base \texttt{R} plotting functions to get you started with making your own publication-quality plots.
Finally, we will touch on the RMarkdown file format, how to write those files in \texttt{RStudio}, and how to render the \texttt{.Rmd} file into polished, readable \texttt{.html} documents.

\hypertarget{more-on-functions}{%
\section{More on functions}\label{more-on-functions}}

In the last chapter we touched on functions in \texttt{R}, gave a few examples of commonly used functions, and covered how to learn more about a function using the \texttt{help()} function.
As mentioned, functions and their use follow a basic structure.
To call a function we type its name and include a set of parameters, expressed as arguments inside parentheses \texttt{()}, which specify what we want it to do.
For example, to successfully call the function \texttt{mean()}, we need, at minimum, to supply a vector of numeric values.
That vector can be an object we have already assigned in our environment, or it can be the outcome of another function called within the \texttt{mean()} function. Below are these two alternatives.

\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{z <-}\StringTok{ }\KeywordTok{c}\NormalTok{(}\DecValTok{10}\NormalTok{, }\DecValTok{20}\NormalTok{, }\DecValTok{30}\NormalTok{)}
\KeywordTok{mean}\NormalTok{(z)}
\end{Highlighting}
\end{Shaded}

\begin{verbatim}
## [1] 20
\end{verbatim}

\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{mean}\NormalTok{(}\KeywordTok{c}\NormalTok{(}\DecValTok{10}\NormalTok{, }\DecValTok{20}\NormalTok{, }\DecValTok{30}\NormalTok{))}
\end{Highlighting}
\end{Shaded}

\begin{verbatim}
## [1] 20
\end{verbatim}

The second alternative illustrates the power of ``nesting'' functions within \texttt{R}.
You don't need to perform tasks by defining a bunch of intermediate objects and calling functions in a piecemeal manner.
In many cases it is much more efficient to nest functions within one another, as long as it doesn't jeopardize the functionality or readability of your code.

Base \texttt{R} includes dozens of useful functions that will become part of your regular arsenal.
We have already mentioned several of these and discussed how to discover and learn more about them.
As you become a more advanced \texttt{R} user, and in particular as you begin performing tasks and analyses more specific to your field of study, you will need to use functions that are not included in the base \texttt{R} library.
Fortunately, there are thousands of functions distributed in the form of \texttt{R} ``packages,'' which you can easily install on your system.
The packages that are easiest to find and use are those distributed via the Comprehensive R Archive Network (CRAN): \url{https://cran.r-project.org/web/packages/index.html}.
If you find a specific function or set of functions you are interested in trying out, for instance after a Google search of your problem, you can download and install the package those functions belong to by running the following command from your \texttt{R} Console:

\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{install.packages}\NormalTok{(}\StringTok{"name_of_package"}\NormalTok{)}
\end{Highlighting}
\end{Shaded}

Note that the name of the package has to be spelled correctly (and \texttt{R} is case sensitive), and that the name of the package should be in quotation marks.
You will get a series of messages printed to the Console, and finally either a confirmation of installation or an error message.
Once you have installed a package successfully, you do not need to re-run the \texttt{install.packages()} function.
If you want to check whether a package has already been installed, and look at the details of your installed packages, you can always run the following from the Console; it returns a table with one row of details per installed package, so you can check whether yours appears:

\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{installed.packages}\NormalTok{()}
\end{Highlighting}
\end{Shaded}

To actually use the functions from an installed package, you have to ``load'' that package into your current working environment. To do that we use the \texttt{library()} function:

\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{library}\NormalTok{(name_of_package)}
\end{Highlighting}
\end{Shaded}

Note that you do not include quotation marks around the package name for the \texttt{library()} function.
Unlike package installation, you will need to invoke \texttt{library()} every time you start a new \texttt{R} session to load the package and its functions.

It is also possible, and quite straightforward, to write your own \texttt{R} functions, which you can define within your \texttt{.R} or \texttt{.Rmd} scripts for convenient usage.
If you get to the point at which you want to distribute your own functions in the form of a package, that is possible too.
Later during this course we will get a little experience in writing simple \texttt{R} functions.
Writing more involved functions and publishing packages, however, are topics for a more advanced \texttt{R} course.

\hypertarget{more-base-r-functions-useful-for-working-with-vectors}{%
\subsection{\texorpdfstring{More base \texttt{R} functions useful for working with vectors}{More base R functions useful for working with vectors}}\label{more-base-r-functions-useful-for-working-with-vectors}}

Below are annotated lists of base \texttt{R} functions commonly used to work with vectors.
We will not take the time here to give specific examples for each function, because their usage is quite straightforward and you will get plenty of practice with them in the associated exercises.
You can also practice using the \texttt{help()} function if you have specific questions.

\textbf{The following functions provide information about vectors:}

\begin{itemize}
\item
  \texttt{head()}: returns the first elements of an object (like a vector or data frame)
\item
  \texttt{tail()}: returns the last elements of an object (like a vector or data frame)
\item
  \texttt{length()}: returns the number of elements in a vector
\item
  \texttt{class()}: returns the class of elements in a vector (e.g. ``character'', ``numeric'', ``factor'', etc.)
\end{itemize}

\textbf{The following functions can modify or generate vectors in structured ways:}

\begin{itemize}
\item
  \texttt{sort()}: returns a sorted vector from an original vector of numeric values
\item
  \texttt{seq()}: returns a ``series'' of numeric values beginning at one value and ending at another, while also specifying the size of increments/decrements between values
\item
  \texttt{rep()}: returns a vector of identical elements, repeated a specified number of times
\end{itemize}

\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{rep}\NormalTok{(}\DecValTok{1}\NormalTok{, }\DecValTok{5}\NormalTok{)}
\end{Highlighting}
\end{Shaded}

\begin{verbatim}
## [1] 1 1 1 1 1
\end{verbatim}

\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{rep}\NormalTok{(}\StringTok{"one"}\NormalTok{, }\DecValTok{5}\NormalTok{)}
\end{Highlighting}
\end{Shaded}

\begin{verbatim}
## [1] "one" "one" "one" "one" "one"
\end{verbatim}

Note that \texttt{seq()} and \texttt{rep()} can be repeated and/or combined in various ways, in some cases using \texttt{c()}, to generate vectors in a multitude of patterned ways.

\textbf{The following functions can generate vectors of random values, randomly shuffle vectors, or generate vectors of values drawn from defined probability distributions:}

\begin{itemize}
\item
  \texttt{sample()}: randomly selects and returns elements from a vector (``shuffles'' a vector when the size argument is set to the original vector size and the replace argument is set to ``FALSE'')
\item
  \texttt{rnorm()}: randomly draws values from a theoretical normal distribution
\item
  \texttt{rbinom()}: randomly draws values from a theoretical binomial distribution
\item
  \texttt{set.seed()}: sets \texttt{R}'s random number generator seed so that operations with stochastic properties can be reproduced
\end{itemize}

\textbf{The following functions can change the class of elements in a particular vector:}

\begin{itemize}
\item
  \texttt{as.numeric()}: changes the class of objects in a vector to ``numeric''.
\item
  \texttt{as.factor()}: changes the class of objects in a vector to ``factor''.
\item
  \texttt{as.character()}: changes the class of objects in a vector to ``character''.
\end{itemize}

The \texttt{as.xxx} family of \texttt{R} functions is especially useful if you need to convert the class of a particular object for a given function to use the object properly.

\hypertarget{indexing-vectors}{%
\section{Indexing vectors}\label{indexing-vectors}}

Now that we are quite familiar with different ways of generating vectors, let's discuss how we isolate specific elements from those vectors.
This process is called ``indexing,'' and in \texttt{R} simple numeric (or ``positional'') indexing is intuitively based on integers, starting from ``1''.
We use the square braces for numeric indexing in \texttt{R}: \texttt{{[}{]}}.
For example, if we want to index the first element in a vector, we simply type \texttt{{[}1{]}} after the vector.
Indexing can be performed on a defined vector, or on the fly using the immediate output of a function call.
\begin{Shaded} \begin{Highlighting}[] \CommentTok{## Using our vector z from above} \NormalTok{z[}\DecValTok{1}\NormalTok{]} \end{Highlighting} \end{Shaded} \begin{verbatim} ## [1] 10 \end{verbatim} \begin{Shaded} \begin{Highlighting}[] \CommentTok{## On the fly using output from the c() function} \KeywordTok{c}\NormalTok{(}\DecValTok{10}\NormalTok{, }\DecValTok{20}\NormalTok{, }\DecValTok{30}\NormalTok{)[}\DecValTok{1}\NormalTok{]} \end{Highlighting} \end{Shaded} \begin{verbatim} ## [1] 10 \end{verbatim} To isolate a series of consecutive elements from a vector, we simply use the \texttt{:} character. For example, if we want to index the first (or last) 4 elements from the vector below we could do this, respectively: \begin{Shaded} \begin{Highlighting}[] \KeywordTok{c}\NormalTok{(}\DecValTok{10}\NormalTok{, }\DecValTok{20}\NormalTok{, }\DecValTok{30}\NormalTok{, }\DecValTok{40}\NormalTok{, }\DecValTok{50}\NormalTok{, }\DecValTok{100}\NormalTok{, }\DecValTok{200}\NormalTok{)[}\DecValTok{1}\OperatorTok{:}\DecValTok{4}\NormalTok{]} \end{Highlighting} \end{Shaded} \begin{verbatim} ## [1] 10 20 30 40 \end{verbatim} \begin{Shaded} \begin{Highlighting}[] \KeywordTok{c}\NormalTok{(}\DecValTok{10}\NormalTok{, }\DecValTok{20}\NormalTok{, }\DecValTok{30}\NormalTok{, }\DecValTok{40}\NormalTok{, }\DecValTok{50}\NormalTok{, }\DecValTok{100}\NormalTok{, }\DecValTok{200}\NormalTok{)[}\DecValTok{4}\OperatorTok{:}\DecValTok{7}\NormalTok{]} \end{Highlighting} \end{Shaded} \begin{verbatim} ## [1] 40 50 100 200 \end{verbatim} For indexing discontinuous elements, we can use our old friend, the \texttt{c()} function inside of the square braces. So, if we want to index the first 3 and the 5th elements: \begin{Shaded} \begin{Highlighting}[] \KeywordTok{c}\NormalTok{(}\DecValTok{10}\NormalTok{, }\DecValTok{20}\NormalTok{, }\DecValTok{30}\NormalTok{, }\DecValTok{40}\NormalTok{, }\DecValTok{50}\NormalTok{, }\DecValTok{100}\NormalTok{, }\DecValTok{200}\NormalTok{)[}\KeywordTok{c}\NormalTok{(}\DecValTok{1}\OperatorTok{:}\DecValTok{3}\NormalTok{, }\DecValTok{5}\NormalTok{)]} \end{Highlighting} \end{Shaded} \begin{verbatim} ## [1] 10 20 30 50 \end{verbatim} Finally, we can use the \texttt{-} character to index all elements of a vector, ``minus'' other elements. When excluding even consecutive elements, however, we have to include \texttt{c()}. For instance, if we want all \textbf{except} the first 2 elements, we could do: \begin{Shaded} \begin{Highlighting}[] \KeywordTok{c}\NormalTok{(}\DecValTok{10}\NormalTok{, }\DecValTok{20}\NormalTok{, }\DecValTok{30}\NormalTok{, }\DecValTok{40}\NormalTok{, }\DecValTok{50}\NormalTok{, }\DecValTok{100}\NormalTok{, }\DecValTok{200}\NormalTok{)[}\OperatorTok{-}\KeywordTok{c}\NormalTok{(}\DecValTok{1}\OperatorTok{:}\DecValTok{2}\NormalTok{)]} \end{Highlighting} \end{Shaded} \begin{verbatim} ## [1] 30 40 50 100 200 \end{verbatim} \hypertarget{more-complex-data-objects-in-r}{% \section{\texorpdfstring{More complex data objects in \texttt{R}}{More complex data objects in R}}\label{more-complex-data-objects-in-r}} Vectors are extremely important object types in \texttt{R}, for the reasons and examples we have already discussed. Other types of objects in \texttt{R} are also important, and necessary to learn about to do meaningful and efficient work. These other types of objects are more complex than vectors, but they can, in many cases, be composed of vectors. 
\hypertarget{lists}{% \subsection{Lists}\label{lists}} Lists in \texttt{R} are aggregates of different objects, and those objects can be a mixed variety of types. For example, a list could be an aggregate of 3 different vectors, even if those vectors are different lengths and contain elements of a different class. We can generate lists using the \texttt{list()} function. \begin{Shaded} \begin{Highlighting}[] \NormalTok{vec1 <-}\StringTok{ }\KeywordTok{c}\NormalTok{(}\DecValTok{10}\NormalTok{, }\DecValTok{20}\NormalTok{, }\DecValTok{30}\NormalTok{, }\DecValTok{40}\NormalTok{, }\DecValTok{50}\NormalTok{, }\DecValTok{100}\NormalTok{, }\DecValTok{200}\NormalTok{)} \NormalTok{vec2 <-}\StringTok{ }\KeywordTok{c}\NormalTok{(}\StringTok{"happy"}\NormalTok{, }\StringTok{"sad"}\NormalTok{, }\StringTok{"grumpy"}\NormalTok{)} \NormalTok{vec3 <-}\StringTok{ }\KeywordTok{factor}\NormalTok{(}\KeywordTok{c}\NormalTok{(}\StringTok{"high"}\NormalTok{, }\StringTok{"low"}\NormalTok{))} \NormalTok{mylist <-}\StringTok{ }\KeywordTok{list}\NormalTok{(vec1, vec2, vec3)} \KeywordTok{print}\NormalTok{(mylist)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## [[1]] ## [1] 10 20 30 40 50 100 200 ## ## [[2]] ## [1] "happy" "sad" "grumpy" ## ## [[3]] ## [1] high low ## Levels: high low \end{verbatim} \begin{Shaded} \begin{Highlighting}[] \KeywordTok{class}\NormalTok{(mylist)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## [1] "list" \end{verbatim} \begin{Shaded} \begin{Highlighting}[] \KeywordTok{str}\NormalTok{(mylist)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## List of 3 ## $ : num [1:7] 10 20 30 40 50 100 200 ## $ : chr [1:3] "happy" "sad" "grumpy" ## $ : Factor w/ 2 levels "high","low": 1 2 \end{verbatim} Let's take note of a few things from the output above. First, notice that each of the three vectors in \texttt{mylist} has a numeric (positional) index. Unlike individual vectors, however, primary elements of lists are indexed by double square braces \texttt{{[}{[}{]}{]}}. So, if we want to index the \texttt{vec2} element of \texttt{mylist}, we type: \begin{Shaded} \begin{Highlighting}[] \NormalTok{mylist[[}\DecValTok{2}\NormalTok{]]} \end{Highlighting} \end{Shaded} \begin{verbatim} ## [1] "happy" "sad" "grumpy" \end{verbatim} Taking it one step further, if we want to index the 2nd element of the \texttt{vec2} element of \texttt{mylist}, we type: \begin{Shaded} \begin{Highlighting}[] \NormalTok{mylist[[}\DecValTok{2}\NormalTok{]][}\DecValTok{2}\NormalTok{]} \end{Highlighting} \end{Shaded} \begin{verbatim} ## [1] "sad" \end{verbatim} The other things we should note from our exploration of \texttt{mylist} above is that 1. It has a class when we call the \texttt{class()} function, and 2. We see a nice breakdown of the 3 components that make up \texttt{mylist} when we call the \texttt{str()} function. \texttt{str()}, which is short for ``structure,'' is an especially useful function for trying to understand the organization of complex objects in \texttt{R}. \hypertarget{data-frames}{% \subsection{Data frames}\label{data-frames}} There is a special class of list we very often work with in \texttt{R} called a ``data frame.'' You can think of data frames as an especially useful organizing structure for data sets. Data frames are lists of vectors, but the vectors have to be the same length. Also, the vectors (officially known as ``columns'') in data frames have names we refer to as ``column names,'' and the rows also have names. 
For the types of analysis we will be dealing with in this course, it helps to organize our data so that variables in our study correspond to columns and observations correspond to rows. Let's explore some practical details regarding the generation and use of data frames. \hypertarget{creating-data-frames-in-r}{% \subsubsection{\texorpdfstring{Creating data frames in \texttt{R}}{Creating data frames in R}}\label{creating-data-frames-in-r}} We can generate data frames manually, like we did with the list \texttt{mylist} above. Here, for example, we can set up three variables (habitat, temp and elevation) as vectors. \begin{Shaded} \begin{Highlighting}[] \NormalTok{habitat <-}\StringTok{ }\KeywordTok{factor}\NormalTok{(}\KeywordTok{c}\NormalTok{(}\StringTok{"mixed"}\NormalTok{, }\StringTok{"wet"}\NormalTok{, }\StringTok{"wet"}\NormalTok{, }\StringTok{"wet"}\NormalTok{, }\StringTok{"dry"}\NormalTok{, }\StringTok{"dry"}\NormalTok{, }\StringTok{"dry"}\NormalTok{,}\StringTok{"mixed"}\NormalTok{))} \NormalTok{temp <-}\StringTok{ }\KeywordTok{c}\NormalTok{(}\FloatTok{3.4}\NormalTok{, }\FloatTok{3.4}\NormalTok{, }\FloatTok{8.4}\NormalTok{, }\DecValTok{3}\NormalTok{, }\FloatTok{5.6}\NormalTok{, }\FloatTok{8.1}\NormalTok{, }\FloatTok{8.3}\NormalTok{, }\FloatTok{4.5}\NormalTok{)} \NormalTok{elevation <-}\StringTok{ }\KeywordTok{c}\NormalTok{(}\DecValTok{0}\NormalTok{, }\FloatTok{9.2}\NormalTok{, }\FloatTok{3.8}\NormalTok{, }\DecValTok{5}\NormalTok{, }\FloatTok{5.6}\NormalTok{, }\FloatTok{4.1}\NormalTok{, }\FloatTok{7.1}\NormalTok{, }\FloatTok{5.3}\NormalTok{)} \end{Highlighting} \end{Shaded} Then we can use the \texttt{data.frame()} function to incorporate the vectors into columns of the data frame. \begin{Shaded} \begin{Highlighting}[] \NormalTok{mydata <-}\StringTok{ }\KeywordTok{data.frame}\NormalTok{(habitat, temp, elevation)} \KeywordTok{row.names}\NormalTok{(mydata) <-}\StringTok{ }\KeywordTok{c}\NormalTok{(}\StringTok{"Reedy Lake"}\NormalTok{, }\StringTok{"Pearcadale"}\NormalTok{, }\StringTok{"Warneet"}\NormalTok{, }\StringTok{"Cranbourne"}\NormalTok{, } \StringTok{"Lysterfield"}\NormalTok{, }\StringTok{"Red Hill"}\NormalTok{, }\StringTok{"Devilbend"}\NormalTok{, }\StringTok{"Olinda"}\NormalTok{)} \end{Highlighting} \end{Shaded} Note above that we used a function called \texttt{row.names} to assign row names to \texttt{mydata}. The function \texttt{colnames()} does the same, but for column names. \hypertarget{working-with-pre-loaded-base-r-data-frames.}{% \subsubsection{\texorpdfstring{Working with pre-loaded base \texttt{R} data frames.}{Working with pre-loaded base R data frames.}}\label{working-with-pre-loaded-base-r-data-frames.}} There are a few data frames that are available to work with whenever you begin an \texttt{R} session. These can be a great way to practice plotting and analysis, and in fact many examples written to accompany \texttt{R} functions include these data frames to promote reproducibility and convenience. Two of these pre-loaded data frames that are especially popular are \texttt{mtcars} and \texttt{iris}. 
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{head}\NormalTok{(mtcars)}
\end{Highlighting}
\end{Shaded}

\begin{verbatim}
##                    mpg cyl disp  hp drat    wt  qsec vs am gear carb
## Mazda RX4         21.0   6  160 110 3.90 2.620 16.46  0  1    4    4
## Mazda RX4 Wag     21.0   6  160 110 3.90 2.875 17.02  0  1    4    4
## Datsun 710        22.8   4  108  93 3.85 2.320 18.61  1  1    4    1
## Hornet 4 Drive    21.4   6  258 110 3.08 3.215 19.44  1  0    3    1
## Hornet Sportabout 18.7   8  360 175 3.15 3.440 17.02  0  0    3    2
## Valiant           18.1   6  225 105 2.76 3.460 20.22  1  0    3    1
\end{verbatim}

\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{head}\NormalTok{(iris)}
\end{Highlighting}
\end{Shaded}

\begin{verbatim}
##   Sepal.Length Sepal.Width Petal.Length Petal.Width Species
## 1          5.1         3.5          1.4         0.2  setosa
## 2          4.9         3.0          1.4         0.2  setosa
## 3          4.7         3.2          1.3         0.2  setosa
## 4          4.6         3.1          1.5         0.2  setosa
## 5          5.0         3.6          1.4         0.2  setosa
## 6          5.4         3.9          1.7         0.4  setosa
\end{verbatim}

\hypertarget{reading-in-data-frames-in-r}{%
\subsubsection{\texorpdfstring{Reading in data frames in \texttt{R}}{Reading in data frames in R}}\label{reading-in-data-frames-in-r}}

A strength of \texttt{R} is being able to import data from an external source.
For example, if you have a comma- or tab-separated text file (like the UNIX-friendly formats we discussed previously), it can be easily read into \texttt{R}, by default as a data frame.
One function for accomplishing this is \texttt{read.table()}, although functions like \texttt{read.delim()} can be similarly applied.
Two important arguments for \texttt{read.table()} are ``header'' and ``row.names'', which indicate that there is a header row (with column names) and a row label column (with row names), respectively.
You also need to supply the file path and name in quotation marks (no path is necessary if the file is in the current working directory), and what character is used as the field (column) delimiter. Here is an example:

\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{YourFile <-}\StringTok{ }\KeywordTok{read.table}\NormalTok{(}\StringTok{'yourfile.csv'}\NormalTok{, }\DataTypeTok{header=}\NormalTok{T, }\DataTypeTok{row.names=}\DecValTok{1}\NormalTok{, }\DataTypeTok{sep=}\StringTok{','}\NormalTok{)}
\NormalTok{YourFile <-}\StringTok{ }\KeywordTok{read.table}\NormalTok{(}\StringTok{'yourfile.txt'}\NormalTok{, }\DataTypeTok{header=}\NormalTok{T, }\DataTypeTok{row.names=}\DecValTok{1}\NormalTok{, }\DataTypeTok{sep=}\StringTok{'}\CharTok{\textbackslash{}t}\StringTok{'}\NormalTok{)}
\end{Highlighting}
\end{Shaded}

\hypertarget{exporting-data-frames-in-r}{%
\subsubsection{\texorpdfstring{Exporting data frames in \texttt{R}}{Exporting data frames in R}}\label{exporting-data-frames-in-r}}

If you ever want to save a data frame in a format that you can work with outside of \texttt{R}, the \texttt{write.table()} function does pretty much the opposite of its ``read'' counterpart.

\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{write.table}\NormalTok{(YourFile, }\StringTok{"yourfile.csv"}\NormalTok{, }\DataTypeTok{quote=}\NormalTok{F, }\DataTypeTok{row.names=}\NormalTok{T, }\DataTypeTok{sep=}\StringTok{","}\NormalTok{)}
\KeywordTok{write.table}\NormalTok{(YourFile, }\StringTok{"yourfile.txt"}\NormalTok{, }\DataTypeTok{quote=}\NormalTok{F, }\DataTypeTok{row.names=}\NormalTok{T, }\DataTypeTok{sep=}\StringTok{"}\CharTok{\textbackslash{}t}\StringTok{"}\NormalTok{)}
\end{Highlighting}
\end{Shaded}

\hypertarget{indexing-data-frames}{%
\subsubsection{Indexing data frames}\label{indexing-data-frames}}

Indexing data frames can be achieved in two different ways.
We can use numeric (positional) indexing as in the case of vectors and lists (see above). With a data frame, we can index any subset of it using two pieces of information: row coordinates and column coordinates. To accomplish this we use single square braces \texttt{{[},{]}}, in which the row coordinate(s) are typed first, followed by a comma, followed by the column cooridate(s). If we want to index all rows or all columns, we just leave the space to the left or right of the comma blank, respectively. Here are some examples for indexing subsets of the \texttt{iris} data frame. \begin{Shaded} \begin{Highlighting}[] \CommentTok{## The first row, with all columns} \NormalTok{iris[}\DecValTok{1}\NormalTok{,]} \end{Highlighting} \end{Shaded} \begin{verbatim} ## Sepal.Length Sepal.Width Petal.Length Petal.Width Species ## 1 5.1 3.5 1.4 0.2 setosa \end{verbatim} \begin{Shaded} \begin{Highlighting}[] \CommentTok{## The first 5 rows and the first 2 columns} \NormalTok{iris[}\DecValTok{1}\OperatorTok{:}\DecValTok{5}\NormalTok{,}\DecValTok{1}\OperatorTok{:}\DecValTok{2}\NormalTok{]} \end{Highlighting} \end{Shaded} \begin{verbatim} ## Sepal.Length Sepal.Width ## 1 5.1 3.5 ## 2 4.9 3.0 ## 3 4.7 3.2 ## 4 4.6 3.1 ## 5 5.0 3.6 \end{verbatim} With data frames, we can also use the column names to index subsets. To do this we use the \texttt{\$} character after the name of the data frame, followed by the name of the column we want to index. Again, below is a demonstration using \texttt{iris}. Indexing using column names is perhaps the most useful when defining statistical models, a topic we will reach later in the course. \begin{Shaded} \begin{Highlighting}[] \CommentTok{## The first 5 rows of the first column} \NormalTok{iris}\OperatorTok{$}\NormalTok{Sepal.Length[}\DecValTok{1}\OperatorTok{:}\DecValTok{5}\NormalTok{]} \end{Highlighting} \end{Shaded} \begin{verbatim} ## [1] 5.1 4.9 4.7 4.6 5.0 \end{verbatim} \hypertarget{matrices}{% \subsection{Matrices}\label{matrices}} Matrices in \texttt{R} are somewhat similar to data frames, but mixed classes among columns are not permitted, and rows and columns are only positionally indexed as opposed to having names. Positional indexing for matrices, not surprisingly, follows the \texttt{{[}rownumber,\ columnnumber{]}} convention, similar to data frames. A matrix can be generated using the \texttt{matrix()} function, as demonstrated below. \begin{Shaded} \begin{Highlighting}[] \CommentTok{## Populate a 3x3 matrix with values 1 to 9} \KeywordTok{matrix}\NormalTok{(}\DecValTok{1}\OperatorTok{:}\DecValTok{9}\NormalTok{, }\DataTypeTok{nrow=}\DecValTok{3}\NormalTok{, }\DataTypeTok{ncol=}\DecValTok{3}\NormalTok{)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## [,1] [,2] [,3] ## [1,] 1 4 7 ## [2,] 2 5 8 ## [3,] 3 6 9 \end{verbatim} \hypertarget{a-few-additional-base-r-functions-for-working-with-complex-r-objects}{% \subsection{\texorpdfstring{A few additional base \texttt{R} functions for working with complex \texttt{R} objects}{A few additional base R functions for working with complex R objects}}\label{a-few-additional-base-r-functions-for-working-with-complex-r-objects}} To add to your foundational knowledge of \texttt{R} functions, below are a few more functions especially useful for working with objects like data frames and matrices. 
\begin{itemize}
\item
  \texttt{dim()}: returns the number of rows and columns of a data frame or matrix
\item
  \texttt{View()}: opens up a GUI ``viewer'' for visual inspection of data frames (not recommended for large data frames)
\item
  \texttt{cbind()}: combines columns into a single object, which can be used to define or build data frames or matrices
\item
  \texttt{rbind()}: combines rows into a single object, which can be used to define or build data frames or matrices
\item
  \texttt{t()}: transposes a data frame or matrix, such that rows become columns, and columns become rows
\end{itemize}

\hypertarget{some-brief-notes-on-basic-programming-in-r}{%
\section{\texorpdfstring{Some brief notes on basic programming in \texttt{R}}{Some brief notes on basic programming in R}}\label{some-brief-notes-on-basic-programming-in-r}}

At some point during your development as an \texttt{R} user you will want to programmatically manipulate \texttt{R} objects in an iterative, repeatable manner to automate tasks like plotting, simulations, and analysis.
This use of the \texttt{R} language is especially relevant if you want to write your own functions.
Here we touch on a few tools and approaches that will open the door to more powerful programming in \texttt{R}.
These are great skills to practice and learn, but for now we will keep things at a fairly foundational level.
More advanced programming training in \texttt{R} is beyond the scope of this course.

\hypertarget{conditional-statements-with-ifelse}{%
\subsection{\texorpdfstring{conditional statements with \texttt{ifelse()}}{conditional statements with ifelse()}}\label{conditional-statements-with-ifelse}}

One fundamental structural component of computer programming languages is the idea of conditional statements, which often take the form of ``if/else'' evaluation and execution.
The idea is that we can write an algorithm to evaluate a particular statement using a logical operator, and if that statement is true have the program do one thing, but if the statement is false, have it do something ``else.''
In \texttt{R} we can write these statements with a structure similar to other languages, but we can also use the single \texttt{R} function \texttt{ifelse()} to accomplish the same thing.

The \texttt{ifelse()} function is very easy to use.
The first argument is the logical evaluation, the second argument is the action to take if that statement is true, and the third argument is the action to take if false.
It is also possible to nest multiple \texttt{ifelse()} function calls within one another, if multiple evaluations need to be performed with different outcomes.
Below is a simple example for using \texttt{ifelse()} to generate a vector of values (``colors''), based on another vector.
\begin{Shaded} \begin{Highlighting}[] \CommentTok{## First define a character vector} \NormalTok{char_vec <-}\StringTok{ }\KeywordTok{c}\NormalTok{(}\KeywordTok{rep}\NormalTok{(}\StringTok{"treatment"}\NormalTok{,}\DecValTok{5}\NormalTok{), }\KeywordTok{rep}\NormalTok{(}\StringTok{"control"}\NormalTok{,}\DecValTok{3}\NormalTok{), }\KeywordTok{rep}\NormalTok{(}\StringTok{"treatment"}\NormalTok{, }\DecValTok{4}\NormalTok{), }\KeywordTok{rep}\NormalTok{(}\StringTok{"control"}\NormalTok{, }\DecValTok{6}\NormalTok{))} \KeywordTok{print}\NormalTok{(char_vec)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## [1] "treatment" "treatment" "treatment" "treatment" "treatment" "control" ## [7] "control" "control" "treatment" "treatment" "treatment" "treatment" ## [13] "control" "control" "control" "control" "control" "control" \end{verbatim} \begin{Shaded} \begin{Highlighting}[] \CommentTok{## Generate a vector that stores the color "red" for "treatment" and "blue" for "control"} \NormalTok{col_vec <-}\StringTok{ }\KeywordTok{ifelse}\NormalTok{(char_vec}\OperatorTok{==}\StringTok{"treatment"}\NormalTok{, }\StringTok{"red"}\NormalTok{, }\StringTok{"blue"}\NormalTok{)} \KeywordTok{print}\NormalTok{(col_vec)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## [1] "red" "red" "red" "red" "red" "blue" "blue" "blue" "red" "red" ## [11] "red" "red" "blue" "blue" "blue" "blue" "blue" "blue" \end{verbatim} \hypertarget{the-split-apply-combine-approach-to-data-analysis}{% \section{The Split-Apply-Combine approach to data analysis}\label{the-split-apply-combine-approach-to-data-analysis}} It can often feel like the specific tools and strategies required for data analysis are unique to any given combination of question and data. However, one motif does appear time and time again: the need to take a larger data set, summarize a key variable by one or more grouping variables, and return the result as a new, more directly useful data object. This work flow is often called the `Split-Apply-Combine' approach to data analysis. You first (1) \textbf{Split} your data by some grouping variables of interest e.g.~treatment, (2) \textbf{Apply} a function to another variable for each group separately e.g.~the mean of some response variable, and (3) \textbf{Combine} the output into a new data object e.g.~a named vector or data frame. Because this approach is so common, \texttt{R} has many ready-made functions and packages to make this as easy as possible. \hypertarget{replicate-apply-tapply-and-aggregate}{% \subsection{\texorpdfstring{\texttt{replicate()}, \texttt{apply()}, \texttt{tapply()}, and \texttt{aggregate()}}{replicate(), apply(), tapply(), and aggregate()}}\label{replicate-apply-tapply-and-aggregate}} In some cases we want to repeat a given process over and over again. For example, maybe we want to simulate the sampling process and generate 100 random samples of 100 values from a normal distribution. Fortunately, the \texttt{R} function \texttt{replicate()} makes this very easy. 
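As a quick sketch of that idea (assuming, just for illustration, a standard normal distribution and the object name \texttt{sims}), the whole simulation takes one line, and the result is a matrix with one sample per column.

\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{## 100 random samples of 100 values each, drawn from a standard normal;}
\CommentTok{## each column of the resulting 100 x 100 matrix holds one sample}
\NormalTok{sims <-}\StringTok{ }\KeywordTok{replicate}\NormalTok{(}\DecValTok{100}\NormalTok{, }\KeywordTok{rnorm}\NormalTok{(}\DecValTok{100}\NormalTok{))}
\KeywordTok{dim}\NormalTok{(sims)}
\end{Highlighting}
\end{Shaded}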
In the example below, we ``shuffle'' the order of the integers 1 through 10 five times using \texttt{replicate()}: \begin{Shaded} \begin{Highlighting}[] \KeywordTok{replicate}\NormalTok{(}\DecValTok{5}\NormalTok{, }\KeywordTok{sample}\NormalTok{(}\DecValTok{1}\OperatorTok{:}\DecValTok{10}\NormalTok{, }\DataTypeTok{size=}\DecValTok{10}\NormalTok{, }\DataTypeTok{replace=}\OtherTok{FALSE}\NormalTok{))} \end{Highlighting} \end{Shaded} \begin{verbatim} ## [,1] [,2] [,3] [,4] [,5] ## [1,] 9 1 7 2 7 ## [2,] 5 4 5 7 10 ## [3,] 10 9 9 3 1 ## [4,] 3 7 6 4 5 ## [5,] 6 6 10 1 4 ## [6,] 2 2 4 8 2 ## [7,] 8 5 2 9 3 ## [8,] 1 10 1 10 6 ## [9,] 4 8 8 6 8 ## [10,] 7 3 3 5 9 \end{verbatim} Note that the first argument is the number of total iterations we want to reproduce, and that the function returns a matrix as output. The \texttt{replicate()} function belongs to a group of functions referred to informally as the ``apply'' family. Another, similar function from this family is simply called \texttt{apply()}, and it can be used to apply a function to either all rows (with the MARGIN argument set to 1) or all columns (with the MARGIN argument set to 2) in a data frame or matrix. This is especially useful for calculating summary statistics for what we call the ``margins'' of data in tables. You might be starting to see how this family of functions could emulate the \textbf{S-A-C} approach to data analysis. A very useful function from this group is \texttt{tapply()}, which allows you to apply a function to one vector (for example a numeric vector in a data frame), in a group-wise manner based on one or more factor vectors that correspond to the numeric vector. In other words, if we want to find the maximum value of variable x for each level of factor y in a data frame, we could use \texttt{tapply()} to do so. Below is an example, again using the \texttt{iris} data frame. \begin{Shaded} \begin{Highlighting}[] \CommentTok{## Find the maximum petal length for each species in the iris data frame} \KeywordTok{tapply}\NormalTok{(iris}\OperatorTok{$}\NormalTok{Petal.Length, iris}\OperatorTok{$}\NormalTok{Species, max)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## setosa versicolor virginica ## 1.9 5.1 6.9 \end{verbatim} Note that the first argument is the numerical column, and the second is a factor column. The third is the function we wish to apply, in this case to each species separately. One problem with \texttt{tapply()} is that it only allows you to apply a function to a single variable of interest. What if you would like to summarize \emph{all} relevant variables based on one or more factors? For that, we can use \texttt{aggregate()}. For example, we could collapse the entire \texttt{iris} data frame by finding the average of each numeric column by species. \begin{Shaded} \begin{Highlighting}[] \CommentTok{## Find the mean of each variable for each species in the iris data frame} \KeywordTok{aggregate}\NormalTok{(iris, }\DataTypeTok{by =} \KeywordTok{list}\NormalTok{(iris}\OperatorTok{$}\NormalTok{Species), }\DataTypeTok{FUN =}\NormalTok{ mean)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## Group.1 Sepal.Length Sepal.Width Petal.Length Petal.Width Species ## 1 setosa 5.006 3.428 1.462 0.246 NA ## 2 versicolor 5.936 2.770 4.260 1.326 NA ## 3 virginica 6.588 2.974 5.552 2.026 NA \end{verbatim} Much like \texttt{tapply()}, the first argument is the data we wish to summarize, the second argument is the grouping variable (which must be passed as a list), and the third argument is our function. 
Note that in this case, we end up with a pointless \texttt{Species} column, as the \texttt{aggregate()} function attempts to apply a function to \emph{every} variable, including the grouping variable, producing a column of NAs in this case. \texttt{aggregate()} is a highly flexible function - you can choose to select only a subset of data to summarize, specify multiple grouping variables, and aggregate your data using formula notation as well (a topic that will be covered later in this book). \hypertarget{for-loops-in-r}{% \subsection{\texorpdfstring{For loops in \texttt{R}}{For loops in R}}\label{for-loops-in-r}} Another fundamental concept in computer programming is the ``for loop,'' which is an algorithmic strategy for iteratively performing a task according to a pre-defined counter or loop variable, then terminating when the ``loop'' is evaluated as complete. For example, we may want to perform a specific calculation again and again for successive elements of an \texttt{R} object (like a data frame), and build a vector that successively stores the calculation for each iteration of the ``loop.'' We will not devote much time to for loops in \texttt{R} here, because a lot of looping functionality is accessible in \texttt{R} without ever needing to specify a loop. For example, the ``apply'' group of functions accomplish many of the tasks you would otherwise write a for loop to perform. Additionally, remember that \texttt{R} is a vectorized programming language, meaning that many basic operations, like addition or multiplication, are automatically performed iteratively to each element in vectors. Whenever you are considering writing a for loop, carefully consider whether there is a much faster alternative. However, the commented example below illustrates an application in which a for loop is warranted. Note that we ``pre-allocate'' output vectors by creating an empty data frame to store the results. This maximizes for loop speed by circumventing the need to re-write objects as we iteratively add elements. \begin{Shaded} \begin{Highlighting}[] \CommentTok{## Calculate mpg/cyl and mpg/wt, for every row in mtcars and if the second is at least twice the size of the first include that ratio and another character value "Yes" in a growing 2-column dataframe. If the ratio is less than 2, then include "No" in the second column. 
} \CommentTok{## first pre-allocate our new data frame, which contains NAs initially} \NormalTok{newdf <-}\StringTok{ }\KeywordTok{data.frame}\NormalTok{(}\KeywordTok{rep}\NormalTok{(}\OtherTok{NA}\NormalTok{, }\KeywordTok{length}\NormalTok{(mtcars}\OperatorTok{$}\NormalTok{mpg)), }\KeywordTok{rep}\NormalTok{(}\OtherTok{NA}\NormalTok{, }\KeywordTok{length}\NormalTok{(mtcars}\OperatorTok{$}\NormalTok{mpg)))} \CommentTok{## then write the for loop to do the above task for every row in mtcars} \ControlFlowTok{for}\NormalTok{(i }\ControlFlowTok{in} \DecValTok{1}\OperatorTok{:}\KeywordTok{length}\NormalTok{(mtcars}\OperatorTok{$}\NormalTok{mpg)) \{} \NormalTok{ newdf[i,}\DecValTok{1}\NormalTok{] <-}\StringTok{ }\NormalTok{(mtcars}\OperatorTok{$}\NormalTok{mpg[i]}\OperatorTok{/}\NormalTok{mtcars}\OperatorTok{$}\NormalTok{wt[i])}\OperatorTok{/}\NormalTok{(mtcars}\OperatorTok{$}\NormalTok{mpg[i]}\OperatorTok{/}\NormalTok{mtcars}\OperatorTok{$}\NormalTok{cyl[i])} \NormalTok{ newdf[i,}\DecValTok{2}\NormalTok{] <-}\StringTok{ }\KeywordTok{ifelse}\NormalTok{(newdf[i,}\DecValTok{1}\NormalTok{]}\OperatorTok{>=}\DecValTok{2}\NormalTok{, }\StringTok{"Yes"}\NormalTok{, }\StringTok{"No"}\NormalTok{)} \NormalTok{\}} \KeywordTok{print}\NormalTok{(newdf)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## rep.NA..length.mtcars.mpg.. rep.NA..length.mtcars.mpg...1 ## 1 2.290076 Yes ## 2 2.086957 Yes ## 3 1.724138 No ## 4 1.866252 No ## 5 2.325581 Yes ## 6 1.734104 No ## 7 2.240896 Yes ## 8 1.253918 No ## 9 1.269841 No ## 10 1.744186 No ## 11 1.744186 No ## 12 1.965602 No ## 13 2.144772 Yes ## 14 2.116402 Yes ## 15 1.523810 No ## 16 1.474926 No ## 17 1.496726 No ## 18 1.818182 No ## 19 2.476780 Yes ## 20 2.179837 Yes ## 21 1.622718 No ## 22 2.272727 Yes ## 23 2.328967 Yes ## 24 2.083333 Yes ## 25 2.080624 Yes ## 26 2.067183 Yes ## 27 1.869159 No ## 28 2.643754 Yes ## 29 2.523659 Yes ## 30 2.166065 Yes ## 31 2.240896 Yes ## 32 1.438849 No \end{verbatim} In the case above, we used the length of the \texttt{mtcars} data frame (number of rows) to build a pre-allocated (filled with NAs) data frame of the correct size. Then, we also used the values 1 through that length to set up our ``counter'' in the for loop. The loop stops after tasks have been completed for \texttt{i=32}, which corresponds to the final row in \texttt{mtcars}. As mentioned, it's probably better to rely on the other convenient \texttt{R} functions above for iterative processes, but pre-allocation of output objects is the way to go if you do need to rely on a for loop. \hypertarget{fundamentals-of-plotting-in-r}{% \section{\texorpdfstring{Fundamentals of plotting in \texttt{R}}{Fundamentals of plotting in R}}\label{fundamentals-of-plotting-in-r}} The world of plotting in \texttt{R} is incredibly diverse, and there are entire courses dedicated to data visualization using \texttt{R}. Here we will very briefly cover a few of the most useful plotting functions and strategies using base \texttt{R}. This should be enough of an introduction to get you jump started, but you will no doubt discover more appealing and finely tuned strategies to apply in your future as an \texttt{R} user. For example, some people will find that the highly flexible, customizable package \texttt{ggplot2} and its plotting functions are preferable over base \texttt{R}. I encourage you to explore tools like this on your own, once you feel comfortable with \texttt{R} in general. 
We will also introduce plot- and visualization-related lessons throughout the remainder of the course, as they pertain to the analysis topic at hand.

\hypertarget{basic-plotting-with-plot}{%
\subsection{\texorpdfstring{Basic plotting with \texttt{plot()}}{Basic plotting with plot()}}\label{basic-plotting-with-plot}}

One ``high level'' plotting function in base \texttt{R} is simply called \texttt{plot()}. This function can accomplish many, many plotting goals, so we will start with it. Below, we start by calling \texttt{plot()} on a single vector that we have generated. Spend a little time examining the code, and the arguments passed to \texttt{plot()} in this example.

\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{seq_}\DecValTok{1}\NormalTok{ <-}\StringTok{ }\KeywordTok{seq}\NormalTok{(}\FloatTok{0.0}\NormalTok{, }\FloatTok{10.0}\NormalTok{, }\DataTypeTok{by =} \FloatTok{0.1}\NormalTok{) }
\KeywordTok{plot}\NormalTok{(seq_}\DecValTok{1}\NormalTok{, }\DataTypeTok{xlab=}\StringTok{"space"}\NormalTok{, }\DataTypeTok{ylab =}\StringTok{"function of space"}\NormalTok{, }\DataTypeTok{type =} \StringTok{"p"}\NormalTok{, }\DataTypeTok{col =} \StringTok{"red"}\NormalTok{)}
\end{Highlighting}
\end{Shaded}

\includegraphics{foundational_statistics_files/figure-latex/unnamed-chunk-39-1.pdf}

We only supplied the one vector (\texttt{seq\_1}) to \texttt{plot()} in this case, which resulted in the function just defining the x-axis values as the numeric positions (1 to 101) of \texttt{seq\_1}. Also, this style of plot is known as a ``scatterplot.'' There is usually more ``scatter,'' for example when plotting two variables that are not perfectly related. With \texttt{plot()}, we usually want to examine the relationship between two different variables, like below:

\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{seq_}\DecValTok{1}\NormalTok{ <-}\StringTok{ }\KeywordTok{seq}\NormalTok{(}\FloatTok{0.0}\NormalTok{, }\FloatTok{10.0}\NormalTok{, }\DataTypeTok{by =} \FloatTok{0.1}\NormalTok{)}
\NormalTok{seq_}\DecValTok{2}\NormalTok{ <-}\StringTok{ }\KeywordTok{seq}\NormalTok{(}\FloatTok{10.0}\NormalTok{, }\FloatTok{0.0}\NormalTok{, }\DataTypeTok{by =} \FloatTok{-0.1}\NormalTok{)}
\KeywordTok{plot}\NormalTok{(seq_}\DecValTok{1}\NormalTok{, seq_}\DecValTok{2}\NormalTok{, }\DataTypeTok{xlab=}\StringTok{"sequence 1"}\NormalTok{, }\DataTypeTok{ylab =}\StringTok{"sequence 2"}\NormalTok{, }\DataTypeTok{type =} \StringTok{"p"}\NormalTok{, }\DataTypeTok{col =} \StringTok{"red"}\NormalTok{)}
\end{Highlighting}
\end{Shaded}

\includegraphics{foundational_statistics_files/figure-latex/unnamed-chunk-40-1.pdf}

In this example, \texttt{plot()} takes the first argument as the x-axis variable, and the second argument as the y-axis variable. You can also use the \texttt{\textasciitilde{}} to specify variables, but in this case the y-axis variable comes first (\texttt{y\ \textasciitilde{}\ x}). Also note the other arguments, which are usually named pretty intuitively. Note the axis label arguments, the type of object plotted (``p'' stands for ``points''), and the color of the plotted objects. There are many possible arguments, and many are actually set by another function called \texttt{par()}, which \texttt{plot()} calls on internally. One great resource for understanding plotting function arguments is the help menu for \texttt{par()}. I promise, if you become familiar with the \texttt{par()} documentation, you will quickly ascend the ranks of plotting prowess, and it will save you many frustrating moments in the future!
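As a quick taste of some of those arguments, here is a minimal, hedged sketch (reusing the \texttt{seq\_1} and \texttt{seq\_2} vectors defined above; the title text and axis limits are arbitrary choices for illustration):

\begin{verbatim}
## a small sketch illustrating a few common graphical arguments;
## seq_1 and seq_2 are the vectors defined in the examples above
plot(seq_1, seq_2,
     xlab = "sequence 1", ylab = "sequence 2",
     main = "Two sequences plotted against each other",  # plot title
     xlim = c(0, 12), ylim = c(0, 12),                    # axis limits
     type = "p", col = "red", cex = 1.5, pch = 19)        # bigger, filled points
\end{verbatim}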
I encourage you to study the \texttt{plot()} and \texttt{par()} documentation and practice using some of the other arguments that are especially useful, including ``main'', ``xlim'', ``ylim'', and ``cex'', for example. The nice thing about graphical parameters is that, like many things in \texttt{R}, they are vectorized. So, if we want to use different symbols (look into the ``pch'' argument), colors (``col''), or sizes (look at ``cex'') of points for different observations in something like a data frame, we can supply those in the form of a vector! Taking the example above, if we want to plot the first 10 observations as blue, and the remaining observations as red, we can supply a vector of ``blues'' and ``reds'' in the appropriate order to \texttt{plot()}. \begin{Shaded} \begin{Highlighting}[] \NormalTok{seq_}\DecValTok{1}\NormalTok{ <-}\StringTok{ }\KeywordTok{seq}\NormalTok{(}\FloatTok{0.0}\NormalTok{, }\FloatTok{10.0}\NormalTok{, }\DataTypeTok{by =} \FloatTok{0.1}\NormalTok{)} \NormalTok{seq_}\DecValTok{2}\NormalTok{ <-}\StringTok{ }\KeywordTok{seq}\NormalTok{(}\FloatTok{10.0}\NormalTok{, }\FloatTok{0.0}\NormalTok{, }\DataTypeTok{by =} \FloatTok{-0.1}\NormalTok{)} \KeywordTok{plot}\NormalTok{(seq_}\DecValTok{1}\NormalTok{, seq_}\DecValTok{2}\NormalTok{, }\DataTypeTok{xlab=}\StringTok{"sequence 1"}\NormalTok{, }\DataTypeTok{ylab =}\StringTok{"sequence 2"}\NormalTok{, }\DataTypeTok{type =} \StringTok{"p"}\NormalTok{, } \DataTypeTok{col =} \KeywordTok{c}\NormalTok{(}\KeywordTok{rep}\NormalTok{(}\StringTok{"blue"}\NormalTok{, }\DecValTok{10}\NormalTok{), }\KeywordTok{rep}\NormalTok{(}\StringTok{"red"}\NormalTok{, }\DecValTok{91}\NormalTok{)))} \end{Highlighting} \end{Shaded} \includegraphics{foundational_statistics_files/figure-latex/unnamed-chunk-41-1.pdf} You can see how this would be a nice way to differentiate among observation types in your data set, and produce an information-rich, single plot, as opposed to producing many plots that highlight single variables. Sometimes we want to include multiple plots, as different panels, in the same figure. Fortunately this is made easy by the \texttt{mfrow} argument within \texttt{par()}. You simply set the dimensions, denoted by number of rows and number of columns in parentheses, before calling \texttt{plot()} repeatedly. 
\begin{Shaded} \begin{Highlighting}[] \NormalTok{seq_square <-}\StringTok{ }\NormalTok{(seq_}\DecValTok{2}\NormalTok{)}\OperatorTok{*}\NormalTok{(seq_}\DecValTok{2}\NormalTok{)} \NormalTok{seq_square_new <-}\StringTok{ }\NormalTok{(seq_}\DecValTok{2}\NormalTok{)}\OperatorTok{^}\DecValTok{2} \KeywordTok{par}\NormalTok{(}\DataTypeTok{mfrow=}\KeywordTok{c}\NormalTok{(}\DecValTok{2}\NormalTok{,}\DecValTok{2}\NormalTok{))} \KeywordTok{plot}\NormalTok{ (seq_}\DecValTok{1}\NormalTok{, }\DataTypeTok{xlab=}\StringTok{"time"}\NormalTok{, }\DataTypeTok{ylab =}\StringTok{"p in population 1"}\NormalTok{, }\DataTypeTok{type =} \StringTok{"p"}\NormalTok{, }\DataTypeTok{col =} \StringTok{'red'}\NormalTok{)} \KeywordTok{plot}\NormalTok{ (seq_}\DecValTok{2}\NormalTok{, }\DataTypeTok{xlab=}\StringTok{"time"}\NormalTok{, }\DataTypeTok{ylab =}\StringTok{"p in population 2"}\NormalTok{, }\DataTypeTok{type =} \StringTok{"p"}\NormalTok{, }\DataTypeTok{col =} \StringTok{'green'}\NormalTok{)} \KeywordTok{plot}\NormalTok{ (seq_square, }\DataTypeTok{xlab=}\StringTok{"time"}\NormalTok{, }\DataTypeTok{ylab =}\StringTok{"p2 in population 2"}\NormalTok{, }\DataTypeTok{type =} \StringTok{"p"}\NormalTok{, }\DataTypeTok{col =} \StringTok{'blue'}\NormalTok{)} \KeywordTok{plot}\NormalTok{ (seq_square_new, }\DataTypeTok{xlab=}\StringTok{"time"}\NormalTok{, }\DataTypeTok{ylab =}\StringTok{"p in population 1"}\NormalTok{, }\DataTypeTok{type =} \StringTok{"l"}\NormalTok{, }\DataTypeTok{col =} \StringTok{'yellow'}\NormalTok{)} \end{Highlighting} \end{Shaded} \includegraphics{foundational_statistics_files/figure-latex/unnamed-chunk-42-1.pdf} \hypertarget{histograms-using-hist}{% \subsection{\texorpdfstring{Histograms using \texttt{hist()}}{Histograms using hist()}}\label{histograms-using-hist}} We will talk more about frequency distributions and histograms later in the course, but for now it is a good idea to become familiar with one way to plot them. If we have a quantitative variable, like height, and we want to know what the distribution among individuals looks like, we can use a histogram. The function \texttt{hist()} will help us with this task. To illustrate, below we will sample values from a binomial distribution. Don't worry about what this means now, as we will return to it later, but the scenario is intuitive. Let's say we flip a coin 20 times and record the number of ``heads'' as ``successes,'' and let's further say that we perform this ``20 coin flips'' activity 1000 times. And let's assume that our coin is ``fair,'' such that the probability of getting heads on any given flip is 0.5. We can simulate this process using the \texttt{rbinom()} function and plot the results using \texttt{hist()}. \begin{Shaded} \begin{Highlighting}[] \KeywordTok{hist}\NormalTok{(}\KeywordTok{rbinom}\NormalTok{(}\DataTypeTok{n=}\DecValTok{1000}\NormalTok{, }\DataTypeTok{size=}\DecValTok{20}\NormalTok{, }\DataTypeTok{prob=}\FloatTok{0.5}\NormalTok{), }\DataTypeTok{xlab=}\StringTok{"number of heads"}\NormalTok{, }\DataTypeTok{ylab=}\StringTok{"number of activities"}\NormalTok{,} \DataTypeTok{main=}\StringTok{"Freq. Dist. of Coin Flip Successes"}\NormalTok{)} \end{Highlighting} \end{Shaded} \includegraphics{foundational_statistics_files/figure-latex/binomial function-1.pdf} Note that, as expected, our most frequent observation is that we get 10 heads out of 20 flips. 
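If you want to see how the \texttt{prob} argument controls this behavior, here is a minimal sketch (the resulting figure is not rendered here) that repeats the simulation with a coin biased toward heads; the histogram shifts toward higher counts, as expected:

\begin{verbatim}
## same simulation as above, but with a biased coin (prob = 0.8)
hist(rbinom(n = 1000, size = 20, prob = 0.8),
     xlab = "number of heads", ylab = "number of activities",
     main = "Freq. Dist. of Coin Flip Successes, Biased Coin")
\end{verbatim}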
\hypertarget{boxplots-using-boxplot}{%
\subsection{\texorpdfstring{Boxplots using \texttt{boxplot()}}{Boxplots using boxplot()}}\label{boxplots-using-boxplot}}

In many cases we want to summarize the distribution of a quantitative variable using ``quartiles'' (we'll cover these in depth later), and perhaps we want to do this separately for different observation types in our data set. A boxplot (or ``box and whisker plot,'' depending on how it is drawn) depicts the 1st, 2nd (median), and 3rd quartile for a vector of numeric values using a box. ``Whiskers'' are often added to define ``fences'' beyond which are putative ``outliers.'' The \texttt{boxplot()} function of base \texttt{R} is convenient to use, particularly when your data set is organized in a data frame. Below is a series of simple examples to illustrate the utility of \texttt{boxplot()}.

\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{## make a modified version of the iris data frame, which includes a "Region" factor}
\NormalTok{new_iris <-}\StringTok{ }\NormalTok{iris}
\NormalTok{new_iris}\OperatorTok{$}\NormalTok{Region <-}\StringTok{ }\KeywordTok{as.factor}\NormalTok{(}\KeywordTok{rep}\NormalTok{(}\KeywordTok{c}\NormalTok{(}\KeywordTok{rep}\NormalTok{(}\StringTok{"West"}\NormalTok{, }\DecValTok{5}\NormalTok{), }\KeywordTok{rep}\NormalTok{(}\StringTok{"East"}\NormalTok{, }\DecValTok{5}\NormalTok{)), }\DecValTok{15}\NormalTok{))}
\CommentTok{## make a boxplot of Sepal.Length that plots individual boxes for the separate Species}
\KeywordTok{boxplot}\NormalTok{(Sepal.Length }\OperatorTok{~}\StringTok{ }\NormalTok{Species, }\DataTypeTok{data=}\NormalTok{new_iris)}
\end{Highlighting}
\end{Shaded}

\includegraphics{foundational_statistics_files/figure-latex/unnamed-chunk-43-1.pdf}

\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{## make a boxplot of Sepal.Length that shows all 6 combinations of factor levels from Species and Region, including a different color for each species}
\KeywordTok{boxplot}\NormalTok{(Sepal.Length }\OperatorTok{~}\StringTok{ }\NormalTok{Species}\OperatorTok{*}\NormalTok{Region, }\DataTypeTok{col=}\KeywordTok{c}\NormalTok{(}\StringTok{"blue"}\NormalTok{, }\StringTok{"red"}\NormalTok{, }\StringTok{"yellow"}\NormalTok{, }\StringTok{"blue"}\NormalTok{, }\StringTok{"red"}\NormalTok{, }\StringTok{"yellow"}\NormalTok{),}
\DataTypeTok{data=}\NormalTok{new_iris, }\DataTypeTok{names=}\KeywordTok{c}\NormalTok{(}\StringTok{"set_E"}\NormalTok{,}\StringTok{"ver_E"}\NormalTok{,}\StringTok{"vir_E"}\NormalTok{,}\StringTok{"set_W"}\NormalTok{,}\StringTok{"ver_W"}\NormalTok{,}\StringTok{"vir_W"}\NormalTok{))}
\end{Highlighting}
\end{Shaded}

\includegraphics{foundational_statistics_files/figure-latex/unnamed-chunk-43-2.pdf}

Above you can see that by using the \texttt{*} character between the factors ``Species'' and ``Region'' in our plotting ``formula,'' \texttt{boxplot()} produces a box for each factor level combination. Also, for \texttt{boxplot()} note that the ``col'' argument refers to the boxes themselves, so if we supply a vector of 6 colors, those will be applied to the boxes in order from left to right. Speaking of colors, an almost limitless array of colors can be specified in \texttt{R} plotting functions. Furthermore, colors can be coded using their names or their hexadecimal RGB specification. For a thorough treatment and great resources regarding colors in \texttt{R}, I recommend visiting the links at the bottom of the chapter.
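As a small illustration of those two ways of specifying colors, here is a hedged sketch (the hexadecimal codes are arbitrary examples, not special values) that mixes a named color with hex RGB codes in a single \texttt{boxplot()} call, reusing the \texttt{new\_iris} data frame created above:

\begin{verbatim}
## named colors and hexadecimal RGB codes can be mixed freely;
## "#1B9E77" and "#D95F02" are arbitrary example hex codes
boxplot(Sepal.Length ~ Species, data = new_iris,
        col = c("steelblue", "#1B9E77", "#D95F02"))
\end{verbatim}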
\hypertarget{a-brief-introduction-to-rmarkdown}{%
\section{\texorpdfstring{A brief introduction to \texttt{RMarkdown}}{A brief introduction to RMarkdown}}\label{a-brief-introduction-to-rmarkdown}}

\texttt{RMarkdown} is a language that is distinct from \texttt{R}, but that incorporates \texttt{R} code ``chunks,'' which can be displayed and run if desired in the final, knitted output. The output can be knitted to a variety of file formats, such as \texttt{.html}, \texttt{.pdf}, or even Microsoft Word. For this course we will get into the habit of knitting to \texttt{.html}, which is the least buggy and error-prone in my experience. In the short section below, we will go over the simple steps required to write and knit your first \texttt{.Rmd} file, including the basic style elements of the language and some essential \texttt{R} chunk settings.

To get started using \texttt{RMarkdown}, you first need to make sure that you install the package \texttt{rmarkdown} from your Console, using \texttt{install.packages()}. Then, assuming you have an \texttt{RStudio} session running, click on File -\textgreater{} New File -\textgreater{} R Markdown. This will open a window in which you will type the name of your new file and the author's (your) name. A new file in your \texttt{RStudio} script editor pane (the upper left one) should appear. There will be a templated header, along with some other templated code, which you can modify based on your preferences. You may want to get rid of the pdf output line at the top for now, as we will knit to \texttt{.html} for this course. Knitting to \texttt{.pdf} requires some additional software installation, which we don't have time to troubleshoot during this course. In any case, let's now cover some basic formatting code and code ``chunk'' types. Below I will provide the code you would type in your own \texttt{RMarkdown} file, followed by what it looks like rendered in this \texttt{Bookdown} document, which is built using a collection of \texttt{RMarkdown} files itself!

\hypertarget{rmarkdown-formatting-basics}{%
\subsection{\texorpdfstring{\texttt{RMarkdown} formatting basics}{RMarkdown formatting basics}}\label{rmarkdown-formatting-basics}}

You can include ``nested'' headers (like the one directly above) by using \texttt{\#} symbols.
For example, this:

\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{## Experiment with headers}
\CommentTok{### Try a third-level header}
\CommentTok{#### Or a fourth-level header}
\end{Highlighting}
\end{Shaded}

Renders as this:

\hypertarget{experiment-with-headers}{%
\section{Experiment with headers}\label{experiment-with-headers}}

\hypertarget{try-a-third-level-header}{%
\subsection{Try a third-level header}\label{try-a-third-level-header}}

\hypertarget{or-a-fourth-level-header}{%
\subsubsection{Or a fourth-level header}\label{or-a-fourth-level-header}}

Text can be rendered in bold, italics, or both like this:

\begin{Shaded}
\begin{Highlighting}[]
\ExtensionTok{Text}\NormalTok{ can easily be *italicized* or **bolded** or ***both***}
\end{Highlighting}
\end{Shaded}

Which renders as this:

Text can easily be \emph{italicized} or \textbf{bolded} or \textbf{\emph{both}}

Links can be included like this:

\begin{Shaded}
\begin{Highlighting}[]
\ExtensionTok{Here}\NormalTok{ is a useful link: [Rmd intro by RStudio](https://rmarkdown.rstudio.com/articles_intro.html)}
\ExtensionTok{Here}\NormalTok{ is another: [R Markdown cheat sheet](https://rmarkdown.rstudio.com/lesson-15.html)}
\end{Highlighting}
\end{Shaded}

Which render like this:

Here is a useful link: \href{https://rmarkdown.rstudio.com/articles_intro.html}{Rmd intro by RStudio}

Here is another: \href{https://rmarkdown.rstudio.com/lesson-15.html}{R Markdown cheat sheet}

For many more details on \texttt{RMarkdown} format and coding, I highly recommend the above links.

\hypertarget{rmarkdown-code-chunk-options}{%
\subsection{\texorpdfstring{\texttt{RMarkdown} code chunk options}{RMarkdown code chunk options}}\label{rmarkdown-code-chunk-options}}

Code chunks in \texttt{RMarkdown} exist to show \texttt{R} code, run the code, or both. In every \texttt{RMarkdown} file you write, you will demarcate code chunks with three ``ticks'' at the top of the chunk followed immediately by the chunk options in curly braces, on the same line, and another three ticks (on their own line) below the chunk of code. This is what a code chunk looks like:

\begin{Shaded}
\begin{Highlighting}[]
\BaseNTok{```\{r, eval = TRUE, echo = TRUE\}}
\BaseNTok{seq(1, 10, 1)}
\BaseNTok{```}
\end{Highlighting}
\end{Shaded}

Which renders like this:

\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{seq}\NormalTok{(}\DecValTok{1}\NormalTok{, }\DecValTok{10}\NormalTok{, }\DecValTok{1}\NormalTok{)}
\end{Highlighting}
\end{Shaded}

\begin{verbatim}
## [1] 1 2 3 4 5 6 7 8 9 10
\end{verbatim}

Note that in the above example the \texttt{R} code will be both run (``evaluated'') and displayed (``echoed'') in the knitted \texttt{.html} file. If we want to suppress either or both of those from being rendered, we just set the chunk options to ``FALSE''.

When your \texttt{RMarkdown} file is completed, save any final changes, and click on the ``Knit'' icon in the toolbar, or click File -\textgreater{} Knit Document. Assuming there are no errors in your code, the rendered \texttt{.html} file should load in a new window for inspection, and the file should be saved in the same location as your \texttt{.Rmd} file.

This has been a minimal treatment of \texttt{RMarkdown}, but it should be enough guidance to get you started writing your own \texttt{RMarkdown} scripts. Please consult the aforementioned \texttt{RMarkdown} resources for additional instruction, examples, and help.
\hypertarget{exercises-associated-with-this-chapter-2}{% \section{Exercises associated with this chapter:}\label{exercises-associated-with-this-chapter-2}} \begin{itemize} \tightlist \item Exercise 2 (\texttt{rtutorial\_1} in \texttt{foundstats} R package) \item Exercise 3 (\texttt{rtutorial\_2} in \texttt{foundstats} R package) \end{itemize} \hypertarget{additional-learning-resources-2}{% \section{Additional learning resources:}\label{additional-learning-resources-2}} \begin{itemize} \item Logan, M. 2010. Biostatistical Design and Analysis Using R. - A great intro to R for statistical analysis \item \url{http://library.open.oregonstate.edu/computationalbiology/} - O'Neil, S.T. 2017. A Primer for Computational Biology \item \url{http://www.stat.columbia.edu/~tzheng/files/Rcolor.pdf} - A nice \texttt{.pdf} menu for many \texttt{R} colors \item \url{https://www.stat.ubc.ca/~jenny/STAT545A/block14_colors.html} - A good introduction to colors in \texttt{R} \item \url{https://medialab.github.io/iwanthue/} - A cool automated color palette selection tool \item \url{https://rmarkdown.rstudio.com/articles_intro.html} - \texttt{RStudio} guide to \texttt{RMarkdown} \item \url{https://rmarkdown.rstudio.com/lesson-15.html} - \texttt{RMarkdown} ``cheat sheet'' \end{itemize} \hypertarget{introduction-to-probability-and-probability-distributions}{% \chapter{Introduction to Probability and Probability Distributions}\label{introduction-to-probability-and-probability-distributions}} \hypertarget{background-2}{% \section{Background}\label{background-2}} In this chapter, we will cover the basics of probability and common probability distributions. Probabilistic thinking can mark a significant departure in how we typically consider mathematics, and the world around us more generally. We have to shelve our natural inclination toward determinism, and embrace random variables, shades of likelihood, and complexity. As we'll see, uncertainty in our estimates is a given. Indeed, the process of statistics is largely about quantifying and managing uncertainty - a process that begins with understanding probability distributions. Frequently, we want to understand how likely a particular observation or set of observations is (e.g.~from a sample of a population), given some expectation. That expectation may be based on a theoretical probability distribution we can use to model variation in nature. In this chapter we will introduce some core concepts of probability and how those pertain to understanding observed \textbf{parameters}, or features, and variation within systems. \hypertarget{what-is-probability}{% \section{What is probability?}\label{what-is-probability}} Statistical probability can be understood from two distinct perspectives: the ``Frequentist'' and the ``Bayesian''. \begin{itemize} \tightlist \item \textbf{Frequency interpretation} ``Probabilities are mathematically convenient approximations to long run relative frequencies.'' \item \textbf{Subjective (Bayesian) interpretation} ``Probability statements are expressions of the opinion of some individual or of current understanding regarding how certain an event is to occur.'' \end{itemize} Both conceptions of probability are widely applied in data analysis, though most of the techniques discussed in this book are rooted in frequentist statistics. 
\hypertarget{random-variables-probability}{%
\section{Random variables \& probability}\label{random-variables-probability}}

\textbf{Probability} is the expression of belief in some future outcome based on information about a system, and is typically applied in statistics to variables we want to understand or estimate in the real world. Specifically, a \textbf{random variable} can take on different values at varying probabilities based on its underlying sample space and probability distribution. The \textbf{sample space} of a random variable is the universe of all possible values for that variable. It may be helpful to think of the sample space in the form of a plotted function, where possible values of the random variable make up the x-axis, and the probability of ``drawing'' a particular value at random makes up the y-axis.

The \textbf{sample space} can be represented by a \textbf{probability distribution} when our random variable is discrete. By discrete we mean that the variable can take on only a countable set of distinct values (typically whole numbers). Meristic traits like the number of bristles on the abdomen of an insect or the number of action potentials a neuron experiences in a single window of time can only have integer values. Continuous random variables like human height, on the other hand, can in theory take on an infinite number of values, but are in practice limited by our measurement precision. For continuous variables, the sample space is represented by what we call a \textbf{probability density function} (PDF), also called a continuous probability distribution. Probabilities over a sample space \textbf{always sum to 1.0}, meaning that all possible values for that random variable are encompassed by its probability distribution, and we use tools from algebra (for probability distributions) and calculus (for probability density functions) to make use of their properties in statistical modeling and inference.

Distributions of random variables can be expressed as functions that have \textbf{moments}. These moments are metrics of a function's shape, and these can be estimated. For example, the 1st, 2nd, 3rd and 4th moments of a distribution correspond to the mean, variance, skewness, and kurtosis, respectively. For now let's just consider the first two.

\begin{itemize}
\tightlist
\item
  The expectation or mean of a random variable X is:
\end{itemize}

\[E[X] = \sum_{\text{all x}}^{}xP(X=x) = \mu\]

\begin{itemize}
\tightlist
\item
  Often we want to know how dispersed the random variable is around its mean
\item
  One measure of dispersion is the variance:
\end{itemize}

\[Var(X) = E[(X - \mu)^2] = \sigma^2\]

There are many \textbf{families} or \textbf{forms} of probability distributions, and which ones we apply in statistics depend on the dynamical system we are trying to represent. We will return to the most commonly used ones below. Probability distributions are mathematically defined by features we call \emph{parameters}, which correspond to the moments pointed out above. The parameters of the functions themselves are used to understand properties of the systems we use the functions to model. For example, the normal distribution (also called the Gaussian distribution, depicted by a bell curve), which is probably the most famous distribution in statistics, is characterized by 2 parameters: \(\mu\) (the mean) and \(\sigma^{2}\) (the variance). In practical terms, those parameters dictate the central peak or ``mode'' and the spread (width), respectively.
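To make the first two moments concrete, here is a minimal \texttt{R} sketch (an added illustration, not one of the original examples) that applies the expectation and variance formulas above to a simple discrete random variable, the value shown by a fair six-sided die:

\begin{verbatim}
## sample space and probabilities for a fair six-sided die
x <- 1:6
p <- rep(1/6, 6)

## first moment: E[X] = sum of x * P(X = x)
mu <- sum(x * p)               # 3.5

## second central moment: Var(X) = E[(X - mu)^2]
sigma2 <- sum((x - mu)^2 * p)  # approximately 2.92
mu
sigma2
\end{verbatim}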
These parameters are clearly important for us in thinking about the systems we study. For example, in biology we often think about random variables as values expressed by individual living things. We may consider, in theory, all possible individuals under a given set of circumstances, and one or more random variables associated with those individuals. In statistics we call this theoretical notion of all individuals a \textbf{\emph{population}}. If we can assume that a random variable in that population has a particular probability distribution, it opens the door to estimating the aforementioned population parameters from a \textbf{\emph{random sample}} of that population. Mean height definitely tells us something about the most common values in a population of humans, as does the variability of height among individuals. So you can see how probability distributions, when applied under the appropriate assumptions, help us understand, quantify, and compare random variables in populations.

We will further explore how population parameters are estimated from random samples in the next chapter. For now, we will introduce various probability distributions and the random variables they represent.

\hypertarget{probability-and-the-bernoulli-distribution}{%
\section{Probability and the Bernoulli distribution}\label{probability-and-the-bernoulli-distribution}}

To think about probability and probability distributions, let's start with the Bernoulli distribution. It describes the expected outcome of an event with probability \texttt{p}. A simple example of this scenario is the flipping of a coin. If that coin is \textbf{fair}, then the probabilities of heads or tails are

\[Pr(X=\text{Heads}) = \frac{1}{2} = 0.5 = p \]

\[Pr(X=\text{Tails}) = \frac{1}{2} = 0.5 = 1 - p \]

If the coin isn't fair then \(p \neq 0.5\). At this point, we don't know whether our coin is fair or not, so let's estimate the Bernoulli distribution of our coin flip by flipping our coin 1000 times and visualizing the results.

\begin{center}\includegraphics{foundational_statistics_files/figure-latex/unnamed-chunk-48-1} \end{center}

We can see that our estimated Bernoulli distribution indicates a coin that is very close to fair (see Chapter 13: Frequency Analysis for how we might test this statistically). Note that the probabilities still sum to 1, a property of all probability distributions.

\[ p + (1-p) = 1 \]

The Bernoulli distribution can be used to represent other binary possibilities, like success or failure, ``yes'' or ``no'' answers, choosing an allele at a biallelic locus from a population, etc\ldots{}

\hypertarget{probability-rules}{%
\section{Probability rules}\label{probability-rules}}

Let's take a moment to cover some basic rules of probability regarding the observation of multiple ``events''. Let's say we flip a fair coin twice. Represent the first flip as `X' and the second flip as `Y'. \texttt{H} indicates a Heads and \texttt{T} a Tails.
The probability for any given sequence of both flips is

\[ Pr(\text{X=H and Y=H}) = p*p = p^2 \]

\[ Pr(\text{X=H and Y=T}) = p*p = p^2 \]

\[ Pr(\text{X=T and Y=H}) = p*p = p^2 \]

\[ Pr(\text{X=T and Y=T}) = p*p = p^2 \]

(Because the coin is fair, \(1-p = p = 0.5\), so each of the four possible sequences has the same probability, \(p^2 = 0.25\).)

While the probability of flipping both an \texttt{H} and \texttt{T} in any order is

\[ \text{Pr(X=H and Y=T) or Pr(X=T and Y=H)} = \] \[ (p*p) + (p*p) = 2p^{2} \]

These are the \textbf{`and'} and \textbf{`or'} rules for the probability of multiple events:

\begin{itemize}
\tightlist
\item
  `and' means multiply the probabilities to attain the probability of both events happening
\item
  `or' means sum the probabilities to attain the probability of either event happening
\item
  most probability distributions can be built up from these simple rules
\end{itemize}

\hypertarget{joint-probability}{%
\section{Joint probability}\label{joint-probability}}

The joint probability is the probability of two or more outcomes co-occurring. Following the `and' rule,

\[Pr(X,Y) = Pr(X) * Pr(Y)\]\\
As above, this multiplication is true for two \textbf{independent} events. However, for two non-independent events we also have to take into account their \textbf{covariance}. To do this we need to determine their \textbf{conditional probabilities}.

\hypertarget{conditional-probability}{%
\section{Conditional probability}\label{conditional-probability}}

Variables that are non-independent have a shared variance, which is also known as \textbf{covariance}. You can think of this as two variables that consistently deviate from their respective means in a coordinated way. Covariance standardized by the standard deviations of the two variables is \textbf{correlation}, which we'll discuss in detail in Chapter 12. To assess the probability of two events where they might not be independent, we must consider their conditional probability.

\begin{itemize}
\tightlist
\item
  The conditional probability for two \textbf{independent} variables:
\end{itemize}

\[Pr(Y|X) = Pr(Y)\] \[Pr(X|Y) = Pr(X)\]

This means that the probability of \texttt{Y} given \texttt{X} is just the probability of \texttt{Y}, and the reverse is true for the probability of \texttt{X} given \texttt{Y}. In other words, the occurrence of event \texttt{X} or \texttt{Y} has no influence on the occurrence of the other event. These variables are therefore independent.

\begin{itemize}
\tightlist
\item
  The conditional probability for two \textbf{non-independent} variables:
\end{itemize}

\[Pr(Y|X) \neq Pr(Y)\] \[Pr(X|Y) \neq Pr(X)\]

In this case, the probability of \texttt{Y} given \texttt{X} \emph{does not} equal the probability of just \texttt{Y}. Thus, one is influencing the probability of the other. More specifically, when we have two non-independent events, the equation for the conditional probability of one event given the other is

\[Pr(Y|X) = \frac{Pr(X|Y)Pr(Y)}{Pr(X)}\]

which is also known as \textbf{Bayes' Theorem}.

\hypertarget{a-brief-note-on-likelihood-vs.probability}{%
\section{A brief note on likelihood vs.~probability}\label{a-brief-note-on-likelihood-vs.probability}}

\begin{itemize}
\item
  The \textbf{probability} of an event is the proportion of times that the event would occur if we repeated a random trial over and over again under the same conditions.
\item
  The \textbf{likelihood} is the probability of observing a particular set of data or outcome, given a particular parameter value.
\end{itemize}

\texttt{L{[}parameter\textbar{}data{]}\ =\ Pr{[}data\textbar{}parameter{]}}

Extending from this, the parameter value at which the likelihood is maximized is called the maximum likelihood estimate (MLE). You don't need to worry too much about likelihood in this course, but realize that many of our formulae for estimating parameters from data actually produce maximum likelihood estimates. The formula we use to calculate a mean from a sample of observations, for example, produces the maximum likelihood estimate for the population mean from which that sample was taken.

The \textbf{likelihood function} (for a single parameter) or \textbf{likelihood surface} (for multiple parameters) describes the relationship between different parameter values and their likelihood. We can't always derive convenient equations to obtain maximum likelihood estimates, however, and in those cases we may have to rely on algorithmic searches of ``parameter space'' to find the MLE.

\hypertarget{probability-distributions-commonly-used-in-biological-statistics}{%
\section{Probability distributions commonly used in biological statistics}\label{probability-distributions-commonly-used-in-biological-statistics}}

(Many of these are thanks to Sally Otto at UBC)

\hypertarget{discrete-probability-distributions}{%
\subsection{Discrete Probability Distributions}\label{discrete-probability-distributions}}

\hypertarget{geometric-distribution}{%
\subsubsection{\texorpdfstring{\textbf{Geometric Distribution}}{Geometric Distribution}}\label{geometric-distribution}}

If a single event has two possible outcomes at probability \texttt{p} and \texttt{1-p}, and is independent of past events (\emph{i.e.} a Bernoulli trial), the probability of having to observe \texttt{k} trials until the first ``success'' appears is given by the \textbf{geometric distribution}. The probability that the first ``success'' would appear on the first trial is \texttt{p}, but the probability that the \emph{first} ``success'' appears on the second trial is \texttt{(1-p)*p}. By generalizing this procedure, the probability that there will be \texttt{k-1} failures before the first success is:

\[P(X=k)=(1-p)^{k-1}p\]

\begin{itemize}
\tightlist
\item
  mean = \(\frac{1}{p}\)
\item
  variance = \(\frac{(1-p)}{p^2}\)
\end{itemize}

\hypertarget{the-geometric-distribution-in-practice}{%
\paragraph{\texorpdfstring{The \textbf{Geometric Distribution} in practice}{The Geometric Distribution in practice}}\label{the-geometric-distribution-in-practice}}

The geometric distribution applies in any scenario in which we want to know the probability of a certain number of failures before we observe an event (assuming each trial is independent). Dice rolls, free throws in basketball, sales pitches, and many more such sequential trials with two outcomes are modeled well by the geometric distribution. For example, if the probability of extinction of an endangered population is estimated to be 0.1 every year, what is the expected time until extinction?

\begin{center}\includegraphics[width=0.8\linewidth]{images/prob.017} \end{center}

Here we can see the probability of extinction in any given year. If we wanted to know the probability of extinction \emph{by} a specific year, we can simply apply the `or' rule and sum the probabilities up until the specified year. The probability of extinction within the first three years (i.e., before year 4) is equivalent to the probability of extinction in year 1 \emph{or} year 2 \emph{or} year 3, \(0.1 + (1 - 0.1)*0.1 + (1 - 0.1)^2*0.1 = 0.271\).
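You can check this arithmetic with the geometric distribution functions built into \texttt{R}. One caution: \texttt{dgeom()} and \texttt{pgeom()} parameterize the distribution by the number of \emph{failures} before the first success, so ``extinction in year 1, 2, or 3'' corresponds to 0, 1, or 2 failure years. A minimal sketch:

\begin{verbatim}
## P(extinction in year 1, 2, or 3), built from the 'or' rule
sum(dgeom(0:2, prob = 0.1))   # 0.271

## the cumulative form gives the same answer
pgeom(2, prob = 0.1)          # 0.271

## expected number of years until extinction is 1/p = 10
1/0.1
\end{verbatim}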
\hypertarget{binomial-distribution}{%
\subsubsection{\texorpdfstring{\textbf{Binomial Distribution}}{Binomial Distribution}}\label{binomial-distribution}}

A \textbf{binomial distribution} represents the distribution of outcomes from the \textbf{combination} of several Bernoulli trials, \emph{i.e.} independent trials with only two outcomes. In fact, the Bernoulli distribution is just a special case of the binomial distribution with n = 1 trial. The distribution of probabilities for each combination of outcomes is

\[\large f(k) = {n \choose k} p^{k} (1-p)^{n-k}\]

\begin{itemize}
\tightlist
\item
  \texttt{n} is the total number of trials
\item
  \texttt{k} is the number of successes
\item
  \texttt{p} is the probability of success
\item
  \texttt{q} is the probability of failure
\item
  For the binomial, as with the Bernoulli, \texttt{p\ =\ 1-q}
\end{itemize}

\hypertarget{the-binomial-distribution-in-practice}{%
\paragraph{\texorpdfstring{The \textbf{binomial distribution} in practice}{The binomial distribution in practice}}\label{the-binomial-distribution-in-practice}}

Pretend that you flip 20 fair coins (or collect alleles from a heterozygote). Now repeat that process 100 times and record the number of heads that show. We expect that most of the time we will get approximately 10 heads in 20 flips. However, sometimes we will get many fewer heads or many more heads. If we plot the frequency of the proportion of ``successes'', or heads, we get in each of our 100 replicates, we get the binomial distribution. Because our coin is fair, we can reasonably expect this distribution to center around 0.5.

\begin{center}\includegraphics[width=1\linewidth]{images/week_2.003} \end{center}

The binomial distribution is the basis for frequency tests when outcomes are binary.

\hypertarget{negative-binomial-distribution}{%
\subsubsection{\texorpdfstring{\textbf{Negative Binomial Distribution}}{Negative Binomial Distribution}}\label{negative-binomial-distribution}}

The \textbf{negative binomial distribution} is an extension of the geometric distribution describing the expected time until not just one success but \texttt{r} ``successes'' have occurred. Mathematically, it is a generalization of the geometric distribution, where the probability of the \(r^{th}\) ``success'' appearing on the \(k^{th}\) trial is:

\[P(X=k)=\binom{k-1}{r-1}p^{r-1}(1-p)^{k-r}p\]

which simplifies to

\[P(X=k)=\binom{k-1}{r-1}p^{r}(1-p)^{k-r}\]

\begin{itemize}
\tightlist
\item
  mean = \(\frac{r}{p}\)
\item
  variance = \(r(1-p)/p^2\)
\end{itemize}

For example, if a predator must capture 10 prey before it can grow large enough to reproduce, what would be the expected age of onset of reproduction if the probability of capturing a prey on any given day is 0.1?

\begin{center}\includegraphics[width=0.5\linewidth]{images/prob.018} \end{center}

Notice that the variance is quite high (\(r(1-p)/p^2 = 900\) in this example) and the distribution is fairly skewed. Generally, a low probability of success \texttt{p} and a high threshold of successes \texttt{r} leads to a highly dispersed distribution with considerable kurtosis (`tailedness').

\hypertarget{poisson-probability-distribution}{%
\subsubsection{\texorpdfstring{\textbf{Poisson Probability Distribution}}{Poisson Probability Distribution}}\label{poisson-probability-distribution}}

Another common situation in biology is when each trial is discrete, but the number of observations of each outcome is observed/counted. Such scenarios are modeled well by the \textbf{Poisson distribution}.
For example, counts of snails in several plots of land, observations of the firing of a neuron in a unit of time, or count of genes in a genome binned to units of 500 AA. Just like before, you have `successes', but now you count them for each replicate where replicates are now units of area or time. Values can now range from 0 to a large number. For example, you can examine 1000 genes and count the number of base pairs in the coding region of each gene. What is the probability of observing a gene with `r' bp? \texttt{Pr(Y=r)} is the probability that the number of occurrences of an event \texttt{y} equals a count \texttt{r} in the total number of trials. \[Pr(Y=r) = \frac{e^{-\mu}\mu^r}{r!}\] Note that this is a single parameter function because \(\mu = \sigma^2\) - the two together are often just represented by \(\lambda\). \[Pr(y=r) = \frac{e^{-\lambda}\lambda^r}{r!}\] This means that for a variable that is truly Poisson distributed, the mean and variance should be roughly equal to one another. Variables that are approximately Poisson distributed but have a larger variance than the mean are called `overdispersed', indicating that the observed variance is larger than appropriate for the theoretical distribution. This is quite common in RNA-seq and microbiome data. When overdispersion is a problem in count data, we often use the negative binomial distribution instead because it allows the variance to differ from the mean. \hypertarget{poisson-probability-distribution-gene-length-by-bins-of-500-nucleotides}{% \paragraph{Poisson Probability Distribution \textbar{} gene length by bins of 500 nucleotides}\label{poisson-probability-distribution-gene-length-by-bins-of-500-nucleotides}} \begin{center}\includegraphics[width=0.8\linewidth]{images/week_2.004} \end{center} \hypertarget{poisson-probability-distribution-increasing-parameter-values-of-lambda}{% \paragraph{\texorpdfstring{Poisson Probability Distribution \textbar{} increasing parameter values of \(\lambda\)}{Poisson Probability Distribution \textbar{} increasing parameter values of \textbackslash{}lambda}}\label{poisson-probability-distribution-increasing-parameter-values-of-lambda}} \begin{center}\includegraphics[width=0.7\linewidth]{images/week_2.005} \end{center} \hypertarget{horse-kick-deaths-in-the-prussian-army}{% \paragraph{Horse kick deaths in the Prussian army}\label{horse-kick-deaths-in-the-prussian-army}} One of the earliest applications of the Poisson distribution was in 1898, when it was used to model the number of soldier deaths from horse kicks in 14 different corps of the Prussian army. As can be seen from the chart below, the Poisson distribution does a remarkable job at modeling these unfortunate events. Indeed, while it is useful for count data in general, it is particularly effective at modeling the distribution of unlikely, independent events. \begin{center}\includegraphics{foundational_statistics_files/figure-latex/unnamed-chunk-54-1} \end{center} \hypertarget{continuous-probability-distributions}{% \subsection{\texorpdfstring{\textbf{Continuous probability distributions}}{Continuous probability distributions}}\label{continuous-probability-distributions}} Up until this point, we have been looking at \emph{discrete} probability distributions, where our measurements represent integer or categorical values (event outcomes, counts, etc\ldots{}) and the probability of a specific observation can be directly quantified. 
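Before moving on, here is a minimal sketch (using simulated counts, not a real data set) of the Poisson property highlighted above: for truly Poisson-distributed counts the mean and variance should be roughly equal, and a variance much larger than the mean signals overdispersion:

\begin{verbatim}
set.seed(1)

## simulate 1000 Poisson-distributed counts with lambda = 4
counts <- rpois(1000, lambda = 4)
mean(counts)         # close to 4
var(counts)          # also close to 4 for a Poisson variable

## dpois() gives the probability of a particular count, e.g. r = 2
dpois(2, lambda = 4)
\end{verbatim}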
We will now discuss probability density functions (PDFs), also known as \textbf{continuous probability distributions}. These represent the distribution of \emph{continuous} values, from which a random sample can take on an infinite number of values within the range of the distribution (limited by measurement accuracy). As such, unlike discrete probability distributions, the probability of finding any \emph{exact} value within a continuous distribution is effectively 0. We must instead look at the probability of a measurement falling between a range of values, \texttt{a} and \texttt{b}, \emph{i.e.} the integral of the density function (the area beneath the curve) between said values.

\[P(\text{observation lies within } dx \text{ of } x) = f(x)dx\]

\[P(a\leq X \leq b) = \int_{a}^{b} f(x) dx\]

Remember that the integral over the entire range of \(x\) is one

\[\int_{-\infty}^{\infty} f(x) dx = 1\]

The expected value of a random variable \texttt{X}, \texttt{E{[}X{]}}, may be found by integrating the product of \texttt{x} and the probability density function over all possible values of \texttt{x}:

\[E[X] = \int_{-\infty}^{\infty} xf(x) dx \]

\(Var(X) = E[X^2] - (E[X])^2\), where the expectation of \(X^2\) is

\[E[X^2] = \int_{-\infty}^{\infty} x^2f(x) dx \]

\hypertarget{uniform-distribution}{%
\subsubsection{\texorpdfstring{\textbf{Uniform Distribution}}{Uniform Distribution}}\label{uniform-distribution}}

The uniform distribution is rectangular, meaning that all values have equal probability between the bounds of the distribution \([a,b]\). Its PDF is \(f(x) = \frac{1}{b-a}\) for \(x\) within \([a,b]\), and the expected value of \texttt{X} is given by

\[E[X] = \int_{a}^{b} x\frac{1}{b-a} dx = \frac{(a+b)}{2} \]

\begin{center}\includegraphics[width=1\linewidth]{images/prob.019} \end{center}

While we are introducing it here as a continuous probability distribution, the uniform distribution has a discrete application as well. Both are used to describe any scenario in which an outcome has equal probability to occur, like true random number generation, or a dice roll in the discrete case.

\hypertarget{exponential-distribution}{%
\subsubsection{\texorpdfstring{\textbf{Exponential Distribution}}{Exponential Distribution}}\label{exponential-distribution}}

The \textbf{exponential distribution} can be thought of as the continuous alternative to the geometric distribution, describing the probability of the occurrence of an event or state change over time, given a continuous process. It is defined by a single parameter, the rate constant \(\lambda\), which represents the instantaneous probability of an event occurring. The PDF is

\[f(x)=\lambda e^{-\lambda x}\]

\texttt{E{[}X{]}} can be found by integrating \(xf(x)\) from 0 to infinity, leading to the result that

\begin{itemize}
\tightlist
\item
  \(E[X] = \frac{1}{\lambda}\)
\item
  \(Var(X) = \frac{1}{\lambda^2}\)
\end{itemize}

For example, let \(\lambda\) represent the instantaneous death rate of an individual. The expected lifespan of that individual would be described by an exponential distribution (assuming that \(\lambda\) does not change over time).

\begin{center}\includegraphics[width=0.7\linewidth]{images/prob.020} \end{center}

More generally, the exponential distribution describes many situations in which the probability of an event is approximately constant over time and independent of the past. It is widely applied in survival analysis, actuarial sciences, marketing, and the physical sciences (particularly any process exhibiting exponential decay).
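Here is a minimal sketch (simulated values only) of those exponential results in \texttt{R}; with \(\lambda = 0.1\) per year, the mean lifespan should be close to \(1/\lambda = 10\) years:

\begin{verbatim}
set.seed(2)

## simulate 10000 exponential lifespans with rate lambda = 0.1 per year
lifespans <- rexp(10000, rate = 0.1)
mean(lifespans)           # close to 1/0.1 = 10
var(lifespans)            # close to 1/0.1^2 = 100

## probability that an individual survives past 20 years
1 - pexp(20, rate = 0.1)  # exp(-0.1 * 20), about 0.135
\end{verbatim}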
\hypertarget{gamma-distribution}{%
\subsubsection{\texorpdfstring{\textbf{Gamma Distribution}}{Gamma Distribution}}\label{gamma-distribution}}

The gamma distribution generalizes the exponential distribution in the same way that the negative binomial distribution generalizes the geometric distribution. Instead of representing the probability of the first occurrence of an event, it models the waiting time until the \(r^{th}\) event for a process that occurs randomly over time at a rate \(\lambda\):

\[f(x) = \frac{e^{-\lambda x}(\lambda x)^{r-1}}{(r-1)!}\lambda\]

\[ Mean = \frac{r}{\lambda} \] \[ Variance = \frac{r}{\lambda^2} \]

For example, if in a PCR reaction, DNA polymerase synthesizes new DNA strands at a rate of 1 per millisecond, how long until 1000 new DNA strands are produced? Assume that DNA synthesis does not deplete the pool of primers or nucleotides in the chamber, so that each event is independent of other events in the PCR chamber.

\hypertarget{the-gaussian-or-normal-distribution}{%
\subsubsection{The Gaussian or Normal Distribution}\label{the-gaussian-or-normal-distribution}}

The `Gaussian', or \textbf{Normal distribution}, is one of the best known probability distributions. Many people, whether statistically versed or not, have an intuitive understanding of the normal distribution because it models the nature of random continuous variables in a population well - that is, they have a central tendency plus a constrained amount of deviation around this tendency (a ``bell curve''). The normal distribution has two parameters, the mean \(\mu\) and the standard deviation \(\sigma\). The PDF of the normal distribution is defined as

\begin{center}\includegraphics[width=0.4\linewidth]{images/week_2.032} \end{center}

where

\[\large \pi \approx 3.14159\] \[\large e \approx 2.71828\]

To write that a variable (v) is distributed as a normal distribution with mean \(\mu\) and variance \(\sigma^2\), we write the following:

\[\large v \sim \mathcal{N} (\mu,\sigma^2)\]

\hypertarget{normal-pdf-estimates-of-mean-and-variance}{%
\paragraph{Normal PDF \textbar{} estimates of mean and variance}\label{normal-pdf-estimates-of-mean-and-variance}}

Estimate of the mean from a single sample

\[\Large \bar{x} = \frac{1}{n}\sum_{i=1}^{n}{x_i} \]

Estimate of the variance from a single sample

\[\Large s^2 = \frac{1}{n-1}\sum_{i=1}^{n}{(x_i - \bar{x})^2} \]

\begin{center}\includegraphics[width=0.9\linewidth]{images/week_2.010} \end{center}

\hypertarget{why-is-the-normal-distribution-special-in-biology}{%
\paragraph{Why is the Normal distribution special in biology?}\label{why-is-the-normal-distribution-special-in-biology}}

\begin{center}\includegraphics[width=1\linewidth]{images/week_2.013} \end{center}

\begin{center}\includegraphics[width=1\linewidth]{images/week_2.015} \end{center}

\begin{center}\includegraphics[width=0.6\linewidth]{images/week_2.014} \end{center}

\hypertarget{parent-offspring-resemblance}{%
\paragraph{Parent-offspring resemblance}\label{parent-offspring-resemblance}}

\begin{center}\includegraphics[width=0.45\linewidth]{images/week_2.016} \end{center}

\hypertarget{genetic-model-of-complex-traits}{%
\paragraph{Genetic model of complex traits}\label{genetic-model-of-complex-traits}}

\begin{center}\includegraphics[width=0.9\linewidth]{images/week_2.017} \end{center}

\hypertarget{distribution-of-f_2-genotypes-really-just-binomial-sampling}{%
\paragraph{\texorpdfstring{Distribution of \(F_2\) genotypes \textbar{} really just binomial sampling}{Distribution of F\_2 genotypes \textbar{} really just binomial sampling}}\label{distribution-of-f_2-genotypes-really-just-binomial-sampling}}

\begin{center}\includegraphics[width=0.7\linewidth]{images/week_2.018} \end{center}

\hypertarget{the-central-limit-theorm}{%
\paragraph{The Central Limit Theorem}\label{the-central-limit-theorm}}

The normal distribution is immensely useful because of the \textbf{Central Limit Theorem}, which says that the mean or the sum of many random variables independently drawn from the same distribution is distributed approximately normally. One can think of numerous real-world situations in which this applies, such as when multiple genes contribute to a phenotype or when many factors contribute to a biological process. In addition, whenever variance is introduced by many independent stochastic factors, the central limit theorem tends to hold. Thus, normal distributions occur throughout genomics, and they form the basis of classical statistics.

\hypertarget{a-note-on-z-scores-of-normal-variables}{%
\paragraph{A note on z-scores of normal variables}\label{a-note-on-z-scores-of-normal-variables}}

Often we want to make variables more directly comparable to one another, particularly when they have scales differing by one or more orders of magnitude. For example, consider measuring the leg length of mice and of elephants. Which animal has longer legs in absolute terms? What about proportional to their body size? A good way to answer these last questions is to use `z-scores'. A z-score is a statistic standardized to a mean of 0 and a standard deviation of 1. To attain z-scores, we can transform any normal distribution to have a mean of 0 and a standard deviation of 1 by centering on the mean and dividing by the standard deviation (the result is called the standard normal distribution). The z-score, then, is the number of standard deviations from the mean (0) of this distribution.

\[\huge z_i = \frac{(x_i - \bar{x})}{s}\]

\hypertarget{exercises-associated-with-this-chapter-3}{%
\section{Exercises associated with this chapter:}\label{exercises-associated-with-this-chapter-3}}

\begin{itemize}
\tightlist
\item
  Problem Set 2
\end{itemize}

\hypertarget{additional-learning-resources-3}{%
\section{Additional learning resources:}\label{additional-learning-resources-3}}

\begin{itemize}
\item
  Irizarry, R. A. Introduction to Data Science. \url{https://rafalab.github.io/dsbook/} - A gitbook written by a statistician, with great introductions to key topics in statistical inference.
\item
  Logan, M. 2010. Biostatistical Design and Analysis Using R. - A great intro to R for statistical analysis
\end{itemize}

\hypertarget{parameter-estimation-basics-and-the-sampling-process}{%
\chapter{Parameter Estimation Basics and the Sampling Process}\label{parameter-estimation-basics-and-the-sampling-process}}

\hypertarget{background-3}{%
\section{Background}\label{background-3}}

A major goal of statistics is to estimate \textbf{parameters}, or features, of a population so that we can compare them to values that are of practical importance to our understanding of the system, or to compare parameter estimates between and among different populations. We may want to know whether the slope of a relationship between two variables is really different from zero (e.g.~no relationship) or whether the average value of a trait in a population is especially extreme relative to other populations.
Some of these tasks are getting into the territory of hypothesis testing (which we will get to later), but estimating the parameters we ultimately may want to compare is an important first step. In this chapter we will discuss how parameters are estimated from samples we, as scientists, collect.

\hypertarget{understanding-populations-and-their-parameters}{%
\section{Understanding populations and their parameters}\label{understanding-populations-and-their-parameters}}

From a probabilistic standpoint we often think about the systems we study as theoretical ``populations'' of entities, wherein each population is defined by a particular set of shared conditions. Almost always (unless we are simulating a population), it is simply not possible to measure or observe all of the entities in a population, so if we want to understand the population we need to estimate its parameters.

As empiricists, how do we estimate parameters? As you probably know, we conduct studies and/or experiments in which we take random samples and measure variables in the sample that correspond to the population parameters of interest. It is important to consider whether these samples are random if we apply the probability distribution and random variable framework (from last chapter) to our statistical inference. When we take a random sample from a population we can estimate a parameter by doing a calculation based on the sample itself. So, maybe we want to estimate the mean mass of all adult rainbow trout in a lake. We can't measure all fish, so we take a random sample and calculate a sample mean mass. In typed notation, we use letters from the Greek alphabet to represent population parameters, and letters from the Latin alphabet to represent sample attributes. For example, a population mean is represented by \(\mu\), and a sample mean by \(\bar{x}\).

A sample mean gives us an estimate of the true population mean, but because it is a random sample, we don't know how close our estimate is to the true parameter value. We do know from first principles of probability that as the size of our sample increases, so does the closeness of our estimate to the true value. That is, as our sample size approaches our actual population size (which may be infinitely large depending on how defined), our estimate approaches the true parameter value. We also know that the closeness of our estimate to the real parameter value depends on how much ``spread'' there is in the distribution of values that defines the population.

These two values - the sample size and the spread of the distribution - contribute to what is known as the \textbf{standard error} of an estimate. The standard error for any given sample attribute (such as a sample mean) can be calculated either based on distributional assumptions, or by a process called ``resampling.'' We will return to these procedures below. The important thing to understand for now is that the standard error can be used to indicate how close a sample-based estimate is to the actual population parameter value.

One way to get a handle on how the sampling process relates to parameter estimation is to actually simulate a population, based on a known probability distribution for example, and take multiple samples of varying sizes from that population. You can calculate estimates (such as the mean and standard deviation) from each sample, then visualize how they vary and how close they are to the true parameter value.
Here is a quick example, in which we simulate a random, normally distributed variable in a population of 1000 individuals, take 50 random samples of 10 individuals each time, and look at the distribution of sample means across those 50 samples.

\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{set.seed}\NormalTok{(}\DecValTok{32}\NormalTok{)}
\NormalTok{pop_var <-}\StringTok{ }\KeywordTok{rnorm}\NormalTok{(}\DecValTok{1000}\NormalTok{, }\DataTypeTok{mean =} \DecValTok{30}\NormalTok{, }\DataTypeTok{sd =} \DecValTok{8}\NormalTok{) }\CommentTok{#Define our population random variable}
\NormalTok{samps_var <-}\StringTok{ }\KeywordTok{replicate}\NormalTok{(}\DecValTok{50}\NormalTok{, }\KeywordTok{sample}\NormalTok{(pop_var, }\DecValTok{10}\NormalTok{)) }\CommentTok{#Take 50 samples of size 10}
\NormalTok{samps_var_means <-}\StringTok{ }\KeywordTok{apply}\NormalTok{(samps_var, }\DecValTok{2}\NormalTok{, mean) }\CommentTok{#Calculate the mean from each sample}
\KeywordTok{hist}\NormalTok{(samps_var_means) }\CommentTok{#Plot the distribution of sample means}
\end{Highlighting}
\end{Shaded}

\includegraphics{foundational_statistics_files/figure-latex/unnamed-chunk-65-1.pdf}

In the above example we see that we do most commonly get a sample mean near the expected population value of 30, but that there is quite a bit of variation! This \textbf{sampling} variation is what we have to deal with, and account for, as empirical scientists. If this had been a real-world scenario, we likely would be basing our estimate for \(\mu\) on just a single \(\bar{x}\). In this simulation, nine of our samples gave us an estimated mean \textless{} 28. Whether that estimate is ``close enough'' to the true value of 30 depends on a variety of questions we may have about the system, but this idea of uncertainty in our parameter estimation is important. Fortunately we can rely on a number of tools to evaluate how close we think our sample-based estimates are to population parameter values in the real world, which we visit below.

\hypertarget{more-on-parameter-estimation-and-sampling-distributions}{%
\section{More on parameter estimation and sampling distributions}\label{more-on-parameter-estimation-and-sampling-distributions}}

The exercise above illustrates the concept of a sampling distribution. We sampled over and over again (50 times) and calculated the mean for each sample to demonstrate the sampling distribution for the mean, our original parameter of interest. One important point is that the sampling distribution for a given parameter is often very different from the variable's distribution in the population. In many cases, the sampling distribution is normal or approximately so.

\begin{center}\includegraphics[width=1\linewidth]{images/week_2.025} \end{center}

For most real world data sets we can't empirically determine a sampling distribution by taking many actual samples, because we often have just the one sample. Fortunately we can rely on the Central Limit Theorem (discussed in the last chapter) to make some assumptions about sampling distributions, particularly when estimating a mean from a single sample, or when estimating almost any parameter using a ``pseudo'' or re-sampling process we refer to as ``bootstrapping.''

As noted, the \textbf{expected value} of a very large number of sample estimates is the value of the parameter being estimated. The sampling distribution of an estimate models all values we might have obtained from our sample and their probabilities of occurrence.
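Building on the simulation above, here is a minimal sketch (reusing the \texttt{pop\_var} and \texttt{samps\_var\_means} objects created in the code above) that computes the standard deviation of the 50 sample means and compares it to the quantity \(s/\sqrt{n}\) formalized in the next section:

\begin{verbatim}
## spread of the simulated sampling distribution of the mean
sd(samps_var_means)

## compare with the standard error formula applied to one random sample of 10
one_samp <- sample(pop_var, 10)
sd(one_samp)/sqrt(10)
\end{verbatim}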
The standard error of an estimate can be conceptualized as the standard deviation of a sampling distribution. So, whenever we obtain a parameter estimate, we need to include the standard error in some form or another, which is a measure of the precision of our estimate.

\hypertarget{calculating-the-standard-error-of-the-mean}{%
\section{Calculating the standard error of the mean}\label{calculating-the-standard-error-of-the-mean}}

Because distribution functions for sums of many independent events are approximately normal (the Central Limit Theorem), and because a normally distributed random variable divided by a single value (a constant) is also normally distributed, sample means are a special case. These assumptions work out very conveniently for means, because a mean is just that: a sum of observations divided by the total number of observations. Because we can assume that sampling distributions for means are effectively normal, we can use attributes of the normal probability density function to conveniently calculate the standard error of any mean estimated from a random sample.

\begin{center}\includegraphics[width=1\linewidth]{images/week_2.028} \end{center}

\[\huge \sigma_{\bar{x}} \approx s_{\bar{x}} = \frac{s}{\sqrt{n}} \]

\begin{itemize}
\tightlist
\item Where \(s_{\bar{x}}\) is the estimated standard error of the distribution of the mean estimates
\item This is usually just referred to as the `standard error of the mean' (SEM)
\item Note that this \textbf{is not} the standard deviation of the original distribution
\item Importantly, the SEM will go down as the sample size increases
\end{itemize}

\hypertarget{the-bootstrap-to-estimate-parameters-and-the-standard-error}{%
\section{The bootstrap to estimate parameters and the standard error}\label{the-bootstrap-to-estimate-parameters-and-the-standard-error}}

Unfortunately, most other kinds of estimates do not have this amazing property, but we can rely on another approach to calculate the standard error. This involves generating your own sampling distribution for the estimate using the ``bootstrap,'' a method invented by Efron (1979). We call the bootstrap, and other methods that do not rely on distributional assumptions of the variable itself, ``nonparametric'' approaches. We can quite easily use \texttt{R} to implement the bootstrap via the following steps:

\begin{itemize}
\tightlist
\item Take a random sample of individuals, with replacement and of the same size as the original data (step 1)
\item Calculate the estimate using the measurements in the bootstrap sample (step 2)
  \begin{itemize}
  \tightlist
  \item This is the first bootstrap replicate estimate
  \end{itemize}
\item Repeat steps 1 and 2 a large number of times (1000 times is reasonable; step 3)
\item Calculate the sample standard deviation of all the bootstrap replicate estimates obtained in step 3 (step 4)
\item The resulting quantity is called the ``bootstrap standard error''
\end{itemize}

The bootstrap is effective for a number of reasons. It can be applied to almost any sample statistic, including means, proportions, correlations, and regression parameters. It works when there is no ready formula for a standard error, for example when estimating the median, trimmed mean, correlation, eigenvalue, etc. It is nonparametric, so it doesn't require normally distributed data, as mentioned. It works well for parameter estimates that are based on complicated sampling procedures or calculations. For example, it is used to assess confidence in local relationships within phylogenetic trees.
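To make the steps above concrete, here is a minimal bootstrap sketch for the standard error of a sample median. The vector \texttt{my\_sample} is a simulated stand-in rather than data from any real study, so treat this purely as a template:

\begin{verbatim}
set.seed(11)
my_sample <- rnorm(40, mean = 30, sd = 8)        # simulated stand-in data
boot_medians <- replicate(1000, {
  boot_samp <- sample(my_sample, replace = TRUE) # step 1: resample with replacement
  median(boot_samp)                              # step 2: bootstrap replicate estimate
})                                               # step 3: repeated 1000 times
sd(boot_medians)                                 # step 4: the bootstrap standard error
\end{verbatim}

The same template works for nearly any estimate; you would simply swap \texttt{median()} for the statistic of interest.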
\hypertarget{confidence-intervals}{%
\section{Confidence intervals}\label{confidence-intervals}}

A concept related to parameter estimates and their standard errors is the idea of the ``confidence interval.'' A confidence interval is a range of values about a parameter \textbf{estimate}, such that we are X\% certain that the true population parameter value lies within that interval. We will return to the topic again in the hypothesis testing section of this book, when we discuss the \emph{t} distribution in the context of comparing two population means. For now, know that for a normally distributed sample, a confidence interval about the population mean can be calculated using the \texttt{t.test()} function in base \texttt{R}. The 95\% confidence interval is commonly reported in statistical analysis results, by convention, but other values are occasionally reported as well.

\hypertarget{the-relationship-between-mean-and-variance}{%
\section{The relationship between mean and variance}\label{the-relationship-between-mean-and-variance}}

To add one last, short note on the comparison of population standard deviations, it is important to understand that population means and variances (and hence standard deviations) tend to have a strong, positive relationship. This means that an otherwise similarly shaped distribution, but with a much larger mean, will by default have a much larger standard deviation as well. The positive mean-variance relationship tends to make direct comparisons of variation between populations with very different means difficult. For instance, comparing the standard deviation for a body measurement in a population of mice with that for the same measurement in a population of elephants is not meaningful. To make standard deviations comparable across populations with very different means, we can instead compare a standardized metric called the ``coefficient of variation'' (CV), which is simply the sample standard deviation divided by the sample mean (and usually expressed as a \% by multiplying by 100).

\hypertarget{exercises-associated-with-this-chapter-4}{%
\section{Exercises associated with this chapter:}\label{exercises-associated-with-this-chapter-4}}

\begin{itemize}
\tightlist
\item Problem Set 2
\end{itemize}

\hypertarget{additional-learning-resources-4}{%
\section{Additional learning resources:}\label{additional-learning-resources-4}}

\begin{itemize}
\item Irizarry, R. A. Introduction to Data Science. \url{https://rafalab.github.io/dsbook/} - A gitbook written by a statistician, with great introductions to key topics in statistical inference.
\item Logan, M. 2010. Biostatistical Design and Analysis Using R. - A great intro to R for statistical analysis
\end{itemize}

\hypertarget{principles-of-experiment-and-study-design}{%
\chapter{Principles of Experiment and Study Design}\label{principles-of-experiment-and-study-design}}

\hypertarget{background-4}{%
\section{Background}\label{background-4}}

In order to obtain proper estimates for the parameters we care about as scientists, we have to keep in mind the probability and sampling principles discussed in the preceding chapters. Studies and experiments enable us to take samples and effectively make statistical inferences, but only if we design the studies in structured ways that adhere to (and therefore take advantage of) sampling theory assumptions. Two of the most important concepts along these lines are \textbf{replication} and \textbf{randomization}.
As you learned in the last chapter, our uncertainty (as measured by the standard error) about estimation of a particular population parameter is high when our sample size is low, and this uncertainty decreases as we increase sample size. For this reason, the number of ``replicates'' in a study is intrinsically tied to uncertainty. We will return to this in the context of ``statistical power'' in the next chapter. If we require little uncertainty when estimating or comparing parameters, we need adequate replication. Furthermore, sampling has to be performed at the level of each population in question. For example, if we are comparing two treatments in an experiment, we need adequate replication within both of those treatments, because they represent two different populations.

We also learned that sampling theory is based on the notion of the random variable, which is modeled by random draws of observations from a theoretical population. Individuals in a study or experiment need to be selected or assigned to groups randomly and independently of one another to avoid bias in our parameter estimates and comparisons. In this chapter we will discuss how these considerations, and related ones, have been formalized into conventions of experimental design that should be followed across the scientific community.

\hypertarget{what-is-an-experimental-study}{%
\section{What is an experimental study?}\label{what-is-an-experimental-study}}

We should start by describing a few different categories of study. All categories usually involve, either directly or indirectly, the estimation of population parameters. In some studies, which we refer to as ``experimental,'' the researcher assigns treatments to units in such a way as to ``isolate'' the effects of the treatments on the variable(s) of interest.

\emph{A quick aside: In study design parlance, we refer to these variables of interest as ``dependent'' or ``response'' variables. Response variables are usually the basis for the population parameters we are trying to estimate and compare. The variables we are manipulating (as in an experiment) or otherwise measuring for their explanatory potential are called ``independent'' or ``explanatory'' variables.}

In an observational study, on the other hand, we let nature ``do the assigning'' of treatments to units, and we simply observe and measure the relationships among variables, whether they are causal or not. The crucial advantage of experiments over observational studies, then, derives from the random assignment of treatments to units. Random assignment, or randomization, minimizes the influence of confounding variables. To illustrate the advantage of randomization, consider the following example.

\hypertarget{a-hypothetical-study-example}{%
\subsection{A hypothetical study example}\label{a-hypothetical-study-example}}

Let's say that we know survival of climbers of Mount Everest is higher for individuals taking supplemental oxygen than for those who don't. As physiologists, or anthropologists, or exercise physiologists, or sociologists, or tourism economists, we may want to know \textbf{\emph{why}} this observation is true. One possibility is that supplemental oxygen (our explanatory variable) really does cause higher survival (our response variable). The other is that the two variables are associated because other variables affect both supplemental oxygen use and survival. For instance, use of supplemental oxygen might be a benign indicator of a greater overall preparedness of the climbers that use it.
Variables (like preparedness) that distort the causal relationship between the measured variables of interest (oxygen use and survival) are called \textbf{confounding variables}. They are correlated with the variable of interest, and therefore they prevent a decision about cause and effect. With randomized assignment, no confounding variables will be associated with treatment except by chance, so if sample sizes are large enough no spurious conclusions about cause and effect should be reached. \hypertarget{basic-study-design-terminology}{% \section{Basic study design terminology}\label{basic-study-design-terminology}} Many experimentalists, and indeed most statisticians working with experimental data, use a common set of terms to describe elements of a study. Those terms are used and defined in this section. For example, we defined response (or dependent) and explanatory (or independent) variables above, and more will be introduced below. It is good to become familiar with these terms, as they will continually pop up as you read the literature within your own study discipline, read technical publications and online posts regarding statistical analysis, and engage in discussions with collaborators and analysts. To start out, below is a table of some general terms we have covered already, with some more formal definitions. \includegraphics[width=12.4in]{images/Logan_ExpTerms} From Logan, M. 2010 \hypertarget{clinical-trials}{% \section{Clinical trials}\label{clinical-trials}} The gold standard of experimental designs is the \textbf{clinical trial}. In fact, experimental design in all areas of biology have been informed by procedures used in clinical trials. A clinical trial is an experimental study in which two or more treatments are assigned to human subjects, usually with some design specifics we visit below. The design of clinical trials has been refined and approached with great care because the cost of making a mistake with human subjects is so high. Experiments on nonhuman subjects are simply called ``laboratory experiments,'' or ``field experiments'' in cases where the experiment is not confined to a small, indoor space. \hypertarget{a-clinical-trial-example}{% \subsection{A clinical trial example}\label{a-clinical-trial-example}} Transmission of the HIV-1 virus via sex workers contributes to the rapid spread of AIDS in Africa. The spermicide nonoxynol-9 had shown \emph{in vitro} activity against HIV-1, which motivated a clinical trial by van Damme et al. (2002). In this study the authors tested whether a vaginal gel containing the chemical would reduce the risk of acquiring the disease by female sex workers. Data were gathered on a volunteer sample of 765 HIV-free sex-workers in six clinics in Asia and Africa. Two gel treatments were assigned randomly to women at each clinic, one gel containing nonoxynol-9 and the other a placebo. Neither the subjects nor the researchers making observations at the clinics knew who received the treatment and who got the placebo. The table below shows the raw data. \includegraphics[width=12.9in]{images/images_6a.005} A major goal of experimental design is to eliminate bias and to reduce sampling error when estimating and testing effects of one variable on another. To reduce bias, the experiment described above included the following: \begin{itemize} \tightlist \item A \textbf{Simultaneous control group}: Inclusion of both the treatment of interest and a control group (the women receiving the placebo). 
\item \textbf{Randomization}: Treatments were randomly assigned to women at each clinic.
\item \textbf{Blinding}: Neither the subjects nor the clinicians knew which women were assigned which treatment.
\end{itemize}

To reduce the effects of sampling error, the experiment included the following:

\begin{itemize}
\tightlist
\item \textbf{Replication}: A study is carried out on multiple, independent subjects.
\item \textbf{Balance}: Equal sample sizes across treatment groups. In this case, the number of women was nearly equal in the two groups at every clinic.
\item \textbf{Blocking}: Treatments are applied systematically within larger groups that likely explain large amounts of variation. Here subjects were grouped according to the clinic they attended, yielding multiple repetitions of the same experiment in different settings (``blocks'').
\end{itemize}

Let's consider each of these design elements in turn, in a bit more depth.

\hypertarget{simultaneous-control-groups}{%
\subsection{Simultaneous control groups}\label{simultaneous-control-groups}}

In clinical trials either a placebo or the currently accepted treatment should be provided. In experiments requiring intrusive methods to administer treatment, such as injections, surgery, restraint, confinement, etc., the control subjects should be perturbed in the same way as the other subjects, except for the treatment itself, as far as ethical considerations permit. The ``sham operation'', in which surgery is carried out without the experimental treatment itself, is an example. In field experiments, applying a treatment of interest may physically disturb the plots receiving it and the surrounding areas, perhaps through trampling of the ground by the researchers. Ideally, the same disturbance should be applied to the control plots.

\hypertarget{randomization}{%
\subsection{Randomization}\label{randomization}}

The researcher should randomize assignment of treatments to units or subjects. Chance rather than conscious or unconscious decision determines which units end up receiving the treatment and which the control. A completely randomized design is one in which treatments are assigned to all units by randomization. Randomization breaks the association between possible confounding variables and the explanatory variable. It doesn't eliminate the variation contributed by confounding variables, only their correlation with the treatment variable. Randomization ensures that variation from confounding variables is similar between the different treatment groups.

Randomization should be carried out using a random process such as a random number generator. A general strategy might include the following steps:

\begin{itemize}
\tightlist
\item List all n subjects, one per row, in a spreadsheet or computer programming object.
\item Use the computer to give each individual a random number.
\item Assign treatment A to those subjects receiving the lowest numbers and treatment B to those with the highest numbers.
\end{itemize}

Other procedures for assigning treatments to subjects are almost always inferior because they do not eliminate the effects of confounding variables. ``Haphazard'' assignment, in which the researcher chooses a treatment while trying to make it random, has repeatedly been shown to be non-random and prone to bias.

\hypertarget{blinding}{%
\subsection{Blinding}\label{blinding}}

Blinding is the process of concealing information from participants (sometimes including researchers) about which subjects receive which treatment.
Blinding prevents subjects and researchers from changing their behavior, consciously or unconsciously, as a result of knowing which treatment they were receiving or administering. For example, studies showing that acupuncture has a significant effect on back pain are limited to those without blinding (Ernst and White 1998).

In a single-blind experiment, the subjects are unaware of the treatment that they have been assigned. Treatments must be indistinguishable to subjects, which prevents them from responding differently according to knowledge of treatment. Blinding can also be a concern in non-human studies where animals respond to stimuli.

In a double-blind experiment the researchers administering the treatments and measuring the response are also unaware of which subjects are receiving which treatments. Researchers sometimes have pet hypotheses, and they might treat experimental subjects in different ways depending on their hopes for the outcome. Many response variables are difficult to measure and require some subjective interpretation, which makes the results prone to bias. Researchers are naturally more interested in the treated subjects than the control subjects, and this increased attention can itself result in improved response. Reviews of medical studies have revealed that studies carried out without double-blinding exaggerated treatment effects by 16\% on average compared with studies carried out with double-blinding (Jüni et al.~2001).

Experiments on non-human subjects are also prone to bias from lack of blinding. Bebarta et al.~(2003) reviewed 290 two-treatment experiments carried out on animals or on cell lines. The odds of detecting a positive effect of treatment were more than threefold higher in studies without blinding than in studies with blinding. Blinding can be incorporated into experiments on nonhuman subjects using coded tags that identify the subject to a ``blind'' observer without revealing the treatment (and who measures units from different treatments in random order).

\hypertarget{replication}{%
\subsection{Replication}\label{replication}}

The goal of experiments is to estimate and test treatment effects against the background of variation between individuals (``noise'') caused by other variables. One way to reduce noise is to make the experimental conditions constant. In field experiments, however, highly constant experimental conditions might be neither feasible nor desirable. By limiting the conditions of an experiment, for example, we also limit the generality of the results. There is always a tradeoff between the range of explanatory variables explored and the extent of replication required. If an experiment includes many different treatment types, or an observational study spans a broad range of explanatory variable values, we need adequate replication across that entire range, so more replication will be required overall.

Replication in an experiment is the assignment of each treatment to multiple, independent experimental units. Without replication, we would not know whether response differences were due to the treatments or just chance differences between the treatments caused by other factors. As discussed, studies that use more units (i.e.~that have larger sample sizes) will have smaller standard errors and a higher probability of getting the correct answer from a hypothesis test. Larger samples mean more information, and more information means better estimates and more powerful tests.
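As a small illustration of why replication pays off, the following hedged sketch (with simulated, made-up measurements) shows how the standard error of the mean, introduced in the last chapter, shrinks as the number of independent units grows:

\begin{verbatim}
set.seed(7)
sem <- function(x) sd(x) / sqrt(length(x))  # standard error of the mean
sem(rnorm(5,  mean = 10, sd = 3))           # few experimental units
sem(rnorm(20, mean = 10, sd = 3))           # more units
sem(rnorm(80, mean = 10, sd = 3))           # many units: SEM is much smaller
\end{verbatim}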
\hypertarget{a-note-on-pseudoreplication}{%
\subsection{A note on pseudoreplication}\label{a-note-on-pseudoreplication}}

Replication is not about the total number of plants or animals used, but the number of independent units in the experiment. An ``experimental unit'' is the independent unit to which treatments are assigned. The figure below shows three experimental designs used to compare plant growth under two temperature treatments (indicated by the shading of the pots). The first two designs are actually un-replicated with respect to temperature, even though each plant's growth might naively be regarded as a random, independent observation. When individual observations in a study are erroneously treated as independent in subsequent statistical analysis, we call it ``pseudoreplication.'' Pseudoreplication results in violation of the assumptions we rely on to conduct statistical hypothesis tests, and it will cause misleading conclusions to be drawn, usually in the form of rejection of the null hypothesis when the null hypothesis is actually true.

\includegraphics[width=13.03in]{images/images_6a.006}

\hypertarget{balance}{%
\subsection{Balance}\label{balance}}

A study design is balanced if all treatments have the same sample size. Conversely, a design is unbalanced if there are unequal sample sizes between treatments. Balance is a second way to reduce the influence of sampling error on estimation and hypothesis testing. To appreciate this, look again at the equation for the standard error of the difference between two treatment means:

\includegraphics[width=11.75in]{images/images_6a.007}

For a fixed total number of experimental units, \(n_1 + n_2\), the standard error is smallest when \(n_1\) and \(n_2\) are equal. Balance has other benefits as well. For example, Analysis of Variance (discussed later in this book) is more robust to departures from the assumption of equal variances when designs are balanced or nearly so.

\hypertarget{blocking}{%
\subsection{Blocking}\label{blocking}}

Blocking is the grouping of experimental units that have similar properties. Within each block, treatments are randomly assigned to experimental units. Blocking essentially repeats the same, completely randomized experiment multiple times, once for each block. Differences between treatments are only evaluated within blocks, and in this way the component of variation arising from differences between blocks is discarded. The cartoon below depicts two blocks.

\includegraphics[width=12.28in]{images/images_6a.008}

\hypertarget{blocking-paired-designs}{%
\subsubsection{Blocking \textbar{} Paired designs}\label{blocking-paired-designs}}

As an example of blocks with paired subjects, consider the design choices for a two-treatment experiment to investigate the effect of clear cutting on salamander density. In the completely randomized (``two-sample'') design we take a random sample of forest plots from the population and then randomly assign each plot to either the clear-cut treatment or the no clear-cut treatment. In the paired design we take a random sample of forest plots and clear-cut a randomly chosen half of each plot, leaving the other half untouched.

In the paired design, measurements on adjacent plot-halves are not independent. This is because they are likely to be similar in soil, water, sunlight, and other conditions that affect the number of salamanders. As a result, we must analyze paired data differently than when every plot is independent of all the others, as in the case of the two-sample design.
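To see what that difference in analysis looks like in practice, here is a hedged sketch with entirely made-up salamander counts, using the \texttt{t.test()} function mentioned earlier. Plots differ a lot from one another, but the clear-cutting effect within each plot is consistent; the paired analysis strips out the plot-to-plot variation, while the two-sample analysis does not:

\begin{verbatim}
set.seed(3)
plot_baseline <- rnorm(12, mean = 20, sd = 6)        # made-up baseline density per plot
uncut_half <- plot_baseline + rnorm(12, 0, 1)        # uncut half of each plot
cut_half   <- plot_baseline - 2 + rnorm(12, 0, 1)    # clear-cut half; true effect of -2
t.test(cut_half, uncut_half)$p.value                 # ignores the pairing
t.test(cut_half, uncut_half, paired = TRUE)$p.value  # respects the pairing
\end{verbatim}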
Paired design is usually more powerful than completely randomized design because it controls for a lot of the extraneous variation between plots or sampling units that sometimes obscures the effects we are looking for.

\begin{center}\includegraphics[width=0.9\linewidth]{images/images_6a.009} \end{center}

\hypertarget{blocking-randomized-complete-block-design}{%
\subsubsection{Blocking \textbar{} Randomized complete block design}\label{blocking-randomized-complete-block-design}}

Randomized complete block (RCB) design is analogous to the paired design, but may have more than two treatments. Each treatment is applied once to every block. As in the paired design, treatment effects in a randomized block design are measured by differences between treatments exclusively within blocks. By accounting for some sources of sampling variation, blocking can make differences between treatments stand out. Blocking is worthwhile if units within blocks are relatively homogeneous, apart from treatment effects, and units belonging to different blocks vary because of environmental or other differences.

\hypertarget{what-if-you-cant-do-experiments}{%
\section{What if you can't do experiments?}\label{what-if-you-cant-do-experiments}}

Experimental studies are not always feasible, in which case we must fall back upon observational studies. The best observational studies incorporate as many of the features of good experimental design as possible to minimize bias (e.g., blinding) and the impact of sampling error (e.g., replication, balance, blocking, and even extreme treatments) except for one: randomization. Randomization is out of the question, because in an observational study the researcher does not assign treatments to subjects.

Two strategies are used to limit the effects of confounding variables on a difference between treatments in a controlled observational study: matching, and adjusting for known confounding variables (covariates). Matching is similar to the idea of blocks, but there is no random assignment. To give an example of matching, pairs of individuals with similar explanatory variable values (e.g.~females in a specific age class) might be defined, such that one member of the pair has one level of the focal explanatory variable (e.g. ``control'') and the other member the other level (e.g. ``treatment''). Study adjustment for known confounding covariates, on the other hand, simply restricts the study to subjects in such a way as to exclude variation from the confounding variable(s). With this strategy, groups of individuals, for example certain age ranges, may simply be excluded from the study altogether.

\hypertarget{exercises-associated-with-this-chapter-5}{%
\section{Exercises associated with this chapter:}\label{exercises-associated-with-this-chapter-5}}

\begin{itemize}
\tightlist
\item Problem Set 3
\end{itemize}

\hypertarget{additional-learning-resources-5}{%
\section{Additional learning resources:}\label{additional-learning-resources-5}}

\begin{itemize}
\tightlist
\item Logan, M. 2010. Biostatistical Design and Analysis Using R. - A great intro to R for statistical analysis
\end{itemize}

\hypertarget{introduction-to-hypothesis-tests}{%
\chapter{Introduction to Hypothesis Tests}\label{introduction-to-hypothesis-tests}}

\hypertarget{background-5}{%
\section{Background}\label{background-5}}

An extension from the concept of estimating a population parameter is the formal comparison of such a parameter to a single value, or comparison to a value for another group, understood as another population.
After all, if we can use probability and sampling theory to estimate a parameter and its uncertainty, we should also be able to compare parameter estimates, with inclusion of uncertainty about their true difference. The ``frequentist'' approach in statistics traditionally taken to perform these types of comparisons requires the definition of precise statements before we do the numerical comparisons. These statements are known as \textbf{\emph{statistical hypotheses}}, and how we frame them is very important because it dictates both how we calculate the test statistics required to compare populations and how we use probability distributions to determine how extreme the statistics are, relative to random chance. This notion of paying attention to an observed test statistic's value relative to ``how frequently'' we would expect to see a value that extreme or more extreme across multiple theoretical samples (under random expectations) is why we refer to this approach as ``frequentist'' statistical inference. We'll get more into what test statistics are and how they are calculated for various tests later in the book. For now, let's focus on how we frame hypotheses to enable the types of comparisons we wish to do as scientists. \hypertarget{null-and-alternative-hypotheses}{% \section{Null and alternative hypotheses}\label{null-and-alternative-hypotheses}} Often as empiricists we want to know whether some parameter differs between groups (populations). Perhaps the populations are different because we have differentiated them in an experiment by applying a treatment to one and no treatment to another. And, if we expect a focal variable we are measuring (i.e.~a response variable) to be fundamentally tied to the treatment based on our knowledge of the system, we can test the validity of that relationship. We might state very simply, for example, ``I hypothesize that on average the variable \emph{X} differs between the treatment and control groups.'' A hypothesis is really just a statement of belief about the world. One issue with hypotheses is that from a logic of deduction standpoint, we can't universally ``prove'' a hypothesis, only reject (i.e.~falsify) it. For this reason we must frame a \textbf{\emph{null hypothesis}} to complement our originally stated \textbf{\emph{alternative hypothesis}}. The null hypothesis represents all possibilities \emph{except} the expected outcome under the alternative. A statistical test is then conducted with respect to the null hypothesis, and if we reject the null hypothesis we typically infer support for the alternative, provided the assumptions of the statistical test about our data were valid. It's a good idea to practice the formal framing of null and alternative hypotheses, as this will help with the setting up of statistical tests and the reporting of tests in written reports or publications. Here is one example of a null and alternative hypothesis regarding trees of two different species (our populations) being of different heights. \(H_0\) : \emph{Null hypothesis} : Ponderosa pine trees are the same height on average as Douglas fir trees \(H_A\) : \emph{Alternative Hypothesis}: Ponderosa pine trees are not the same height on average as Douglas fir trees You will often see the shorthand notation above for hypotheses (\(H_0\) for null and \(H_A\) for alternative), especially when hypotheses are expressed nonverbally. 
A nonverbal (quantitative) expression of the above hypotheses, assuming we choose to compare ``averages'' using the mean, would be:

\[H_0 : \mu_1 = \mu_2\] \[H_A: \mu_1 \neq \mu_2\]

Where \(\mu_1\) and \(\mu_2\) are the population means for ponderosa pine and Douglas fir tree species, respectively. One important point to make here is that unlike the example above, hypotheses (and the statistical tests used to evaluate them) can be directional (also called ``one-sided'' or ``one-tailed''). If, for instance, we really wanted to test whether ponderosa pines are shorter, on average, than Douglas firs because we suspect this directionality, we could frame the null and alternative hypotheses as follows:

\[H_0 : \mu_1 \geq \mu_2\] \[H_A: \mu_1 < \mu_2\]

Remember, the null hypothesis encapsulates all outcomes not specified by the alternative. The implications regarding uncertainty (\emph{p}-values) when defining hypothesis tests as either non-directional (two-sided) or directional (one-sided) are important to understand and will be discussed below.

\hypertarget{hypotheses-tests}{%
\section{Hypothesis tests}\label{hypotheses-tests}}

Statistical tests provide a way to perform formal evaluations we call \emph{critical tests} of null hypotheses such as in the examples above. Statistical tests require the definition of \textbf{\emph{test statistics}} that form the basis for comparison among populations. Just like raw data, test statistics are \textbf{\emph{random variables}} and depend on sampling distributions of the underlying data. In the case of parametric statistical tests (those that make use of a particular probability distribution), test statistics are calculated from the data using a specialized formula.

For example, we may want to test the null hypothesis that two population means are equal. One option is to calculate what is called the \emph{t}-statistic. (We will get into the details of the t-test shortly.) The \emph{t}-statistic is a standardized difference between two sample means, so a value of \emph{t} equal to zero indicates no difference between population means. We can then evaluate where our sample (data)-based value of \emph{t} falls with respect to a known theoretical distribution for \emph{t}, called the ``\emph{t}-distribution,'' for which the center and peak are at the value zero. If our observed value of the test statistic is sufficiently far from zero (i.e.~in the ``tail'' of the \emph{t}-distribution), we will decide to reject the null hypothesis.

The \emph{t}-distribution is just one example of probability distributions used in statistical hypothesis testing. The figure below shows the \emph{t}-distribution and three others commonly used in statistical inference: the \emph{z}, \(\chi^2\), and \emph{F} distributions, some drawn with multiple shape parameters defined.

\begin{center}\includegraphics[width=0.5\linewidth]{images/week_3.003} \end{center}

The particular distribution and shape (such as those above) chosen for a statistical test depends on whether the appropriate test statistic (such as the \emph{t}-statistic) can be calculated from your data. That determination is ultimately made by the analyst, based on test assumptions about the sample (we will cover those in turn as we discuss different tests). The shape and associated parameters of a distribution used to evaluate a test statistic also depend on sample properties such as sample size. \textbf{\emph{Degrees of freedom}}, for example, are an important parameter for critical tests.
The degrees of freedom for a particular test convey how many independent observations are used to estimate a population parameter of interest. Because parameter estimates are used in test statistics, we need to account for degrees of freedom. You may ask, ``shouldn't all observations in a sample be independent with respect to estimation of a parameter?'' The answer, as it turns out, is ``no,'' because estimates (like a sample mean, for example) are calculated from the individual observations in a sample. In a sample of 10, 9 of the observations can theoretically vary freely when calculating a given sample mean, but the final, 10th observation cannot, simply based on the nature of an arithmetic mean. In this case, because only 9 observations can vary independently, we have \emph{n} - 1 = 9 degrees of freedom.

As mentioned, degrees of freedom determine the shape of the distributions used to evaluate test statistics. In particular, as the degrees of freedom increase (i.e.~the sample size increases), the shape of the probability distribution gets narrower. This means that a given test statistic value calculated from a large sample will fall further into the tail (be judged more extreme) than the same value would if it had been calculated from a smaller sample. Recall that the further a test statistic falls into the tail of its distribution, the more extreme it is relative to our null hypothesis expectation, and therefore the smaller the \emph{p}-value is for our statistical test of the null hypothesis. In summary, we are more likely to reject the null hypothesis with a greater sample size. This gets to the concept of ``statistical power,'' which we will return to below.

To think about all of this at a high level, consider the plots below of two different population distributions for the same variable. In \emph{a} the two different population distributions are in very different locations. If we took even a moderately sized sample from both populations, the difference in the sample mean between the blue and red populations would be large relative to their respective variances. This means that if we calculated a \emph{t}-statistic from our samples, it would be quite large. In \emph{b}, on the other hand, the population distributions are nearly on top of one another. If we calculated a \emph{t}-statistic from samples in that scenario, it would be near zero. Finally, by comparing our calculated \emph{t}-statistic to a \emph{t}-distribution with the appropriate degrees of freedom, we could determine (in both scenarios) how likely it is to have observed that particular value for \emph{t} under the null hypothesis of \emph{t} = 0. In \emph{a} we would observe an extreme value for \emph{t} and reject the null hypothesis, but in \emph{b} we would observe a value for \emph{t} close to the center of the distribution (at 0), and fail to reject the null hypothesis of no difference in means.

\begin{center}\includegraphics[width=0.5\linewidth]{images/week_3.001} \end{center}

\hypertarget{p-values-type-i-and-type-ii-error}{%
\subsection{\texorpdfstring{\emph{p}-values, Type I, and Type II error}{p-values, Type I, and Type II error}}\label{p-values-type-i-and-type-ii-error}}

At this point we should consider the possible outcomes of a hypothesis test. These include situations in which we may either falsely reject or falsely fail to reject the null hypothesis. The table below is a useful summary of the four possible outcomes we face when testing a null hypothesis.
\begin{center}\includegraphics[width=0.5\linewidth]{images/week_3.007} \end{center}

As indicated, the columns correspond to our actual evaluation of the null hypothesis (whether we reject or fail to reject it), and the rows correspond to whether the null hypothesis is actually incorrect or correct (which of course we never know unless data are simulated). In the upper left-hand scenario, we reject the null hypothesis and correctly conclude that there is an effect (e.g.~population means differ, etc.). In the upper right-hand scenario, we fail to reject the null hypothesis and conclude there is no effect, but that conclusion is wrong. In this scenario, what we call a \textbf{\emph{Type II error}} (``false negative''), there is a real effect but we ``miss'' it with our test. In the lower left-hand situation we reject the null hypothesis but do so incorrectly, as there is no real effect. This is called \textbf{\emph{Type I error}} (``false positive''), and is the error reflected in a \emph{p}-value from a statistical test. Finally, in the lower right-hand situation we fail to reject the null hypothesis and have done so correctly, as there really is no effect. You will often see the probability of Type II error represented by \(\beta\) (\emph{beta}) and the probability of Type I error represented by \(\alpha\) (\emph{alpha}).

As mentioned, we usually decide to reject a null hypothesis if the \emph{p}-value for our statistical test is smaller than a given Type I error rate we are ``willing'' to tolerate. As you are probably well aware, a routinely used threshold for \(\alpha\) is 0.05. The origin of this convention dates back to a paper published by one of the founders of frequentist statistics, R. A. Fisher, in 1926. In the paper, titled ``The Arrangement of Field Experiments,'' Fisher proposed also considering more conservative \(\alpha\) thresholds of 0.02 or 0.01 if desired, but expressed his ``personal preference'' of setting the \(\alpha\) threshold at 0.05. The same passage in the paper does, however, imply that using an \(\alpha\) threshold of 0.05 to assess significance should be done in the context of \emph{multiple, repeated experiments}, in which the experimenter almost always observes \emph{p}-values less than 0.05. The latter point is certainly worth thinking about carefully, as most experimentalists today stick with the ``0.05'' convention but do not commonly repeat experiments many times.

The \emph{p}-value for a statistical test, as we will re-visit below for \emph{t}-tests in more detail, is simply the area under the probability distribution that lies outside (in the tail or tails) of the test statistic value(s), and is calculated using integral calculus. You can think of a \emph{p}-value, then, as the probability of observing a test statistic at least as surprising as the one you observed based on your data, assuming the null hypothesis is correct. So, if your test statistic is far into the tail(s) of its probability distribution, it is a surprising observation under the null hypothesis.

You can think of the null hypothesis as being characterized by the test statistic sampling distribution. If you were to take samples over and over again many, many times, and calculate the test statistic each time, it would follow the shape of the distribution. Again taking the \emph{t} distribution as an example of the null expectation of ``no difference between means,'' a value of zero is the most common outcome, with values in the tails much less likely.
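To make the tail-area idea concrete, here is a hedged sketch using \texttt{R}'s built-in \texttt{pt()} function for the \emph{t} distribution. The observed statistic (2.3) and the degrees of freedom (18) are arbitrary numbers chosen purely for illustration:

\begin{verbatim}
t_obs <- 2.3   # a hypothetical observed t-statistic
t_df <- 18     # hypothetical degrees of freedom
pt(t_obs, df = t_df, lower.tail = FALSE)           # one-tailed p-value (upper tail)
2 * pt(abs(t_obs), df = t_df, lower.tail = FALSE)  # two-tailed p-value
\end{verbatim}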
So the \emph{p}-value reflects how probable a test statistic at least as extreme as yours would be \emph{if} the null hypothesis were true (not the probability that the null hypothesis itself is true), and very small values suggest that we reject the null hypothesis. Here is a generic schematic that illustrates the concept of \emph{p}-values:

\begin{center}\includegraphics[width=0.5\linewidth]{images/week_3.009} \end{center}

To summarize, if we reject the null hypothesis, we conclude that there is evidence in favor of the alternative hypothesis (again assuming assumptions of the test are met), but we keep in mind that there is a non-zero chance of Type I error, reflected in our \emph{p}-value. If we fail to reject our null hypothesis, the current evidence suggests that we have little reason to believe our alternative is true, but again there is risk of committing Type II error. How we interpret whether we actually had enough data to confidently rule out our null hypothesis requires an estimate of \textbf{\emph{statistical power}}.

\hypertarget{statistical-power}{%
\subsection{Statistical power}\label{statistical-power}}

Power is the probability of rejecting a false null hypothesis, which is equivalent to 1 - \(\beta\), where \(\beta\) is the Type II error rate. So, the higher the power, the more confident we can be in detecting ``an effect'' with our hypothesis test when that effect truly exists. Power is commonly calculated before an experiment (\emph{a priori}), using either simulated data, a ``pilot'' data set, or data from similar experiments. As you will see from the relationships below, pre-study power analyses can be extremely useful in determining the sample sizes required to detect an effect of a particular size. This is especially important if the resources to conduct a study are limited, and indeed, pre-study power analyses are often required for grant proposals, especially those that involve experiments with animals.

As a benchmark, statisticians conventionally ``aim'' for a power of 0.8 or higher, but this is of course subject to the nature of the experiment at hand and how critical detecting true effects is. For example, certain clinical studies may need to be especially high-powered for ethical reasons. It all depends on the ``cost'' of committing Type II error. Power analyses can also be conducted after a study (\emph{post hoc}), especially if experimenters don't want to be left wondering whether they may have detected the effect associated with their alternative hypothesis, had they only had a larger sample size.

Just below is a generic expression for power, and how it varies given other variables associated with a particular hypothesis test. Adjustments to this expression may be required, depending on the particular statistical framework used, but it works as a good guide. Note that in this expression power is ``proportional to'' these variables as indicated, and not ``equal to.'' In many relatively simple experimental design scenarios this expression will provide practical estimates. If not, there are more complicated formulae depending on the design of your study, and there is also frequently the prospect of doing simulations to understand the power relationships inherent in your study system.

\[ Power \propto \frac{(ES)(\alpha)(\sqrt n)}{\sigma}\]

In the above expression power is proportional to the combination of these parameters:

\begin{itemize}
\item \emph{ES} = Effect size. This is the magnitude of the difference between populations you hope to detect with your test. For example, the difference in population means, etc.
It can be expressed in different ways depending on the calculation, so pay attention to the input requirements of functions you are using for power analysis. \item \(\alpha\) = Type I error rate tolerated (usually 0.05). \item \emph{n} = Sample size. The number of observations per sample group. \item \(\sigma\) = Standard deviation among the experimental units within the same group. \end{itemize} We often care about the relationships depicted in the example power relationships below. For instance, we may want to know what effect size we can detect at various power levels, assuming sample size and standard deviation are fixed. Likewise, we may want to identify the smallest sample size required to detect a particular effect size, assuming a given power (e.g.~0.8) and standard deviation. \begin{center}\includegraphics[width=0.9\linewidth]{images/images_6b.002} \end{center} For a rough calculation under certain experimental design constraints, the following can be used as a ``quick'' sample size estimator when desired power and \(\alpha\) are the conventional 0.8 and 0.05, respectively. \begin{center}\includegraphics[width=13.11in]{images/images_6b.003} \end{center} \hypertarget{a-note-on-p-values-and-null-hypothesis-significance-testing-nhst}{% \subsection{\texorpdfstring{A note on \emph{p}-values and Null-Hypothesis Significance Testing (NHST)}{A note on p-values and Null-Hypothesis Significance Testing (NHST)}}\label{a-note-on-p-values-and-null-hypothesis-significance-testing-nhst}} While Null-Hypothesis Significance Tests are a useful way of framing our hypotheses and results to efficiently draw conclusions, this approach has drawn quite a bit of valid criticism in recent years. For context, many fields of research, including sociology, psychology, the biomedical sciences, and others are experiencing a `replication crisis', in which statistically significant findings have been unable to be replicated in subsequent studies, often despite similar methodologies. While there are various potential reasons for this, this crisis has revealed that it is far easier to reject a null hypothesis (at p \textless{} 0.05) with noisy data than might be assumed. If what has become a universal standard for evidence is inflating the risk of Type I errors (false positives), then how should we think about \emph{p}-values when evaluating our hypotheses? It is helpful to first recall exactly what a \emph{p}-value represents. The \emph{p}-value is defined as the probability of finding an effect \emph{at least} as extreme as the one observed, given (1) that the null hypothesis is true, and (2) that assumptions of the test are met. So what doesn't a \emph{p}-value represent? \begin{itemize} \item \textbf{A \emph{p}-value doesn't tell you whether the magnitude of an observed effect is meaningful in the context of your hypotheses}. This point becomes particularly relevant when dealing with large amounts of data - you may have noticed by studying the above power law relationship that you might correctly reject your null hypothesis (and with a very low \emph{p}-value at that) for an extremely small effect size if your sample size is very large. But small effect sizes may be irrelevant in a practical sense to your larger questions at hand. In a clinical study, for instance, a tiny effect size may not have any practical bearing on future patients' outcomes. Additionally, large sample sizes can exacerbate systematic problems in data. 
For example, if treatment groups are systematically biased in ways independent of your treatment, then you may detect a significant effect that reflects this bias rather than your treatment. While this can be a problem at all sample sizes, larger sample sizes increase the likelihood that even minor biases will influence your estimated effect. Small effect sizes derived from ``well powered'' studies should therefore be interpreted cautiously. \item \textbf{A \emph{p}-value doesn't tell you how well the assumptions of your analyses are met by your data}. No level of statistical significance is meaningful if the analysis is inappropriate for your data. Perhaps your data comes from a Poisson distribution but your analysis assumes normally distributed data. Perhaps there are grouping variables in your data that violate assumptions of independence if unaccounted for in your analysis. There are as many ways in which data analysis can produce invalid results as there are statistical methods. Understanding the best approach for analyzing your data and understanding the ways in which your data might violate related assumptions is a basic requirement for attaining meaningful \emph{p}-values. \item \textbf{A \emph{p}-value doesn't tell you anything about the plausibility of your \emph{a-priori} hypotheses}. Are you hypothesizing a brand new idea supported by no previous evidence, iterating off of a previous body of work in an incremental fashion, or replicating a hypothesis that's been tested multiple times in the past? How solid is the science under which you've built your hypothesis, and does your hypothesis logically follow from accepted knowledge? The more unlikely a hypothesis, the stronger standard of evidence it should be held to (in other words, exceptional claims mandate exceptional evidence). This does not mean that, say, a brand new idea or a finding that contradicts a prior body of work should be disregarded if the \emph{p}-value = 0.05, but claims to its validity should be tempered accordingly and justified beyond a statistically significant result. If this feels subjective and idiosyncratic, that's because ultimately it is. Domain knowledge is essential to the interpretation of results, particularly when inferring a causal relationship between variables. \end{itemize} Note that none of this is to suggest that \emph{p}-values are irrelevant. Rather, that effective and responsible interpretation of results demands an appreciation of all relevant factors: the plausibility of hypotheses or proposed mechanisms, the methods and quality of data collection, the appropriateness of analyses for the data and questions at hand, and, of course, effect sizes and associated significance. While we have framed the issues associated with NHST in the context of Type I errors, it is just as possible that many of the issues discussed can lead to Type II errors (false negatives), in which \emph{p}-values exceeding standard thresholds obscure meaningful effects. For this reason, it is not enough to simply require more exacting thresholds of significance. As you continue to learn foundational stats and eventually assess hypotheses using data of your own, hopefully you will treat \emph{p}-values as one among many relevant factors to consider. 
\hypertarget{the-t-test-and-t-sampling-distribution}{%
\section{\texorpdfstring{The \emph{t}-test and \emph{t} sampling distribution}{The t-test and t sampling distribution}}\label{the-t-test-and-t-sampling-distribution}}

Above we discussed that a difference between two population means can be compared using a test based on the \emph{t} distribution. The \emph{t}-test is also often referred to as ``Student's \emph{t}-test,'' because ``Student'' was the pseudonym used by the person who wrote a widely read 1908 paper containing one of the first published accounts of the test's practical application. That person, whose real name was William Sealy Gosset, was a student of statistician Karl Pearson, but because Gosset's employer (Guinness Brewery) didn't allow their employees to publish work-related material, the pseudonym was used.

As previously mentioned, the \emph{t}-test is based on a test statistic (the \emph{t}-statistic) that usually considers the difference between two sample means, to test the null hypothesis of no population difference in means. This is the so-called ``two-sample \emph{t}-test,'' and the one we will consider in this course. It is also possible to perform a ``one-sample \emph{t}-test,'' in which the sample mean from a single population is tested for a difference from a fixed value (such as zero). Below we consider the calculation of the \emph{t}-statistic and two forms of the hypothesis test (one- and two-tailed) for the two-sample comparison case.

Note first that the \emph{t}-statistic is simply the difference in sample means divided by the standard error for that difference, to account for variation within the two populations:

\[\large t = \frac{(\bar{y}_1-\bar{y}_2)}{s_{\bar{y}_1-\bar{y}_2}} \]

where

\begin{center}\includegraphics[width=0.6\linewidth]{images/week_3.016} \end{center}

As mentioned, the denominator is the calculation for the standard error of the mean difference, in which \emph{s} denotes the sample standard deviations for populations 1 and 2, and \emph{n} denotes the sample sizes for populations 1 and 2. The degrees of freedom (\emph{df}) for this test are equal to \(n_1+n_2-2\).

For a ``one-tailed'' test, recall that our hypothesis assumes directionality in the difference in means. So, if our alternative hypothesis is that \(\mu_1>\mu_2\), a large value of \emph{t} in the right tail of the distribution - one that is \textbf{greater} than the ``critical value'' - will result in a \emph{p}-value of less than 0.05. The critical value simply marks the point beyond which the area under the probability density equals 0.05. The generalized figure below illustrates where the critical value \(t_c\) falls. For a one-tailed test in this case, we would reject the null hypothesis if the observed \emph{t} was greater than \(t_c\).

\begin{center}\includegraphics[width=0.9\linewidth]{images/week_3.005_1_tailed} \end{center}

For a ``two-tailed'' test, our hypothesis allows for the possibility that the difference in population means might be greater or less than zero (i.e.~we don't assume a directionality in the difference \emph{a priori}). In this case, we simply have to consider our critical value at both tails of the \emph{t} distribution, such that the areas under the probability density beyond the location in both tails \textbf{sum} to 0.05. And, if our observed \emph{t} is either less than \(-t_c\) or greater than \(t_c\), we would reject the null hypothesis.
\begin{center}\includegraphics[width=0.6\linewidth]{images/week_3.005_2_tailed} \end{center}

In \texttt{R} we can easily perform a \emph{t}-test using the \texttt{t.test()} function. For two samples we simply supply the function two vectors of values, one from sample 1 (argument \texttt{x}) and one from sample 2 (argument \texttt{y}). The default test is two-tailed, but if we want to run a one-tailed test we supply ``less'' to the \texttt{alternative} argument if we are testing whether \(\mu_1<\mu_2\), or ``greater'' to the \texttt{alternative} argument if our alternative hypothesis is that \(\mu_1>\mu_2\). Note that the \texttt{t.test()} function actually performs a ``Welch's \emph{t}-test,'' which is an adaptation of Student's \emph{t}-test. It is very similar, with only minor calculation differences, but it is more reliable for unequal sample sizes and/or unequal variances.

\hypertarget{assumptions-of-parameteric-t-tests}{%
\subsection{Assumptions of parametric t-tests}\label{assumptions-of-parameteric-t-tests}}

As with any parametric statistical test, we should only use a \emph{t}-test if our samples adhere to certain assumptions. Otherwise, our actual Type I and/or Type II error will not be accurately reflected by the test, and we will be more likely to draw the wrong conclusions than intended. The theoretical t-distributions for each degree of freedom were calculated based on the following assumptions:

\begin{itemize}
\item The response variable in the populations is normally distributed. This assumption is most easily assessed by looking at histograms for your data (samples). Confirm that your variable appears to be approximately normally distributed in your samples.
\item The response variable in the populations has equal variances (if comparing two means). Informally this can be evaluated by looking at histograms or boxplots to see if the spread of distributions for both of your samples looks similar. Formally, you can perform something called an F test for equal variances, using the \texttt{var.test()} function.
\item The observations within each sample are independent. This assumption stipulates that you randomly sampled individuals from each of your populations. For example, if your populations represented different species in a specific location, you need to randomly select individuals of each species, as opposed to selecting individuals from one particular family, sub-location, shape, etc.
\end{itemize}

What should you do if the assumption of normality and/or equal variances is not met? There are a few alternatives. As mentioned, we call these alternatives ``nonparametric'' approaches because they do not rely on specific probability distributions, and consequently their assumptions. Nonparametric tests based on the ``rank'' of the values instead of the original values themselves are often an option. The Mann-Whitney \emph{U} test (also called the ``Mann-Whitney-Wilcoxon'' test) tests for distributional differences between the ranks of two samples. In \texttt{R} the function \texttt{wilcox.test()} can be used to perform it, in much the same way the \texttt{t.test()} function is used. Another nonparametric option is to generate a null distribution of the appropriate test statistic from your samples, using either randomization, or resampling with replacement (i.e.~a ``bootstrap test''). These are briefly discussed below, with a simple coded example.
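Before turning to those randomization approaches, here is a hedged sketch that strings together the checks and tests described in this section. The two samples are simulated stand-ins rather than real measurements:

\begin{verbatim}
set.seed(9)
samp_1 <- rnorm(25, mean = 12, sd = 3)  # simulated stand-in for sample 1
samp_2 <- rnorm(25, mean = 10, sd = 3)  # simulated stand-in for sample 2
hist(samp_1)                            # eyeball approximate normality
hist(samp_2)
var.test(samp_1, samp_2)                # F test for equal variances
t.test(samp_1, samp_2)                  # Welch's two-sample t-test (two-tailed)
wilcox.test(samp_1, samp_2)             # rank-based alternative if assumptions fail
\end{verbatim}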
\hypertarget{comparing-means-using-resampling-and-randomization-tests}{%
\section{Comparing means using resampling and randomization tests}\label{comparing-means-using-resampling-and-randomization-tests}}

In many cases when our sample data don't meet assumptions of parametric tests we can create a \textbf{\emph{null statistical distribution}} that models the distribution of a test statistic under the null hypothesis. As in the parametric approaches described above, we first calculate an \textbf{observed test statistic value} for our data. In the situation of comparing two population means, for example, we can calculate the \emph{t} statistic from our data, as above. To create the null distribution we can use either randomization or resampling. For randomization, and assuming a one-tailed test of a larger mean for population 1, we could:

\begin{enumerate}
\item Combine values from both populations into a single vector,
\item Randomly shuffle the vector using the \texttt{sample()} function,
\item Calculate a \emph{t} statistic based on the first \(n_1\) and the remaining \(n_2\) observations as our ``pseudo samples'' from ``populations'' 1 and 2, respectively, and save the value,
\item Repeat steps 2 and 3 many times (e.g.~\(\geq 1000\)),
\item Calculate the proportion of pseudo-replicates in which \emph{t} is greater than or equal to our original, observed value of \emph{t}. This proportion is our estimated \emph{p}-value for the test.
\end{enumerate}

An example using simulated data in \texttt{R} is as follows:

\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{set.seed}\NormalTok{(}\DecValTok{56}\NormalTok{)}
\NormalTok{pop_}\DecValTok{1}\NormalTok{ <-}\StringTok{ }\KeywordTok{rnorm}\NormalTok{(}\DataTypeTok{n=}\DecValTok{50}\NormalTok{, }\DataTypeTok{mean=}\FloatTok{20.1}\NormalTok{, }\DataTypeTok{sd=}\DecValTok{2}\NormalTok{)}\CommentTok{#simulate population 1 for this example}
\NormalTok{pop_}\DecValTok{2}\NormalTok{ <-}\StringTok{ }\KeywordTok{rnorm}\NormalTok{(}\DataTypeTok{n=}\DecValTok{50}\NormalTok{, }\DataTypeTok{mean=}\FloatTok{19.3}\NormalTok{, }\DataTypeTok{sd=}\DecValTok{2}\NormalTok{)}\CommentTok{#simulate population 2 for this example}
\CommentTok{# Store the t statistic calculated from our samples, using t.test()}
\NormalTok{t_obs <-}\StringTok{ }\KeywordTok{t.test}\NormalTok{(}\DataTypeTok{x=}\NormalTok{pop_}\DecValTok{1}\NormalTok{, }\DataTypeTok{y=}\NormalTok{pop_}\DecValTok{2}\NormalTok{, }\DataTypeTok{alternative=}\StringTok{"greater"}\NormalTok{)}\OperatorTok{$}\NormalTok{statistic}
\CommentTok{# Combine both population vectors into one}
\NormalTok{pops_comb <-}\StringTok{ }\KeywordTok{c}\NormalTok{(pop_}\DecValTok{1}\NormalTok{, pop_}\DecValTok{2}\NormalTok{)}
\CommentTok{# Randomly shuffle and calculate t statistic 1000 times}
\NormalTok{t_rand <-}\StringTok{ }\KeywordTok{replicate}\NormalTok{(}\DecValTok{1000}\NormalTok{, \{}
\NormalTok{ pops_shuf <-}\StringTok{ }\KeywordTok{sample}\NormalTok{(pops_comb)}
 \KeywordTok{t.test}\NormalTok{(}\DataTypeTok{x=}\NormalTok{pops_shuf[}\DecValTok{1}\OperatorTok{:}\DecValTok{50}\NormalTok{], }\DataTypeTok{y=}\NormalTok{pops_shuf[}\DecValTok{51}\OperatorTok{:}\DecValTok{100}\NormalTok{], }\DataTypeTok{alternative=}\StringTok{"greater"}\NormalTok{)}\OperatorTok{$}\NormalTok{statistic}
\NormalTok{ \})}
\CommentTok{# Plot the "null distribution" from the randomization-based t-values}
\KeywordTok{hist}\NormalTok{(t_rand)}
\end{Highlighting}
\end{Shaded}

\includegraphics{foundational_statistics_files/figure-latex/unnamed-chunk-83-1.pdf}

\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{# Calculate the p-value for the test as the number of randomization t-values greater}
\CommentTok{# than or equal to our actual t-value observed from the data}
\NormalTok{p <-}\StringTok{ }\KeywordTok{sum}\NormalTok{(t_rand}\OperatorTok{>=}\NormalTok{t_obs)}\OperatorTok{/}\DecValTok{1000}
\NormalTok{p}
\end{Highlighting}
\end{Shaded}

\begin{verbatim}
## [1] 0.016
\end{verbatim}

\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{# p = 0.016, so we reject the null hypothesis of a population 1 mean less than or equal}
\CommentTok{# to the population 2 mean. The population 1 mean is likely larger than the population 2 mean.}
\end{Highlighting}
\end{Shaded}

A similar approach may be taken by randomly resampling (with replacement) from the combined vector of values for both populations, provided that the sample sizes are equal, in order to generate a null distribution against which the observed \emph{t} statistic may be compared. This approach would technically be considered a ``bootstrap'' \emph{t}-test. Both randomization and resampling approaches should yield similar results for moderate to large sample sizes. For small sample sizes the randomization approach is preferable, as all values from both populations will be included in each pseudo-replicate.

\hypertarget{a-summary-of-key-components-of-hypothesis-testing}{%
\section{A summary of key components of hypothesis testing}\label{a-summary-of-key-components-of-hypothesis-testing}}

\begin{itemize}
\item
  \emph{p}-value = The probability, calculated assuming the null hypothesis is true, of obtaining a test statistic at least as extreme as the one observed. If our observed test statistic is very extreme in relation to the distribution under the null hypothesis, the \emph{p}-value will be very small.
\item
  \(\alpha\) = The Type I error rate for a hypothesis test. Often stated as a ``critical p-value cutoff'' for experiments, i.e.~the Type I error we are willing to tolerate.
\item
  \(\beta\) = The Type II error rate for a hypothesis test: the probability of failing to reject a false null hypothesis.
\item
  Power = The probability that a test will correctly reject the null hypothesis (1 - \(\beta\)). It depends on effect size, sample size, chosen \(\alpha\), and population standard deviation.
\item
  Multiple testing = Performing the same or similar tests multiple times. When we perform multiple hypothesis tests to answer a general study question (like in the case of analyzing many genes in an RNA-seq experiment), we need to adjust the \(\alpha\) threshold to be lower than it would be for a single test. There are multiple ways to correct \emph{p}-values if multiple testing is used. One correction uses a ``tax'' (e.g.~the \textbf{Bonferroni} adjustment) based simply on the number of tests, while another is the direct estimation of a \textbf{False Discovery Rate (FDR)}. We will return to the multiple testing problem when we consider ANOVA.
\end{itemize}

\hypertarget{exercises-associated-with-this-chapter-6}{%
\section{Exercises associated with this chapter:}\label{exercises-associated-with-this-chapter-6}}

\begin{itemize}
\tightlist
\item
  Problem Set 3
\end{itemize}

\hypertarget{additional-learning-resources-6}{%
\section{Additional learning resources:}\label{additional-learning-resources-6}}

\begin{itemize}
\item
  Irizarry, R. A. Introduction to Data Science. \url{https://rafalab.github.io/dsbook/} - A gitbook written by a statistician, with great introductions to key topics in statistical inference.
\item
  Logan, M. 2010. Biostatistical Design and Analysis Using R. - A great intro to R for statistical analysis
\end{itemize}
\hypertarget{correlation-and-simple-linear-regression}{%
\chapter{Correlation and Simple Linear Regression}\label{correlation-and-simple-linear-regression}}

\hypertarget{background-6}{%
\section{Background}\label{background-6}}

Quite frequently we want to know whether two continuous variables are \emph{related} based on measuring them in the same set of observations, and if so, how and how strongly they are related. When two random variables (say \emph{X} and \emph{Y}) deviate from their respective means in a systematic, predictable way, we say that they \textbf{\emph{covary}}, or that they are \textbf{\emph{correlated}} variables. Levels of expression for pairs of genes, for example, are often correlated, especially if the genes are members of the same regulatory network. Two genes may share the same transcription factor, for instance, and when the abundance of that transcription factor increases in cells, so do transcript levels for the two genes. In this case if you measure abundance of both transcripts in a sample of cells, tissues, individuals, or whatever, you may well find many observations with low expression values for both genes, many with moderate expression values for both, and many with high values for both genes. Clearly in this situation there appears to be a ``positive'' relationship between the two gene expression variables, but as statisticians how do we formally describe the relationship better, and how might we make inferences about the system from a sample? This chapter focuses on the estimation of parameters, and the testing of hypotheses, relevant to relationships between quantitative variables.

\hypertarget{covariance-and-correlation}{%
\section{Covariance and correlation}\label{covariance-and-correlation}}

Before we get into the parameters of interest and how we estimate them from samples, we should first make some practical considerations. Two variables may covary for a number of reasons, which may or may not involve one variable systematically influencing the other. We would call that a ``causal'' relationship, but covariance can arise for non-causal reasons too, such as in the example above. In that example the expression level of ``gene A'' was not influenced by the expression of ``gene B,'' but the two covaried simply because they were affected in similar ways by a third force (the transcription factor). This can be an important distinction (between causal and non-causal relationships) when thinking about how to proceed with analysis because for some statistics (like covariance and correlation) causality is not assumed or interpreted, but for other approaches (like regression) it might be. In the case of regression, which we will return to later in this chapter, there is a clear dependent (response) and independent (explanatory) variable. With regression models, especially in the case of controlled experiments in which the values of the explanatory variable are set and assigned by the experimenters, the goal is often to understand whether, and if so by what magnitude, that variable directly influences the response variable, in order to test hypotheses and/or make predictions about the system.

\hypertarget{covariance}{%
\subsection{Covariance}\label{covariance}}

We stated above that ``systematic deviation from respective means'' defines a situation in which two variables covary, but how do we actually convey this numerically?
One statistic, known as the \textbf{\emph{covariance}}, multiplies each \emph{y} and \emph{x} deviation (for a given observation) from its respective mean, sums those products across all observations, and divides by the total number of observations to yield an average. If individual values of one variable deviate from their mean in one direction, and \emph{corresponding} values of the other variable consistently deviate from their mean in the same (or the opposite) direction, the products in the sum will be either consistently positive or consistently negative, resulting in a substantial positive covariance, or a substantial negative covariance, respectively. If there is no consistent, directional deviation for the two variables, on the other hand, the products will sum to a covariance of zero (no relationship between variables).\\
The \emph{population} covariance can be expressed as:

\[cov(X,Y)=\sigma_{XY}=\frac{\sum_{i=1}^N (x_i-\mu_x)(y_i-\mu_y)}{N}\]

Where \(x_i\) and \(y_i\) correspond to the values of random variables \emph{X} and \emph{Y} for the \emph{i}th observation in a population of size \emph{N}, and \(\mu_x\) and \(\mu_y\) are the respective population means. Again, the important takeaway is that when the product of \((x_i-\mu_x)\) and \((y_i-\mu_y)\) is \emph{consistently} positive or negative across observations, the \emph{x} and \emph{y} variables are consistently deviating from their means in a similar or opposite manner, resulting in a positive or negative covariance.\\
To estimate covariance from a \emph{sample}, we divide by the degrees of freedom (\emph{n} - 1) instead of dividing by \emph{n}:

\[cov(x,y)=s_{xy}=\frac{\sum_{i=1}^n (x_i-\bar{x})(y_i-\bar{y})}{n-1}\]

\hypertarget{correlation}{%
\subsection{Correlation}\label{correlation}}

Remember when we noted (in Chapter 9) that variables with larger values on average tend to have larger variances as well (the ``positive mean-variance relationship'')? This dependence of variance magnitude on variable ``scale'' similarly applies to covariance. That is, if one or more variables that covary have relatively large values, it will be reflected in the magnitude of the covariance. For this reason, and much in the same way we use the coefficient of variation (CV) to adjust for scale when comparing standard deviations, we often use a standardized covariance called the \textbf{\emph{correlation coefficient}} that is obtained by dividing the covariance by the standard deviations of \emph{x} and \emph{y}. The correlation coefficient, therefore, ranges from -1 to 1. Values of -1 and 1 indicate perfect linear relationships, and a value of 0 indicates uncorrelated variables. The correlation coefficient (sometimes called the Pearson correlation coefficient) for a \emph{population} is:

\[\rho_{XY}=\frac{cov(X,Y)}{\sigma_X\sigma_Y} \]

Where \(cov(X,Y)\) is the population covariance between variables \emph{X} and \emph{Y}, and \(\sigma_X\) and \(\sigma_Y\) are the population standard deviations for \emph{X} and \emph{Y}. For a \emph{sample}, the Pearson correlation coefficient can be calculated as:

\[r_{xy}=\frac{\sum_{i=1}^n (x_i-\bar{x})(y_i-\bar{y})}{\sqrt{\sum_{i=1}^n (x_i-\bar{x})^2}\sqrt{\sum_{i=1}^n (y_i-\bar{y})^2}}\]

Where \(\bar{x}\) and \(\bar{y}\) are the sample means for variables \emph{x} and \emph{y}, and \emph{n} is the sample size. The following scatter plots show a range of scenarios for two variables \emph{x} and \emph{y}, depicting various relationship types and corresponding covariance and correlation values.
\includegraphics{foundational_statistics_files/figure-latex/unnamed-chunk-84-1.pdf}

In plots A and B we see a positive covariance and correlation. In B the covariance is large because the scale of \emph{x} is 10 times larger than in A, but the correlation coefficient is the same. In C we see a negative relationship between \emph{x} and \emph{y} (\emph{y} decreases as \emph{x} increases), and with covariance and correlation of greater magnitude than in A, owing to a ``tighter'' relationship. In plot D values for the variables \emph{x} and \emph{y} were both drawn randomly and independently, so there is no significant correlation or covariance.

\hypertarget{hypothesis-tests-for-correlation}{%
\subsection{Hypothesis tests for correlation}\label{hypothesis-tests-for-correlation}}

Formal hypothesis tests about correlation concern whether or not the population correlation coefficient (\(\rho\)) differs from zero. The null and alternative hypothesis statements are as follows:

\[H_0 : \rho = 0\]

\[H_A: \rho \neq 0\]

The null hypothesis can be tested by calculating a \emph{t} statistic, which is the sample correlation coefficient (\emph{r}) standardized by its standard error. Below is one way to calculate \emph{t}:

\[t=r\sqrt{\frac{n-2}{1-r^2}}\]

Where \emph{n} is the sample size and \emph{r} is the sample correlation coefficient. This \emph{t} statistic can then be compared to a \emph{t} distribution with \emph{n}-2 degrees of freedom. In \texttt{R} the function \texttt{cor.test()} can be used for this parametric test, but keep in mind that the following assumptions apply:

\begin{itemize}
\item
  The relationship being tested under the alternative hypothesis is assumed to be linear (as opposed to strongly curvilinear), as the Pearson correlation coefficient won't characterize non-linear relationships adequately.
\item
  The ``joint probability distribution'' of the two variables in the population (and therefore the sample) is assumed to be bivariate normal. For this to be true, both \emph{x} and \emph{y} variables should be approximately normally distributed in the sample.
\end{itemize}

There are non-parametric alternatives to test the above null hypothesis that \(\rho\) = 0 when either of these assumptions is not met. Rank-based approaches calculate a test statistic based on the ranks of \emph{x} and \emph{y} values, so they are appropriate as long as the association between the variables is monotonic (consistently increasing or decreasing) in nature. The Spearman's rank correlation test is best suited for small sample sizes (e.g. \emph{n} \textless{} 30), and the Kendall's tau (\(\tau\)) test is more appropriate for larger sample sizes. These tests can also be performed in \texttt{R} using the \texttt{cor.test()} function, by supplying either ``spearman'' or ``kendall'' to the \texttt{method} argument. Yet another nonparametric option would be to perform a randomization or bootstrap test for \(\rho\) = 0, by either shuffling or resampling \emph{x} and \emph{y} values independently to generate a null distribution for the sample correlation coefficient \emph{r}.
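As a brief sketch of the \texttt{cor.test()} interface described above (the vectors \texttt{x\_cor} and \texttt{y\_cor} are simulated placeholders rather than data from this chapter):

\begin{verbatim}
# Simulated, hypothetical example vectors; replace with your own data
x_cor <- rnorm(n=40, mean=10, sd=2)
y_cor <- 0.5*x_cor + rnorm(n=40, mean=0, sd=1)

# Parametric (Pearson) test of the null hypothesis that rho = 0
cor.test(x_cor, y_cor)

# Rank-based alternatives for when the assumptions are questionable
cor.test(x_cor, y_cor, method="spearman")
cor.test(x_cor, y_cor, method="kendall")
\end{verbatim}

In each case the function reports the estimated correlation coefficient along with the test statistic and \emph{p}-value for the null hypothesis that \(\rho = 0\).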
You may have noticed that correlation analysis can tell us whether, in what direction, and how ``tightly'' two variables are correlated, but it is agnostic with respect to other properties of the relationship, namely the \emph{steepness} of the relationship (i.e.~the rate at which \emph{y} decreases (or increases) with an increase in \emph{x}). This parameter, which is extremely important to understand in a variety of practical contexts, is inferred using linear regression.

\hypertarget{simple-linear-regression}{%
\section{Simple linear regression}\label{simple-linear-regression}}

We can also model linear relationships between variables using linear equations, with which you are probably quite familiar. Linear regression, as we refer to this approach in statistics, has been around since the 19th Century, when the biometrician Francis Galton developed it to understand phenotypic similarity between human parents and their offspring. One of the traits Galton studied extensively, for example, was adult height. Linear regression models describe how the magnitude of a response variable \emph{y} changes as a function of a predictor variable \emph{x}, based on the generic equation \(y=bx+a\). In this equation \emph{b} (the slope) gives the amount of change that occurs in \emph{y} per unit of \emph{x}, and \emph{a} is the ``y-intercept'' (the value of \emph{y} when \emph{x} = 0). Not surprisingly, \emph{b} \textgreater{} 0 indicates a positive relationship between \emph{x} and \emph{y}, \emph{b} \textless{} 0 indicates a negative relationship, and when \emph{b} = 0 there is no linear relationship between \emph{x} and \emph{y}.

If we consider \emph{X} and \emph{Y} as random variables in a population, from an estimation standpoint we may naturally be interested in estimating the population slope \(\beta_1\). The population y-intercept \(\beta_0\) is also a parameter in the linear regression model, but it is usually of little interest inference-wise. Under our usual sampling-based inference framework we can represent a simple linear regression model as:

\[y_i=\beta_0+\beta_1x_i+\varepsilon_i\]

Where our sample includes \emph{y} and \emph{x} values across \emph{i} observations, and with the aforementioned designations for population slope and intercept. Importantly, because we rarely expect a perfect, straight-line relationship between \emph{X} and \emph{Y}, we include an ``error'' (or ``residual'') term \(\varepsilon_i\) in the model. This term absorbs any ``noise'' (i.e.~random error unexplained by the effect of \emph{X}), and can be quantified by departures of \emph{y} values from the straight line dictated by the model. We will return to these departures, also called ``residuals,'' repeatedly in this chapter and the next.

You may be asking how we estimate \(\beta_1\) and \(\beta_0\) from a sample. Similar to using a formula to calculate a sample mean, a sample correlation coefficient, etc., we can calculate a sample slope (\(b_1\)) and intercept (\(b_0\)) using one of several ``best fit'' equations. One of these, known as \textbf{\emph{ordinary least squares (OLS)}}, or ``model I regression,'' derives a linear equation for a straight line such that the sum of the squared vertical distances between \emph{y}-values in the sample and points on the line (the ``predicted'' \emph{y}-values) is minimized. Effectively, the goal with this approach is to minimize the variation in \emph{y} unexplained by \emph{x}. The slope and intercept of this ``best fit line'' are \(b_1\) and \(b_0\), our estimates for \(\beta_1\) and \(\beta_0\).

\hypertarget{hypothesis-tests-in-linear-regression}{%
\subsection{Hypothesis tests in linear regression}\label{hypothesis-tests-in-linear-regression}}

The first hypothesis testing approach for linear regression involves the individual parameters (\(\beta_1\) and \(\beta_0\)) themselves.
We can state a null hypothesis for the slope and intercept:

\[H_0: \beta_1=0\]

\[H_0: \beta_0=0\]

Alternative hypotheses are, of course, that these parameters are not equal to zero. As discussed, a nonzero population slope indicates a relationship between \emph{X} and \emph{Y}, and the slope's magnitude indicates the rate at which \emph{Y} changes with \emph{X}. A nonzero \emph{y}-intercept indicates the ``background'' level for \emph{Y} in the absence of \emph{X} but, as stated, is usually not of too much interest. Both the sample slope and sample intercept can be used to calculate respective \emph{t} statistics (\(t=\frac{b}{s_b}\)), where the denominator is the usual standard error of the point estimate, and \emph{t} can be compared to a \emph{t} distribution with \emph{n} - 2 degrees of freedom.

A more generalized framework for testing linear regression hypotheses involves considering the amount of variation explained by the \textbf{\emph{full}} linear model (\(y_i=\beta_0+\beta_1x_i+\varepsilon_i\)) relative to the amount of variation it does not explain. If the amount of variation in \emph{y} explained by the full model is significantly greater than the amount unexplained, we should reject our null hypothesis of a zero slope. In practice, though, we can only directly measure \emph{unexplained variation}, so we calculate the difference between the unexplained variation in the \textbf{\emph{reduced model}}, in which the slope is set to zero (\(y_i=\beta_0+\varepsilon_i\)), and the unexplained variation in the full model above. If this difference is large, it means that the full model explains a lot of variation in \emph{y}, and, as said, we should reject our null hypothesis.

You may be wondering how we quantify the variation unexplained in full and reduced linear models. This brings us back to the concept of \textbf{\emph{residuals}}. By calculating the sum of squared deviations of observed \emph{y} values from \emph{y} values predicted under a given model (which we call \(\hat{y}\)s), we can measure unexplained variation. These measures are referred to as ``sums of squares'' (SS). Based on what was stated above, the ratio \(\frac{SS_{reduced}-SS_{full}}{SS_{full}}\) (with the numerator and denominator each scaled by the appropriate degrees of freedom) gives us the quantity we need to test the null hypothesis. The following example illustrates how SSs work for reduced and full models.

\begin{center}\includegraphics[width=14.22in]{images/images_4b.011} \end{center}

In this example, there appears to be a negative relationship between body mass and captures. The reduced model, with a slope of zero, is not as good a fit, so the SS (reflected in the vertical lines characterizing residuals) is greater than in the full model. Under the assumptions below, the ratio of explained to unexplained variation (called an \emph{F}-ratio) can be compared to the \emph{F} distribution for the null hypothesis test. Extremely large values of \emph{F} are unlikely to be observed due to random chance under the null hypothesis, so if the \emph{F}-ratio is large enough, we reject the null hypothesis.

\begin{itemize}
\item
  A linear relationship between the variables under the alternative hypothesis is assumed. Non-linear relationships (such as curvilinear ones) are not modeled adequately by this framework and need to be analyzed differently. This assumption can be checked with a scatter plot.
\item
  Both variables are assumed to be normally distributed, so samples should also reflect normality, and can be checked in the usual ways (boxplots, histograms, etc.).
\item
  Variance of the response variable (i.e. \emph{y}) is assumed to be homogeneous across all values of the explanatory variable (i.e. \emph{x}). In regression, this assumption is evaluated in the context of the fitted line. The residuals should form a uniform ``band'' of points when plotted against predicted values of \emph{y}. A ``residual plot'' will address this assumption. Below is an example of what to look for in that plot type.
\end{itemize}

\begin{center}\includegraphics[width=14.22in]{images/images_4b.018} \end{center}

In the plots above, (a) shows the expected residual pattern under our assumptions, while (b), (c), and (d) show patterns of unequal or systematically changing variance, all violations of linear regression assumptions. The section below describes how to view a residual plot in \texttt{R}.

\hypertarget{linear-regression-in-r}{%
\subsection{\texorpdfstring{Linear regression in \texttt{R}}{Linear regression in R}}\label{linear-regression-in-r}}

Fitting a regression model in \texttt{R} is very simple. We use the function \texttt{lm()} to specify the structure of the model. The \texttt{lm()} function can actually be used to fit an entire class of models we call ``general linear models.'' We will return to this idea in the next chapter, when we discuss categorical predictors and ANOVA. For now, know that you can fit a simple regression model with \texttt{lm()} using the simple \texttt{\textasciitilde{}} syntax. The response (\emph{y}) variable goes to the left of the \texttt{\textasciitilde{}}, and the predictor variable to the right. Below is an example of how to fit a regression model for the toy data set (panel A) used to demonstrate covariance and correlation above.

\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{## First, plot the relationship with a scatter plot}
\KeywordTok{plot}\NormalTok{(x_}\DecValTok{1}\NormalTok{, y_}\DecValTok{1}\NormalTok{, }\DataTypeTok{cex=}\FloatTok{0.7}\NormalTok{, }\DataTypeTok{pch=}\DecValTok{19}\NormalTok{, }\DataTypeTok{ylab=}\StringTok{"y"}\NormalTok{, }\DataTypeTok{xlab=}\StringTok{"x"}\NormalTok{)}
\end{Highlighting}
\end{Shaded}

\includegraphics{foundational_statistics_files/figure-latex/unnamed-chunk-87-1.pdf}

\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{## Define the model using the lm() function and assign it to the object "reg_mod_1"}
\NormalTok{reg_mod_}\DecValTok{1}\NormalTok{ <-}\StringTok{ }\KeywordTok{lm}\NormalTok{(y_}\DecValTok{1} \OperatorTok{~}\StringTok{ }\NormalTok{x_}\DecValTok{1}\NormalTok{)}
\CommentTok{## We can make a residual plot to help evaluate assumptions}
\KeywordTok{plot}\NormalTok{(reg_mod_}\DecValTok{1}\OperatorTok{$}\NormalTok{fitted.values, reg_mod_}\DecValTok{1}\OperatorTok{$}\NormalTok{residuals, }\DataTypeTok{xlab=}\StringTok{"predicted y"}\NormalTok{, }\DataTypeTok{ylab=}\StringTok{"residuals"}\NormalTok{)}
\end{Highlighting}
\end{Shaded}

\includegraphics{foundational_statistics_files/figure-latex/unnamed-chunk-87-2.pdf}

As you can see in the residuals plot, our assumption of equal variance looks to be met. There is no clear structure in the relationship between fitted values and residuals. Now let's look at model details.
\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{## We can use the summary() function to look at parameter estimates and hypothesis tests}
\KeywordTok{summary}\NormalTok{(reg_mod_}\DecValTok{1}\NormalTok{)}
\end{Highlighting}
\end{Shaded}

\begin{verbatim}
## 
## Call:
## lm(formula = y_1 ~ x_1)
## 
## Residuals:
##     Min      1Q  Median      3Q     Max 
## -3.0042 -0.8783 -0.2644  1.1652  2.2413 
## 
## Coefficients:
##             Estimate Std. Error t value Pr(>|t|)    
## (Intercept)  1.02754    0.52547   1.955   0.0577 .  
## x_1          0.79920    0.07803  10.243 1.29e-12 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## 
## Residual standard error: 1.437 on 39 degrees of freedom
## Multiple R-squared:  0.729, Adjusted R-squared:  0.7221 
## F-statistic: 104.9 on 1 and 39 DF,  p-value: 1.292e-12
\end{verbatim}

How do we interpret all of this output? The most important details begin with your \textbf{Coefficients}. Here we can see values associated with our \texttt{(Intercept)}, corresponding to \(\beta_0\) in the standard linear model, and the slope associated with our predictor(s), in this case \texttt{x\_1}, corresponding to \(\beta_1\). The four values associated with each term are the actual estimate for that coefficient, the standard error of that estimate, its associated test statistic (a \emph{t}-value for linear regression), and finally the \emph{p}-value. Note that the last two values are derived under the null assumption that the intercept \(\beta_0\) and the slope \(\beta_1\) are equal to 0. Based on the test statistic associated with \texttt{x\_1}, and given that our data appear to meet the assumptions of the test, we should reject the null hypothesis of no linear relationship with high confidence.

\hypertarget{a-note-on-the-coefficient-of-determination}{%
\subsection{A note on the coefficient of determination}\label{a-note-on-the-coefficient-of-determination}}

There is a clear connection between regression and correlation if we consider the sources of unexplained variation in a regression model. As it turns out \(1-\frac{SS_{full}}{SS_{reduced}}\) quantifies the proportion of variance in \emph{y} that is explained by \emph{x}. This quantity is also called \(r^2\), the ``coefficient of determination,'' and, for simple linear regression, is the square of the correlation coefficient \(r\). \(r^2\)s (sometimes called ``R-squared values'') are commonly reported in regression analysis results.

\hypertarget{a-note-on-model-ii-regression}{%
\subsection{A note on model II regression}\label{a-note-on-model-ii-regression}}

As stated, OLS regression assumes that we don't have any error associated with our explanatory variable (\emph{x}) values. While this certainly can be the case for experiments in which we set those values (or at least establish them with great precision), in many cases (especially in observational or descriptive studies) we have as much measurement error for the \emph{x} variable as we do for the \emph{y} variable. In these cases, that uncertainty of measurement for \emph{x} needs to be accounted for when fitting regression models. We use models classified as ``model II'' for these cases. Going into details about them is beyond the scope of this course, but you should at least know that they exist. The figure below depicts how residuals are calculated for three different versions of model II regression.
\begin{center}\includegraphics[width=13.89in]{images/images_5a.002} \end{center}

\begin{itemize}
\item
  Major Axis (MA) regression should be used when \emph{x} and \emph{y} have the same error, and they have the same units or are dimensionless.
\item
  Ranged Major Axis (ranged MA) regression should be used when there is error in both \emph{x} and \emph{y}, but they are on different scales or have different units. This approach should not be used when there are outliers (observations with large residuals).
\item
  Reduced Major Axis (RMA or SMA) regression should be used when there is error in both \emph{x} and \emph{y}, but they are on different scales or have different units. This method is robust to outliers and used when the two variables are strongly correlated.
\end{itemize}

\hypertarget{exercises-associated-with-this-chapter-7}{%
\section{Exercises associated with this chapter:}\label{exercises-associated-with-this-chapter-7}}

\begin{itemize}
\tightlist
\item
  Problem Set 3
\end{itemize}

\hypertarget{additional-learning-resources-7}{%
\section{Additional learning resources:}\label{additional-learning-resources-7}}

\begin{itemize}
\item
  Irizarry, R. A. Introduction to Data Science. \url{https://rafalab.github.io/dsbook/} - A gitbook written by a statistician, with great introductions to key topics in statistical inference.
\item
  Logan, M. 2010. Biostatistical Design and Analysis Using R. - A great intro to R for statistical analysis
\end{itemize}

\hypertarget{introduction-to-analysis-of-variance}{%
\chapter{Introduction to Analysis of Variance}\label{introduction-to-analysis-of-variance}}

\hypertarget{background-7}{%
\section{Background}\label{background-7}}

In the last chapter we covered analysis situations in which we want to understand the relationship between two continuous variables. We also learned that in some of those situations there is a clear response (\emph{y}) and a clear predictor (\emph{x}) variable, making possible the application of linear regression analysis. In this chapter we will continue to work within the space of response and predictor variables, but now we consider one or more predictors that are categorical (not quantitative) in nature. This type of study design is especially common when we apply treatments that fall into classes (e.g. ``mutant'' vs ``wild-type'' in genetics) or observe explanatory factors in nature that are qualitative (e.g.~some climatic and geological conditions). One approach to this type of problem is called ``Analysis of Variance'' (ANOVA or AOV), and it, like other frequentist methods we have discussed, was formalized in the early 1900s.

The general idea behind ANOVA is that we can test hypotheses about differences in group means for a response variable by comparing average within-group variance to among-group variance. In this case the ``groups'' are different factor levels of our explanatory variable(s). When within-group variances are substantially smaller than the among-group variance it stands to reason, given a few assumptions, that the distributions (and therefore the means) of at least some of the ``groups'' are different. Interestingly, this exercise of variance partitioning is tractable in a regression framework, because we can calculate sums of squares to reflect different variance components, and we can conceptualize the degree of difference between group means much in the same way we think about a slope in a regression model.
For this reason, approaches such as regression, ANOVA, and others are all categorized as \textbf{\emph{general linear models}}.

\hypertarget{general-linear-models}{%
\section{General linear models}\label{general-linear-models}}

As mentioned, we can express the effects of categorical predictor variables on a numeric response in models that are very similar to regression models. Recall that for regression, we used the following straight-line model:

\[y_i=\beta_0+\beta_1x_i+\varepsilon_i\]

Where \(y\) is the response variable, \(x\) is the predictor variable, \(\beta_0\) is the \emph{y}-intercept, \(\beta_1\) is the slope, and \(\varepsilon\) is the unexplained variation, or error. In the case of a single categorical predictor, for example, we can similarly include effects of each factor level relative to the overall mean of the response variable, as follows:

\[y_{ij}=\mu+\beta_1(level_1)_{ij}+\beta_2(level_2)_{ij}+...+\varepsilon_{ij}\]

Where each group (factor level) \(i\) contains a number of observations \(j\), \(\mu\) is the overall mean of \(y\), \(\beta\)s represent the effects of the corresponding factor levels relative to the overall mean, and \(\varepsilon_{ij}\) is the error term. You can think of \(\mu\) as being analogous to the \emph{y}-intercept, and the \(\beta\)s as adding or subtracting effect sizes to or from the grand mean. Because factor levels don't actually take on numeric values, in practice they are encoded using what are called binary ``dummy'' variables. If a particular observation is in group \(i\), it is represented as a ``1'', and otherwise as a ``0''. So, although your data frame (in \texttt{R}) may include a factor with three factor levels (for example ``A'', ``B'', and ``C''), under the hood \texttt{R} functions use three dummy variables to encode that factor and perform the appropriate calculations. A shorthand linear model notation, which collapses all level effects for a given factor into one term (denoted by \(\alpha\)), is often used:

\[y_{ij}=\mu+\alpha_i+\varepsilon_{ij}\]

Where \(\alpha_i\) represents the effect of belonging to group \(i\), expressed as the difference between each group \(i\) mean (\(\mu_i\)) and the overall mean (\(\mu\)). This notation is more convenient, especially when more than one factor is included in the model, a situation we will address later in the chapter.

\hypertarget{single-factor-anova}{%
\section{Single-factor ANOVA}\label{single-factor-anova}}

Single-factor Analysis of Variance describes the case in which we have a single quantitative response variable and a single categorical predictor variable. As discussed, the predictor variable (which we call a \textbf{\emph{factor}}) consists of two or more \textbf{\emph{factor levels}} that make up the possible conditions, or categories, of that variable. The procedure for ANOVA involves calculating the sums of squares (SS) describing variation between/among factor levels (groups), and the SS describing variation within each group. We divide each of these SS values by the appropriate degrees of freedom (resulting in values we refer to as ``mean squares'' or MS). Finally we divide the group-level MS (\(MS_{groups}\)) by the within-group MS (called \(MS_{residual}\) because it represents the residual variation not explained by the factor). This value is an \emph{F}-ratio, which should sound familiar from the regression section of the last chapter.
Recall that an \emph{F}-ratio (in this case \(F=\frac{MS_{groups}}{MS_{residual}}\)) quantifies how much variation in the response variable is explained by a model, relative to how much variation is not explained by it. Large \emph{F}-ratios in the case of ANOVA indicate that the explanatory variable (the factor) is explaining a significant amount of variation in \emph{y} relative to the overall variation. We compare \emph{F} to an \emph{F} distribution with the appropriate degrees of freedom in order to calculate our \emph{p}-value for a given hypothesis test.

Let's walk through an example to help visualize what is actually going on when we perform single-factor ANOVA. Say that we are studying the percent time that male mice experiencing discomfort spend ``stretching,'' and that we are interested in how social context influences this variable. We have data from an actual experiment (Langford et al.~2006) in which mice experiencing mild discomfort (the result of an injection of 0.9\% acetic acid into the abdomen) were randomly assigned to one of three social treatments: 1. isolation, 2. housed with a companion mouse not injected, or 3. housed with a companion mouse also injected and exhibiting ``stretching'' behaviors associated with discomfort. The results suggest that mice stretch the most when a companion mouse is also experiencing mild discomfort. Mice experiencing pain appear to ``empathize'' with co-housed mice also in pain.

To verbally state a linear model for the analysis of this experiment, we might say: \(stretching=mean_{overall}+treatment\). This model statement includes a response variable, a constant, and an explanatory variable. If we plot the data, we can see the respective distributions for time spent stretching among the three different treatments.

\begin{center}\includegraphics[width=0.8\linewidth]{images/images_5b.014} \end{center}

Note that in this type of plot (sometimes referred to as a ``strip chart''), the points are ``jittered'' with respect to the factor levels along the \emph{x}-axis, to assist with seeing all of the points clearly. In \texttt{R} we can use either the \texttt{plot()} function or the \texttt{stripchart()} function with the \texttt{method="jitter"} argument to do this. By the looks of the plot, we might reasonably suspect that the ``injected companion'' treatment appears to shift the percent time spent stretching up, relative to the other groups. As a consequence, we would expect the among-group SS to be larger than the average within-group SS. To calculate the appropriate \emph{F}-ratio for this data set, we would use the MS equations from column 3 of the table below (from Logan 2010):

\begin{center}\includegraphics[width=1\linewidth]{images/images_5b.018} \end{center}

The next figure (also from Logan 2010) shows how SS, and therefore MS, calculations are visualized in a situation with two factor levels. Our mouse example from above includes three factor levels, but the concept is exactly the same. The quantity in the red box divided by the quantity in the blue box forms our \emph{F}-ratio, which we can then use to test our null hypothesis of no effect of treatment.
\begin{center}\includegraphics[width=0.9\linewidth]{images/images_5b.019} \end{center}

\hypertarget{single-factor-anova-hypothesis-tests}{%
\subsection{Single-factor ANOVA hypothesis tests}\label{single-factor-anova-hypothesis-tests}}

For the type of single-factor ANOVA described above, we can state our null and alternative hypotheses in terms of the group means, or in terms of the ``effect'' of the explanatory variable. These are of course equivalent, but let's state them below (in terms of the mouse stretching experiment) just to be complete. For population means, we could state the following:

\[H_0:\mu_{isolated}=\mu_{companion}=\mu_{inj-companion}\]

\[H_A:\mu_{isolated}\neq\mu_{companion}=\mu_{inj-companion}\]

OR

\[\mu_{isolated}=\mu_{companion}\neq\mu_{inj-companion}\]

OR

\[\mu_{isolated}\neq\mu_{companion}\neq\mu_{inj-companion}\]

Recall that ANOVA tests for \textbf{\emph{any difference}} among groups, so there are multiple possible scenarios of group difference when we have more than two factor levels. Stated in terms of ``effect,'' we can state the hypotheses as follows:

\[H_0:\alpha_i=0\]

\[H_A:\alpha_i\neq0\]

Where \(i\) represents any of our three factor levels (treatments).

At this point we should also introduce (briefly) the idea of fixed versus random effects (factors) in linear models and ANOVA. So far when discussing explanatory variables we have understood them as factors with levels we want to explicitly compare. In the mouse experiment above, comparing the effects of the three social treatments on time spent stretching was a clear focus of our inference. We wanted to test specifically whether those three factor levels could be influencing the response variable. This is an example of a \textbf{\emph{fixed factor or effect}}. If groups are predetermined, of direct interest, and/or repeatable, they should most likely be treated as a fixed effect. Examples include experimental treatments, doses, age groups, habitat type, season, etc. Any conclusions reached from the analysis are specific to those factor levels and should not be generalized to other possible factor levels.

In some cases, though, we may be interested in whether a response variable is affected generically by a factor with a large range of possible levels. Examples may include plots, animal cages, kinship units, batches, buildings, etc. In these cases, assuming we don't care about the individual factor levels \emph{per se}, we can instead think of the factor levels in an experiment as a random sample of many possible levels. We call factors in this situation \textbf{\emph{random factors or effects}}. Many of the factors in your own studies will be fixed, but you should also consider the possibility of random factors in your study design. As stated below, how we frame the hypothesis for ANOVA depends on whether a factor is fixed versus random. Complex ANOVA models that include both fixed and random factors are called ``mixed models,'' and they are beyond the scope of this course. It is good practice, however, to learn the basic distinction between fixed and random effects. The null and alternative hypotheses for a random effect consider whether the variance associated with differing levels is zero:

\[H_0:\sigma_{\alpha}^2=0\]

\[H_A:\sigma_{\alpha}^2\neq0\]

The consideration is whether including the factor in the model explains any variance in the response variable.
\hypertarget{anova-assumptions}{%
\subsection{ANOVA assumptions}\label{anova-assumptions}}

The following assumptions should be met if the \emph{F}-ratio and \emph{F} distribution are used to evaluate an ANOVA hypothesis:

\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\item
  The response variable is approximately normally distributed in all groups (factor levels). Some departures from normality are tolerable if sample sizes and variances across the groups are equal. The normality assumption can be evaluated by histograms and boxplots, for example.
\item
  Variances are equal across groups. As long as there is no clear relationship between variance and mean or variance and sample size across groups, some departures from this assumption are tolerable if sample sizes are equal. Boxplots, mean vs.~variance plots, and equal variances tests can be used to address this assumption.
\item
  Observations in a group are independent. As discussed previously, observations in a group should be randomly sampled and not structured in any way. If there is structure within groups (e.g.~via kinship, etc.), that structure should be dealt with by adding the appropriate nested terms to the model (see below).
\end{enumerate}

\hypertarget{post-hoc-comparisons}{%
\subsection{Post-hoc comparisons}\label{post-hoc-comparisons}}

We stated above that a fixed, single-factor ANOVA tests whether any of the group means are different from one another. This hypothesis test may be sufficient to address the questions of the analyst, but in many cases we want to know which factor levels are significantly different. This can be achieved via several different approaches. Planned comparisons or ``contrasts'' are defined in advance of the ANOVA, and performing them is part of the model evaluation. Setting up planned contrasts requires defining comparisons based on rules, which include the avoidance of comparisons among groups that overlap, for one. Learning to set them up properly is a more advanced topic beyond what we can reasonably cover in this course. Post-hoc, or ``unplanned'' comparisons, however, make all pairwise comparisons among factor levels and are relatively straightforward to perform. Their implementation is similar to the performance of individual \emph{t}-tests, adjusted to account for the inflated Type I error associated with multiple testing. The \texttt{R} package \texttt{multcomp} can be used to perform a variety of post-hoc comparisons.

\hypertarget{single-factor-anova-in-r}{%
\subsection{\texorpdfstring{Single-factor ANOVA in \texttt{R}}{Single-factor ANOVA in R}}\label{single-factor-anova-in-r}}

In \texttt{R} we can use either the \texttt{lm()} or \texttt{aov()} functions to define fixed-effects ANOVA models, and then evaluate the models (e.g.~run \emph{F} tests, print ANOVA tables, etc.) using functions like \texttt{summary()} or \texttt{anova()} on the fitted model objects. The \texttt{aov()} function will format calculations in such a way as to present a traditional ANOVA table, whereas the \texttt{lm()} function will enable presentation of the parameter estimates. In the example below we will perform ANOVA using the \texttt{iris} data set to test whether there is a difference in mean sepal length among three flower species.
\begin{Shaded} \begin{Highlighting}[] \KeywordTok{stripchart}\NormalTok{(iris}\OperatorTok{$}\NormalTok{Sepal.Length }\OperatorTok{~}\StringTok{ }\NormalTok{iris}\OperatorTok{$}\NormalTok{Species, }\DataTypeTok{vertical=}\NormalTok{T, }\DataTypeTok{method=}\StringTok{"jitter"}\NormalTok{,} \DataTypeTok{ylab=}\StringTok{"sepal length"}\NormalTok{, }\DataTypeTok{xlab=}\StringTok{"species"}\NormalTok{, }\DataTypeTok{pch=}\DecValTok{19}\NormalTok{, }\DataTypeTok{cex=}\FloatTok{0.5}\NormalTok{)} \end{Highlighting} \end{Shaded} \includegraphics{foundational_statistics_files/figure-latex/unnamed-chunk-93-1.pdf} \begin{Shaded} \begin{Highlighting}[] \CommentTok{# Set up and evaluate linear model using lm() and summary()} \NormalTok{iris_lm <-}\StringTok{ }\KeywordTok{lm}\NormalTok{(Sepal.Length }\OperatorTok{~}\StringTok{ }\NormalTok{Species, iris)} \KeywordTok{summary}\NormalTok{(iris_lm)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## ## Call: ## lm(formula = Sepal.Length ~ Species, data = iris) ## ## Residuals: ## Min 1Q Median 3Q Max ## -1.6880 -0.3285 -0.0060 0.3120 1.3120 ## ## Coefficients: ## Estimate Std. Error t value Pr(>|t|) ## (Intercept) 5.0060 0.0728 68.762 < 2e-16 *** ## Speciesversicolor 0.9300 0.1030 9.033 8.77e-16 *** ## Speciesvirginica 1.5820 0.1030 15.366 < 2e-16 *** ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ## ## Residual standard error: 0.5148 on 147 degrees of freedom ## Multiple R-squared: 0.6187, Adjusted R-squared: 0.6135 ## F-statistic: 119.3 on 2 and 147 DF, p-value: < 2.2e-16 \end{verbatim} In this output there are a few items that are especially useful. The most useful information is in the first 2 columns of the ``Coefficients'' table, which contain the group mean estimates and their standard errors. In default linear model output in \texttt{R}, the intercept is actually set to the mean of the first (alphabetically) factor level. In this case, ``setosa'' is the first factor level, so its mean (5.006) is the intercept. The means for ``versicolor'' and ``virginica'' are the \textbf{\emph{intercept plus the value in the column}}. So the versicolor mean is 5.936 and the virginica mean is 6.588. The last line in the output shows the results for the ANOVA null hypothesis test of equal means across all three species, including the \emph{F}-ratio, the groups and residuals degrees of freedom, respectively, and the \emph{p}-value. \begin{Shaded} \begin{Highlighting}[] \CommentTok{# Set up and evaluate ANOVA model using aov() and anova()} \NormalTok{iris_aov <-}\StringTok{ }\KeywordTok{aov}\NormalTok{(Sepal.Length }\OperatorTok{~}\StringTok{ }\NormalTok{Species, iris)} \KeywordTok{anova}\NormalTok{(iris_aov)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## Analysis of Variance Table ## ## Response: Sepal.Length ## Df Sum Sq Mean Sq F value Pr(>F) ## Species 2 63.212 31.606 119.26 < 2.2e-16 *** ## Residuals 147 38.956 0.265 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 \end{verbatim} Here the output is the ANOVA table, complete with the degrees of freedom, SS, and MS for groups and residuals, our \emph{F}-ratio, and our \emph{p}-value. Clearly, we reject the null hypothesis. There is at least one difference among the three group means. Finally, if we wanted to know whether all three species are different from each other, we could apply a ``Tukey's'' (post-hoc) test of all three mean pairs. 
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{library}\NormalTok{(multcomp)}
\end{Highlighting}
\end{Shaded}

\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{summary}\NormalTok{(}\KeywordTok{glht}\NormalTok{(iris_aov, }\DataTypeTok{linfct =} \KeywordTok{mcp}\NormalTok{(}\DataTypeTok{Species =} \StringTok{"Tukey"}\NormalTok{)))}
\end{Highlighting}
\end{Shaded}

\begin{verbatim}
## 
##   Simultaneous Tests for General Linear Hypotheses
## 
## Multiple Comparisons of Means: Tukey Contrasts
## 
## 
## Fit: aov(formula = Sepal.Length ~ Species, data = iris)
## 
## Linear Hypotheses:
##                             Estimate Std. Error t value Pr(>|t|)    
## versicolor - setosa == 0       0.930      0.103   9.033   <1e-08 ***
## virginica - setosa == 0        1.582      0.103  15.366   <1e-08 ***
## virginica - versicolor == 0    0.652      0.103   6.333   <1e-08 ***
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
## (Adjusted p values reported -- single-step method)
\end{verbatim}

The \emph{p}-values adjusted for multiple comparisons are reported, and they are all very low. We therefore conclude that all three species means are significantly different from one another.

\hypertarget{a-note-on-nonparametric-tests-similar-to-single-factor-anova}{%
\subsection{A note on nonparametric tests similar to single-factor ANOVA}\label{a-note-on-nonparametric-tests-similar-to-single-factor-anova}}

As with most categories of hypothesis test, there are nonparametric alternatives to ANOVA when the assumptions are not met. Randomization tests similar to the ones we have discussed previously can be used to generate a null distribution of \emph{F}-ratios on which to base a hypothesis test. There is also a rank-based ANOVA alternative that is similar to the Mann-Whitney U test we cited as a \emph{t}-test alternative. It is called the Kruskal-Wallis test, and is robust to non-normality and group variance differences. The Kruskal-Wallis test can be performed in \texttt{R} using the base function \texttt{kruskal.test()}.

\hypertarget{multi-factor-anova}{%
\section{Multi-factor ANOVA}\label{multi-factor-anova}}

When more than one categorical predictor variable is included in a study, we can apply our general linear model and ANOVA framework to accommodate this complexity. Indeed, it is often very important to include multiple predictor variables in a model, even if they are not of primary interest, and especially if they are expected to explain a significant amount of variation in the response variable. If these additional factors do explain variation in \emph{y}, it is crucial to include them in the model so that we can account for that variation and in the process ``isolate'' variation explained by the focal predictor(s) central to our questions.

For multiple fixed-effect factors, the guidelines described for single-factor ANOVA in the previous section (including hypotheses, assumptions, and model structure in \texttt{R}) hold. The only difference is that more than one term is added to the model on the ``right-hand side'' of the equation (or the \texttt{\textasciitilde{}} in \texttt{R} model notation), with some added syntax depending on the nature of the model. In the sections below we will briefly cover two different forms of multi-factor ANOVA: ``nested'' and ``factorial'' models. Nested models allow us to deal with observations that are, in some structured way, not independent, and factorial models allow us to test for statistical interactions among factors.
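As a preview of the model notation covered in the sections below, here is a minimal, hedged sketch of how additive, factorial, and nested terms are typically written with \texttt{R}'s formula syntax. The data frame \texttt{df} and the factors \texttt{A} and \texttt{B} are hypothetical placeholders, simulated only so that the code runs.

\begin{verbatim}
# Hypothetical, simulated data just to make the formula syntax concrete
df <- data.frame(
  y = rnorm(24),                                 # numeric response
  A = factor(rep(c("a1", "a2"), each=12)),       # first categorical factor
  B = factor(rep(c("b1", "b2", "b3"), times=8))  # second categorical factor
)

# Two main effects only (an "additive" model)
mod_additive  <- aov(y ~ A + B, data=df)

# Factorial model: main effects plus their interaction (A*B expands to A + B + A:B)
mod_factorial <- aov(y ~ A * B, data=df)

# B specified as an error stratum with Error(), as in a nested design
# (in a truly nested design each level of B would occur in only one level of A)
mod_nested    <- aov(y ~ A + Error(B), data=df)
\end{verbatim}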
\hypertarget{nested-anova}{%
\subsection{Nested ANOVA}\label{nested-anova}}

In many observational studies and experiments the sampling units we measure are heterogeneous in some way, be it spatially, temporally, or structurally. Measurements on individual organisms may vary substantially over time. Measurements in a plot may vary greatly depending on the region of the plot measured. The phenotypic effects of a mutation may vary across similar genetic backgrounds (e.g.~a family). In all of these cases, there could be a clear advantage to taking multiple, related measurements to capture some of this variation, especially when we have an experimental factor (e.g.~a treatment) that we are trying to test and understand.

This is where \textbf{\emph{nested models}} come into play. We group observations (e.g.~sub-replicates) or measurements (e.g.~repeated measurements from the same individual) that are not independent of one another, according to a ``nested'' term in our ANOVA model. This allows us to account for that heterogeneity mentioned above, and is necessary to avoid violating the assumption of independence. When the appropriate nestedness is not included in a model, observations or measurements that are not independent are used erroneously to calculate mean squares, resulting in an artificially low \emph{p}-value. This is called \textbf{\emph{pseudoreplication}}. The schematic below shows the structure of a model with a main factor of interest (``A'') and a nested factor (``B'') which groups the non-independent subreplicates.

\begin{center}\includegraphics[width=0.9\linewidth]{images/images_7a.005} \end{center}

In this hypothetical example we have \emph{k} subreplicates for each site. Assuming we had 3 subreplicates for each site and did not include ``B'' (site) as a nested term in our model, we would erroneously assume a sample size of 9 for each treatment group. By including ``B'' we properly account for the non-independence of observations within each site.

\hypertarget{nested-anova-hypothesis-tests}{%
\subsubsection{Nested ANOVA hypothesis tests}\label{nested-anova-hypothesis-tests}}

Hypothesis tests for nested ANOVAs take on the same basic structure as single-factor models, except that we can define a null and alternative hypothesis for each factor in our model. Typically nested terms are included to account for variation unexplained by the main factor(s) of interest and properly structure non-independence, so they are almost always considered as random effects. So hypotheses for main factors are stated as in the single-factor ANOVA section above, and hypotheses for nested factors are usually stated in random effects format:

\[H_0(B):\sigma_{\beta}^2=0\]

\[H_A(B):\sigma_{\beta}^2\neq0\]

Where the null hypothesis states that all possible levels of B within each level of the main factor (A) contribute no added variance to the response variable. The same assumptions we addressed for single-factor ANOVA hold for nested ANOVA. In the case of the independence assumption, we still have to ensure that levels of the nested term are independent. In the example above, the three ``sites'' within each treatment should be independent of one another.

\hypertarget{nested-anova-in-r}{%
\subsubsection{\texorpdfstring{Nested ANOVA in \texttt{R}}{Nested ANOVA in R}}\label{nested-anova-in-r}}

As mentioned, mixed model ANOVA is not always straightforward to set up in \texttt{R}, but simple cases (e.g.~one main and one nested factor) can be analyzed easily.
The \texttt{lm()} function will not accommodate mixed models, so we instead rely on \texttt{aov()} if the design is balanced (equal sample sizes) and \texttt{lme()} (package \texttt{nlme}) or \texttt{lmer()} (package \texttt{lme4}) for unbalanced designs. Below is some hypothetical \texttt{R} code that might be applied to the one main, one nested, two-factor example design above.

\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{## With aov() use "Error" with parentheses to specify the nested term.}
\CommentTok{## This will report the hypothesis test for the main effect.}
\NormalTok{mod_nested <-}\StringTok{ }\KeywordTok{aov}\NormalTok{(Response }\OperatorTok{~}\StringTok{ }\NormalTok{Treatment }\OperatorTok{+}\StringTok{ }\KeywordTok{Error}\NormalTok{(Site), df_name)}
\KeywordTok{summary}\NormalTok{(mod_nested)}

\CommentTok{## To report the relative % variation explained by the main vs. nested factor,}
\CommentTok{## we use the lme() function to fit the model and then VarCorr() to report variance components.}
\CommentTok{## Note that the nested term notation is different for lme()}
\KeywordTok{library}\NormalTok{(nlme)}
\KeywordTok{library}\NormalTok{(lme4)}

\NormalTok{mod_nested_lme <-}\StringTok{ }\KeywordTok{lme}\NormalTok{(Response }\OperatorTok{~}\StringTok{ }\NormalTok{Treatment, }\DataTypeTok{random=}\OperatorTok{~}\DecValTok{1}\OperatorTok{|}\NormalTok{Site, df_name)}
\KeywordTok{VarCorr}\NormalTok{(mod_nested_lme)}
\end{Highlighting}
\end{Shaded}

\hypertarget{factorial-anova}{%
\subsection{Factorial ANOVA}\label{factorial-anova}}

Another example of multi-factor ANOVA is the factorial model. In a strictly factorial ANOVA we don't have nestedness, but the design is set up so that each level from one fixed-effects (usually) factor is observed together with the levels of another fixed-effects (usually) factor, and those ``factor level combinations'' are replicated. Factorial ANOVA allows one to test hypotheses for the main effects (the individual factors) and ``interactions'' between the factors. A statistical interaction occurs when the effect of a level from one factor depends on the level of another factor.

For example, we may have a two-level factor like genotype (mutant vs.~wild-type) and a two-level treatment factor (control vs.~toxin). If all 4 combinations of the levels from those two factors are replicated across individuals in our experiment, we would have what is called a ``two-by-two'' factorial design. If we found that both mutant and wild-type groups respond similarly to the treatment (have the same control-toxin difference in means), there is no interaction. If, on the other hand, there is a difference in response (e.g.~there is a toxin-control difference for the wild-type group but not for the mutant group), we would say that there is evidence for an interaction, in this example a ``genotype-by-environment'' interaction. The phenomenon of epistasis in genetics is also an example of an interaction, between two or more loci, in which the phenotypic effect of a genotype at one locus depends on the genotype at another locus.

Because an interaction can take different forms, understanding the nature of an interaction is often made easier by plotting group means (and standard errors) in what is called an ``interaction plot.'' The \texttt{R} function \texttt{interaction.plot()} can be used to produce two-by-two interaction plots, or you can use the \texttt{plot()}, \texttt{points()}, and \texttt{segments()} functions to make a custom interaction plot.
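
For instance, here is a minimal sketch using simulated (entirely hypothetical) data, in which fertilizer only has an effect at the high temperature:

\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{## Simulate a small two-by-two design: temperature and fertilizer, 10 obs per combination}
\NormalTok{set.seed(1)}
\NormalTok{temp <- gl(2, 20, labels = c("low", "high"))}
\NormalTok{fert <- gl(2, 10, 40, labels = c("low", "high"))}
\CommentTok{## Growth gets a boost only when both temperature and fertilizer are high}
\NormalTok{growth <- 10 + 4 * (temp == "high") * (fert == "high") + rnorm(40)}
\CommentTok{## Plot the mean growth for each factor level combination}
\NormalTok{interaction.plot(x.factor = temp, trace.factor = fert, response = growth)}
\end{Highlighting}
\end{Shaded}

In a plot like this the two fertilizer lines start at about the same mean at low temperature but separate at high temperature; non-parallel lines are the visual signature of an interaction.
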
In the figure below (from Logan 2010), we see several possible scenarios for a hypothetical two-by-two factorial design. On the \emph{x}-axis are the two levels for the Temperature factor (high and low), and the two lines (dashed and solid) represent low and high fertilizer, respectively.

\begin{center}\includegraphics[width=0.75\linewidth]{images/images_7a.022} \end{center}

The upper-left plot shows a likely interaction between temperature and fertilizer, in which there is an effect of fertilizer on seedling growth rate, but only at the high temperature. In the upper-right plot we see what look like overall main effects of temperature and fertilizer, but no interaction (the lines are parallel). In the lower-left plot we see an effect of fertilizer, but no effect of temperature in either fertilizer treatment, so no interaction. In the lower-right plot we see a ``crossing'' interaction, in which fertilizer has an opposite effect at the two temperatures. In cases like the last one, it is possible to detect no significant main effects (because they ``average out''), but an obviously strong interaction.

\hypertarget{factorial-anova-hypothesis-tests}{%
\subsubsection{Factorial ANOVA hypothesis tests}\label{factorial-anova-hypothesis-tests}}

Fixed- and random-effects hypotheses for the individual factors in a factorial ANOVA are subject to the same hypothesis statements mentioned above. However, a separate null hypothesis for each interaction term is testable in factorial models. As mentioned, for two factors (A and B) there is a single interaction term (A:B), with the following null and alternative hypotheses, assuming fixed main effects:

\[H_0(AB):\mu_{ij}=\mu_i+\mu_j-\mu\]

\[H_A(AB):\mu_{ij}\neq\mu_i+\mu_j-\mu\]

The interaction null hypothesis may look a bit strange, but it's really just saying that if you compare a difference between levels of factor A within one level of factor B, to the same difference within another level of factor B, that ``difference of a difference'' should be zero if there is no interaction.

If at least one of the factors is a random-effects factor, then the interaction is understood as a random effect, with the following hypotheses:

\[H_0(AB):\sigma_{\alpha\beta}^2=0\]

\[H_A(AB):\sigma_{\alpha\beta}^2\neq0\]

The null hypothesis states that there is no additional variation in \emph{y} contributed by all possible interactions among all possible factor levels of A and B.

The same assumptions we addressed for single-factor ANOVA hold for factorial ANOVA, except that in this case groups are defined by factor level combinations, so the assumptions have to be met for each of those groups.

\hypertarget{factorial-anova-in-r}{%
\subsubsection{\texorpdfstring{Factorial ANOVA in \texttt{R}}{Factorial ANOVA in R}}\label{factorial-anova-in-r}}

In \texttt{R} we can use the \texttt{aov()} function to define fixed-effects factorial ANOVA models, and then evaluate the models (e.g.~run \emph{F} tests, print ANOVA tables, etc.) using functions like \texttt{summary()} or \texttt{anova()} on the fitted model objects. The \texttt{aov()} function will format calculations in such a way as to present a traditional ANOVA table, for example when running \texttt{anova()} on an object from \texttt{aov()}, as we did in the case of single-factor ANOVA above. Let's run a quick example of two-by-two factorial ANOVA below using the \texttt{mtcars} data frame.
In this case we are interested in whether miles per gallon (\texttt{mpg}) is affected by the engine cylinder configuration (``V'' or ``straight''), the transmission type (``automatic'' or ``manual''), and their interaction. Before we specify the model and run the ANOVA, let's look at the group means in an interaction plot. \begin{Shaded} \begin{Highlighting}[] \CommentTok{## First, modify the vs and am factor levels to be more descriptive} \NormalTok{mtcars_mod <-}\StringTok{ }\NormalTok{mtcars} \NormalTok{mtcars_mod}\OperatorTok{$}\NormalTok{vs <-}\StringTok{ }\KeywordTok{as.factor}\NormalTok{(}\KeywordTok{ifelse}\NormalTok{(mtcars}\OperatorTok{$}\NormalTok{vs}\OperatorTok{==}\DecValTok{0}\NormalTok{, }\StringTok{"V"}\NormalTok{,}\StringTok{"straight"}\NormalTok{))} \NormalTok{mtcars_mod}\OperatorTok{$}\NormalTok{am <-}\StringTok{ }\KeywordTok{as.factor}\NormalTok{(}\KeywordTok{ifelse}\NormalTok{(mtcars}\OperatorTok{$}\NormalTok{am}\OperatorTok{==}\DecValTok{0}\NormalTok{, }\StringTok{"auto"}\NormalTok{,}\StringTok{"manual"}\NormalTok{))} \CommentTok{## Make an interaction plot} \KeywordTok{with}\NormalTok{(mtcars_mod, \{} \KeywordTok{interaction.plot}\NormalTok{(}\DataTypeTok{x.factor=}\NormalTok{am, }\DataTypeTok{trace.factor=}\NormalTok{vs, }\DataTypeTok{response=}\NormalTok{mpg, }\DataTypeTok{xlab=}\StringTok{"transmission"}\NormalTok{,} \DataTypeTok{trace.label=}\StringTok{"cyl config"}\NormalTok{) } \NormalTok{\})} \end{Highlighting} \end{Shaded} \includegraphics{foundational_statistics_files/figure-latex/unnamed-chunk-100-1.pdf} The lines look fairly parallel, but let's run a factorial ANOVA to test for the two main effects, and their interaction, with respect to miles per gallon. When specifying interactions in \texttt{R} models, there are options. In the two-by-two factorial case with fixed effects, we can set up the model in two, equivalent ways. \begin{Shaded} \begin{Highlighting}[] \CommentTok{## To specify the main and interaction effects individually} \NormalTok{mpg_fac1 <-}\StringTok{ }\KeywordTok{aov}\NormalTok{(mpg }\OperatorTok{~}\StringTok{ }\NormalTok{vs }\OperatorTok{+}\StringTok{ }\NormalTok{am }\OperatorTok{+}\StringTok{ }\NormalTok{vs}\OperatorTok{:}\NormalTok{am, mtcars_mod)} \CommentTok{## Shorthand notation to include all main effects and interactions} \NormalTok{mpg_fac2 <-}\StringTok{ }\KeywordTok{aov}\NormalTok{(mpg }\OperatorTok{~}\StringTok{ }\NormalTok{vs}\OperatorTok{*}\NormalTok{am, mtcars_mod)} \end{Highlighting} \end{Shaded} And we can obtain the ANOVA table using the \texttt{anova()} function \begin{Shaded} \begin{Highlighting}[] \KeywordTok{anova}\NormalTok{(mpg_fac1)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## Analysis of Variance Table ## ## Response: mpg ## Df Sum Sq Mean Sq F value Pr(>F) ## vs 1 496.53 496.53 41.1963 5.981e-07 *** ## am 1 276.03 276.03 22.9021 4.984e-05 *** ## vs:am 1 16.01 16.01 1.3283 0.2589 ## Residuals 28 337.48 12.05 ## --- ## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 \end{verbatim} Consistent with our suspicion from the plot, we do not reject the null hypothesis of no interaction. There is evidence for the main effects of cylinder configuration (\texttt{vs}) and transmission (\texttt{am}), but not for an interaction (\texttt{vs:am}) between them. In other more complex scenarios with multiple terms and interactions, interpretation can often be complicated. 
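
If follow-up comparisons among the individual factor level combinations were of interest, one option (shown here only as a sketch, reusing the \texttt{mpg\_fac2} object fitted above) is a Tukey post-hoc test on the fitted \texttt{aov} model:

\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{## Tukey honest significant differences for each term, including the interaction}
\NormalTok{TukeyHSD(mpg_fac2)}
\end{Highlighting}
\end{Shaded}

Because we did not reject the null hypothesis for the interaction in this example, such pairwise comparisons of the four cell means are mainly of illustrative value here.
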
It definitely helps to use interaction plots to make those interpretations clearer, and one can also perform either contrasts or post-hoc comparisons among groups (as sketched above) to better understand significant differences among factor level combinations. It is also possible to have both nested and factorial terms in the same, ``partly nested'' analysis model. More advanced courses and reading on mixed general linear models go into these complex situations in depth, and evaluating these models requires careful consideration in many cases.

\hypertarget{exercises-associated-with-this-chapter-8}{%
\section{Exercises associated with this chapter:}\label{exercises-associated-with-this-chapter-8}}

\begin{itemize}
\tightlist
\item
  Problem Set 4
\end{itemize}

\hypertarget{additional-learning-resources-8}{%
\section{Additional learning resources:}\label{additional-learning-resources-8}}

\begin{itemize}
\item
  Logan, M. 2010. Biostatistical Design and Analysis Using R. - A great intro to R for statistical analysis
\item
  Langford, D. J., et al.~2006. Science 312: 1967-1970. - example used for single-factor ANOVA
\end{itemize}

\hypertarget{introduction-to-frequency-analysis}{%
\chapter{Introduction to Frequency Analysis}\label{introduction-to-frequency-analysis}}

\hypertarget{background-8}{%
\section{Background}\label{background-8}}

Up to this point we have dealt exclusively with response variables that are continuous. It is possible, and relatively common, to encounter response variables that are not continuously distributed. For example, we may have a binary response variable or a categorical response with more than two levels. One way to approach the analysis of these discrete variables is to tally the number of observations in each category and compare these ``frequencies'' (proportions) to a null (i.e.~random) expectation for the frequencies, possibly with respect to another factor. In this multi-factor situation we often want to know whether the two categorical variables are independent of one another, that is, whether an observation is more or less likely than expected by random chance to take on a certain level of factor A, given it is characterized by a certain level of factor B. Hypothesis tests of this nature are often called ``tests of independence.''

Another common goal is to compare the frequencies of observations across factor levels to an expectation based on certain ``rules'' of a natural system. This type of test is called a ``goodness of fit'' test, and one example of its application is to test expected genetic patterns of Mendelian segregation using offspring phenotypes that result from a particular cross of plant or animal parents. In this chapter we will focus on fundamental applications of goodness of fit tests and tests of independence.

Although we will not cover it in this course, you should also be aware of another widely used analytical framework for discrete response variables: generalized linear models. These models take a non-continuous response variable and mathematically relate it to a linear combination of predictor variables, via something called a ``link'' function. For a binary response variable, for example, observations are modeled probabilistically as if they vary between 0 and 1 (even though they don't in reality), using an approach called logistic regression. More advanced statistical inference courses cover generalized linear models, as these approaches are frequently used to analyze counts, binary variables, frequencies, and ordinal categorical variables.
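
Purely as a glimpse of that framework (it is not covered further here, and the data frame and variable names below are hypothetical), a logistic regression for a binary response can be specified with the base \texttt{R} function \texttt{glm()}:

\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{## Binary response modeled through the logit link of a generalized linear model}
\NormalTok{mod_logit <- glm(survived ~ dose, family = binomial, data = df_name)}
\NormalTok{summary(mod_logit)}
\end{Highlighting}
\end{Shaded}

The rest of this chapter sticks to the simpler frequency-based tests.
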
\hypertarget{goodness-of-fit-tests}{%
\section{Goodness of fit tests}\label{goodness-of-fit-tests}}

The null hypothesis for a goodness of fit test is that the relative frequencies of observations across categories in a population occur at a specific ratio. In practice this means that we need to compare observed frequency ratios with expected ones. If the deviation of observed from expected ratios is high, our test statistic should reflect how extreme that deviation is and be useful in a null hypothesis test. One such test statistic is the chi-square (\(\chi^2\)) statistic. It is calculated based on a sum of values (across categories) that reflects how much the observed frequencies differ from the expected:

\[\chi^2=\sum\frac{(o-e)^2}{e}\]

Where \(o\) and \(e\) are observed and expected counts, respectively, in each category. If the observed frequencies match the expected ones exactly, \(\chi^2\) is zero, and under the null hypothesis it should remain small apart from sampling noise. We compare this test statistic calculated from a sample to a \(\chi^2\) distribution with degrees of freedom equal to the number of categories minus 1. Especially large values of \(\chi^2\) fall in the tail of the distribution and are extremely unlikely under the null hypothesis. The probability of observing a value at least this extreme is our \emph{p}-value for the hypothesis test. In nearly all cases it makes sense to perform a one-sided test in this direction, but in principle a left-sided test could be performed to test for ``artificially high'' congruence of observed and expected frequencies.

\hypertarget{assumptions-of-the-chi-square-test}{%
\subsection{Assumptions of the chi-square test}\label{assumptions-of-the-chi-square-test}}

There are two assumptions for a valid null hypothesis test using the \(\chi^2\) statistic and its theoretical distribution:

\begin{itemize}
\item
  The observations are classified independently of one another, which should be satisfied through a random sample.
\item
  Only a small proportion of the categories (20\% or less) should have expected frequencies of less than five. Increasing the sample size sufficiently will ensure that this assumption is met. Other tests are more appropriate if this assumption cannot be met.
\end{itemize}

\hypertarget{goodness-of-fit-tests-in-r}{%
\subsection{\texorpdfstring{Goodness of fit tests in \texttt{R}}{Goodness of fit tests in R}}\label{goodness-of-fit-tests-in-r}}

As an example, let's say that we repeated one of Gregor Mendel's pea crossing experiments in which we were tracking the inheritance pattern for two traits: pea color (yellow vs.~green) and pea shape (smooth vs.~dented). Assuming independent assortment and unbiased segregation, we would expect a dihybrid cross (between two heterozygous parents) for these traits to yield an expected 9:3:3:1 ratio of phenotype combinations in the progeny. We can perform a quick chi-square test in \texttt{R} to test the null hypothesis that our observed progeny from the cross adhere to this expected ratio.
\begin{Shaded} \begin{Highlighting}[] \CommentTok{## First, we create a vector for our observed counts} \NormalTok{pea_count <-}\StringTok{ }\KeywordTok{c}\NormalTok{(}\DecValTok{160}\NormalTok{, }\DecValTok{39}\NormalTok{, }\DecValTok{48}\NormalTok{, }\DecValTok{11}\NormalTok{)} \CommentTok{## Next, we create a vector of factor levels that name the 4 different categories, using the `gl()` function, and combine into a data frame} \NormalTok{pea_type <-}\StringTok{ }\KeywordTok{gl}\NormalTok{(}\DecValTok{4}\NormalTok{,}\DecValTok{1}\NormalTok{,}\DecValTok{4}\NormalTok{, }\KeywordTok{c}\NormalTok{(}\StringTok{"yellow_smooth"}\NormalTok{,}\StringTok{"yellow_dent"}\NormalTok{,}\StringTok{"green_smooth"}\NormalTok{,}\StringTok{"green_dent"}\NormalTok{))} \NormalTok{pea_data <-}\StringTok{ }\KeywordTok{data.frame}\NormalTok{(pea_type, pea_count)} \CommentTok{## Many frequency test functions need the data formatted as a "table," so we need to reformat} \NormalTok{pea_table <-}\StringTok{ }\KeywordTok{xtabs}\NormalTok{(pea_count }\OperatorTok{~}\StringTok{ }\NormalTok{pea_type, }\DataTypeTok{data=}\NormalTok{pea_data)} \CommentTok{## Before the test, let's evaluate our 20% of expected frequencies < 5 assumption.} \CommentTok{## We can do this by running the chi-square test and pulling out just the expected counts vector} \KeywordTok{chisq.test}\NormalTok{(pea_table, }\DataTypeTok{p=}\KeywordTok{c}\NormalTok{(}\DecValTok{9}\OperatorTok{/}\DecValTok{16}\NormalTok{, }\DecValTok{3}\OperatorTok{/}\DecValTok{16}\NormalTok{, }\DecValTok{3}\OperatorTok{/}\DecValTok{16}\NormalTok{, }\DecValTok{1}\OperatorTok{/}\DecValTok{16}\NormalTok{), }\DataTypeTok{correct=}\NormalTok{F)}\OperatorTok{$}\NormalTok{exp} \end{Highlighting} \end{Shaded} \begin{verbatim} ## yellow_smooth yellow_dent green_smooth green_dent ## 145.125 48.375 48.375 16.125 \end{verbatim} \begin{Shaded} \begin{Highlighting}[] \CommentTok{## It looks like all 4 expected counts are greater than 5, so we will proceed.} \KeywordTok{chisq.test}\NormalTok{(pea_table, }\DataTypeTok{p=}\KeywordTok{c}\NormalTok{(}\DecValTok{9}\OperatorTok{/}\DecValTok{16}\NormalTok{, }\DecValTok{3}\OperatorTok{/}\DecValTok{16}\NormalTok{, }\DecValTok{3}\OperatorTok{/}\DecValTok{16}\NormalTok{, }\DecValTok{1}\OperatorTok{/}\DecValTok{16}\NormalTok{), }\DataTypeTok{correct=}\NormalTok{F)} \end{Highlighting} \end{Shaded} \begin{verbatim} ## ## Chi-squared test for given probabilities ## ## data: pea_table ## X-squared = 4.9733, df = 3, p-value = 0.1738 \end{verbatim} Here we fail to reject the null hypothesis that our data adhere to our 9:3:3:1 ratio expectation, given that our \emph{p}-value is quite high. If the 20\% or fewer of expected frequencies \textless{} 5 assumption had not been met, several test statistic corrections (e.g.~Williams'), or a randomization test could have been applied. Also, if differences between observed and expected values are especially small (i.e.~much smaller than the expected values) the G-test (see below) can be a more powerful alternative. \hypertarget{tests-of-independence-for-frequencies}{% \section{Tests of independence for frequencies}\label{tests-of-independence-for-frequencies}} The null hypothesis for frequency-based tests of independence is that the two or more categorical variables are independent of one another. Tests of independence do not assume causal relationships between variables, but causality may be argued depending on context. 
To test the independence of two categorical variables we organize the sample data into what is known as a ``contingency table,'' in which the rows represent the conditional category counts of variable 1 and the columns represent the conditional category counts of variable 2. Expected frequencies (under the null hypothesis of independence) for each cell in the table can be calculated as the product of the row and column totals divided by the overall total. One possible test is to use these expected frequencies to calculate a \(\chi^2\) statistic with degrees of freedom equal to the number of rows minus 1 multiplied by the number of columns minus 1.

Another way to test the null hypothesis of independence between categorical variables is with something called a G-test. This test is a form of the ``likelihood ratio test,'' which evaluates whether the likelihoods (see Chapter 8) of two models or hypotheses are equal by assessing how extreme their ratio (in practice the natural log of their ratio) is. If the log-likelihood ratio is extreme, the two models are likely different in how well they fit the data, and we reject the null hypothesis. For G-tests of independence, we sum up log-likelihood ratios (based on observed divided by expected counts) across all categories (cells in our contingency table), and then compare twice that sum to a \(\chi^2\) distribution with degrees of freedom again equal to \((n_{rows}-1)\times(n_{columns}-1)\):

\[G^2=2\sum o\,\ln\!\left(\frac{o}{e}\right)\]

Where \(o\) and \(e\) are observed and expected cell counts, respectively, from the contingency table. Again, especially large values of \(G^2\) will result in rejection of the null hypothesis. G-tests of independence have the same aforementioned assumptions as the chi-square test.

\hypertarget{g-test-of-independence-in-r}{%
\subsection{\texorpdfstring{G-test of independence in \texttt{R}}{G-test of independence in R}}\label{g-test-of-independence-in-r}}

As an example of a G-test of independence, we can use the base \texttt{R} data set \texttt{HairEyeColor} to test whether hair and eye color are independent in a sample of female students from the University of Delaware in the 1970s. We will use the \texttt{GTest()} function from the package \texttt{DescTools}.
\begin{Shaded}
\begin{Highlighting}[]
\KeywordTok{library}\NormalTok{(DescTools)}
\end{Highlighting}
\end{Shaded}

\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{## First, use indexing to retrieve just the female data from the 3-D table}
\NormalTok{hair_eye_females <-}\StringTok{ }\NormalTok{HairEyeColor[,,}\DecValTok{2}\NormalTok{]}

\CommentTok{## Test the assumption that 20% or fewer of expected frequencies are < 5}
\KeywordTok{chisq.test}\NormalTok{(hair_eye_females, }\DataTypeTok{correct=}\NormalTok{F)}\OperatorTok{$}\NormalTok{exp}
\end{Highlighting}
\end{Shaded}

\begin{verbatim}
##        Eye
## Hair       Brown     Blue     Hazel     Green
##   Black 20.26837 18.93930  7.642173  5.150160
##   Brown 55.73802 52.08307 21.015974 14.162939
##   Red   14.42173 13.47604  5.437700  3.664537
##   Blond 31.57188 29.50160 11.904153  8.022364
\end{verbatim}

\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{## Only one cell of 16 has a count < 5, so proceed with the G-test}
\KeywordTok{GTest}\NormalTok{(hair_eye_females)}
\end{Highlighting}
\end{Shaded}

\begin{verbatim}
## 
##  Log likelihood ratio (G-test) test of independence without correction
## 
## data:  hair_eye_females
## G = 112.23, X-squared df = 9, p-value < 2.2e-16
\end{verbatim}

We reject our null hypothesis of independence between hair and eye color for this data set. It is extremely likely that certain hair colors are more or less likely to co-occur with certain eye colors than expected through random chance. There is very strong evidence of a statistical association between hair and eye color in this sample.

\hypertarget{odds-ratios}{%
\subsection{Odds ratios}\label{odds-ratios}}

Rejecting a null hypothesis in a test of independence is one thing, but it doesn't tell us much about how or to what extent the variables are associated. For one, recall our past discussion of statistical versus practical significance. A \emph{p}-value from a statistical test does not, by itself, tell us how big a difference between or effect on populations actually is. For this, we rely on the calculation and reporting of \textbf{\emph{effect sizes}}. An effect size, such as a slope in a regression analysis, a difference in means, a ``fold-change'' in a gene expression analysis, etc., quantifies the effect on or association between variables. Furthermore, in tests of independence with multiple factors or many more than two categories per factor, the structural details of how categories depend on one another may be complex and are not apparent in a \emph{p}-value. Fortunately, we can appraise what are equivalent to effect sizes for frequency data by looking at the cells and marginal totals in a contingency table, and by calculating what are called \textbf{\emph{odds ratios}} to measure the magnitude of departures from independence among categories.

The term ``odds'' in statistics simply refers to how likely a particular outcome is relative to how likely all other possible outcomes are. The formal expression of this is \(\pi_j/(1-\pi_j)\), with \(\pi_j\) representing the probability of that particular event occurring. If we flip a fair coin once, for example, the probability of getting ``tails'' is 0.5, and the probability of all other alternatives is 0.5. So, our odds of getting tails is \(0.5/(1-0.5) = 1\). An odds of one, also referred to as ``even odds,'' simply means that getting tails is equally likely relative to all other possibilities (in this case getting ``heads'').
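
As a quick numerical check of this definition (just a sketch of the arithmetic for the fair coin described above):

\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{## Odds of tails for a fair coin: probability over its complement}
\NormalTok{p_tails <- 0.5}
\NormalTok{p_tails / (1 - p_tails)   # evaluates to 1, i.e. "even odds"}
\end{Highlighting}
\end{Shaded}
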
But we can also compare odds for two different conditions that are distinct in some way, which is where odds ratios come into play. Say that we have a coin that we know is not fair, and specifically we know (because we flipped it a million times) that the probability of getting tails is actually 0.6. If we calculate an odds ratio for this ``tails-biased'' coin relative to the ``fair'' coin, we get \(\frac{0.6/(1-0.6)}{0.5/(1-0.5)}=1.5\). The odds ratio tells us how many times more (or less) likely a particular event is in one scenario versus another. In this example, we are 1.5 times more likely to get tails with the tails-biased coin than with the fair coin.

This same logic applies to contingency tables. We might, for instance, compare the odds of selecting (from the \texttt{HairEyeColor} sample) a person with brown eyes who also has black hair, to the odds of selecting a person with brown eyes who also has blonde hair. In a situation where one has rejected the null hypothesis of independence, the odds ratios that deviate the most from one will give us some signal of where the association between variables is most likely coming from.

For contingency table odds ratio calculations, the following simplified equation can be used for a 2x2 table to calculate the odds ratio \(\theta\):

\[\theta=\frac{(cell_{1,1}+0.5)(cell_{2,2}+0.5)}{(cell_{1,2}+0.5)(cell_{2,1}+0.5)}\]

Where the counts in the cells of an \(r \times c\) table are denoted as \(cell_{r,c}\). A value of 0.5 is usually added to each cell count to prevent division by zero. If we revisit the coin flip example, but with (unrealistically perfect) coin flip data, we can work through how the odds ratio is calculated using this equation.

\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{## Set up our imaginary coin flip data}
\CommentTok{## Rows will represent the tail-heavy and fair coin, respectively}
\CommentTok{## Columns will represent number of tails, and heads, respectively in 100 flips}
\NormalTok{flips <-}\StringTok{ }\KeywordTok{matrix}\NormalTok{(}\KeywordTok{c}\NormalTok{(}\DecValTok{60}\NormalTok{, }\DecValTok{40}\NormalTok{, }\DecValTok{50}\NormalTok{, }\DecValTok{50}\NormalTok{), }\DataTypeTok{ncol=}\DecValTok{2}\NormalTok{, }\DataTypeTok{byrow=}\OtherTok{TRUE}\NormalTok{)}
\KeywordTok{colnames}\NormalTok{(flips) <-}\StringTok{ }\KeywordTok{c}\NormalTok{(}\StringTok{"tails"}\NormalTok{,}\StringTok{"heads"}\NormalTok{)}
\KeywordTok{rownames}\NormalTok{(flips) <-}\StringTok{ }\KeywordTok{c}\NormalTok{(}\StringTok{"tail_biased"}\NormalTok{,}\StringTok{"fair"}\NormalTok{)}
\NormalTok{flips <-}\StringTok{ }\KeywordTok{as.table}\NormalTok{(flips)}
\NormalTok{flips}
\end{Highlighting}
\end{Shaded}

\begin{verbatim}
##             tails heads
## tail_biased    60    40
## fair           50    50
\end{verbatim}

\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{## Perform the odds ratio calculation for odds of tails with the biased coin over odds of tails for the fair coin}
\NormalTok{odds_ratio_flips <-}\StringTok{ }\NormalTok{((flips[}\DecValTok{1}\NormalTok{,}\DecValTok{1}\NormalTok{]}\OperatorTok{+}\FloatTok{0.5}\NormalTok{)}\OperatorTok{*}\NormalTok{(flips[}\DecValTok{2}\NormalTok{,}\DecValTok{2}\NormalTok{]}\OperatorTok{+}\FloatTok{0.5}\NormalTok{))}\OperatorTok{/}\NormalTok{((flips[}\DecValTok{1}\NormalTok{,}\DecValTok{2}\NormalTok{]}\OperatorTok{+}\FloatTok{0.5}\NormalTok{)}\OperatorTok{*}\NormalTok{(flips[}\DecValTok{2}\NormalTok{,}\DecValTok{1}\NormalTok{]}\OperatorTok{+}\FloatTok{0.5}\NormalTok{))}
\NormalTok{odds_ratio_flips}
\end{Highlighting}
\end{Shaded}

\begin{verbatim}
## [1] 1.493827
\end{verbatim}

We see that this odds ratio is \textasciitilde{}1.5, the same as calculated based on probabilities above. Here the value is a bit smaller than 1.5 because of the ``convenience'' 0.5 additions. In practice, calculating odds ratios from tables that are larger than 2x2 requires splitting up those tables into ``partial tables,'' because odds ratios must always be calculated in a pairwise manner. Also, it is common to interpret odds ratios after taking their natural logarithm. These ``log odds'' or ``LOD'' values are necessary when calculating confidence intervals, for example. In \texttt{R}, odds ratios can be calculated manually from a contingency table, or using a function such as \texttt{oddsratio()} from the \texttt{epitools} package.

\hypertarget{a-final-note-on-presenting-statistical-test-results-in-writing}{%
\section{A final note on presenting statistical test results in writing}\label{a-final-note-on-presenting-statistical-test-results-in-writing}}

One final topic we should cover briefly in this course is the written presentation of statistical results. It is important to present your statistical analysis results in a clearly stated, consistent, and efficient manner, especially in reports and scientific articles or books. The general guidelines below apply to at least all frequentist statistical analyses (definitely the ones in this book!), and most apply to non-frequentist results as well.

As an example, suppose you asked the question, ``Is the average height of male students the same as female students in a pool of randomly selected Biology majors?'' During your study you collected height data from random samples (100 each) of male and female students. You then visualized the data using the appropriate plots, calculated descriptive statistics, and performed your hypothesis test. In your results section you would include the figures, perhaps a table of your descriptive statistics for those samples (e.g.~mean, standard error of the mean, n, range, etc.), and a declaration of your hypothesis test result with the formal details (effect size, test statistic, degrees of freedom, and \emph{p}-value) to support it. Don't forget to refer to your tables or figures as you state the main results in the text.

Suppose you found that male Biology majors are, on average, 12.5 cm taller than female majors, and you rejected your null hypothesis of no difference in means. Declaring that males were taller by an average of 12.5 cm is the most important message, and the statistical details (which give support and clarification of your conclusion) come after that statement. When stating a main result, make sure that you actively state it as your (or your and your co-authors') finding. A statistical test doesn't ``do'' anything itself, so it weakens the strength and confidence of your statement if you say, ``An ANOVA showed that males were significantly taller than females\ldots{}'' Instead write something like, ``We found that male Biology students were significantly taller than female Biology students, by an average of 12.5 cm (single-factor ANOVA, \(F=59.9\), \(d.f.=1;198\), \(p=1.23\times10^{-8}\)).'' If the means and standard errors were not reported in a table or elsewhere, you could also have included them parenthetically in that sentence. Also, degrees of freedom can alternatively be reported as subscripts to the test statistic symbol (i.e.~\(F_{1,198}=59.9\)).
Below are some bulleted guidelines with additional details about presenting statistical analysis results.

\hypertarget{differences-directionality-and-magnitude}{%
\subsection{Differences, directionality, and magnitude}\label{differences-directionality-and-magnitude}}

\begin{itemize}
\item
  Emphasize clearly the nature of differences or relationships.
\item
  If you are testing for differences among groups, and you find a significant difference, it is not sufficient to simply report that ``groups A and B were significantly different''. How are they different and by how much?
\item
  It is much more informative to say ``Group A individuals were 23\% larger than those in Group B'', or, ``Group B pups gained weight at twice the rate of Group A pups.''
\item
  Report the direction of differences (greater, larger, smaller, etc.) and the magnitude of differences (\% difference, how many times, etc.) whenever possible.
\end{itemize}

\hypertarget{other-statistical-results-reporting-formalities}{%
\subsection{Other statistical results reporting formalities}\label{other-statistical-results-reporting-formalities}}

\begin{itemize}
\item
  Always include the appropriate units when reporting data or summary statistics. For an individual value you would write, ``\ldots{}the mean length was 10 cm'', or, ``\ldots{}the maximum time was 140 min.''
\item
  When including a measure of variability, place the unit after the error value, e.g., ``\ldots{}was 10 ± 2.3 m''.
\item
  Likewise place the unit after the last in a series of numbers all having the same unit. For example: ``\ldots{}lengths of 5, 10, 15, and 20 m'', or ``\ldots{}no differences were observed after 2, 4, 6, or 8 min. of incubation''.
\end{itemize}

\hypertarget{exercises-associated-with-this-chapter-9}{%
\section{Exercises associated with this chapter:}\label{exercises-associated-with-this-chapter-9}}

\begin{itemize}
\tightlist
\item
  Problem Set 4
\end{itemize}

\hypertarget{additional-learning-resources-9}{%
\section{Additional learning resources:}\label{additional-learning-resources-9}}

\begin{itemize}
\tightlist
\item
  Logan, M. 2010. Biostatistical Design and Analysis Using R. - A great intro to R for statistical analysis
\end{itemize}

\bibliography{book.bib,packages.bib}

\end{document}
{ "alphanum_fraction": 0.7640659371, "avg_line_length": 81.9374057315, "ext": "tex", "hexsha": "f63509af406c570c054d90045e6d79bc9a9a961e", "lang": "TeX", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2022-02-02T19:12:08.000Z", "max_forks_repo_forks_event_min_datetime": "2022-02-02T19:12:08.000Z", "max_forks_repo_head_hexsha": "44cae9fbe2f1019ee7c7f4b3e3515a70dd197490", "max_forks_repo_licenses": [ "CC0-1.0" ], "max_forks_repo_name": "clay-small/Found_Stat", "max_forks_repo_path": "docs/foundational_statistics.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "44cae9fbe2f1019ee7c7f4b3e3515a70dd197490", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "CC0-1.0" ], "max_issues_repo_name": "clay-small/Found_Stat", "max_issues_repo_path": "docs/foundational_statistics.tex", "max_line_length": 2356, "max_stars_count": null, "max_stars_repo_head_hexsha": "44cae9fbe2f1019ee7c7f4b3e3515a70dd197490", "max_stars_repo_licenses": [ "CC0-1.0" ], "max_stars_repo_name": "clay-small/Found_Stat", "max_stars_repo_path": "docs/foundational_statistics.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 83822, "size": 325947 }
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                 %
%   HBOOK User Guide -- LaTeX Source                              %
%                                                                 %
%   Chapter 1                                                     %
%                                                                 %
%   The following external EPS files are referenced:              %
%      hbbatch.eps, hbookc11.eps                                  %
%                                                                 %
%   Editor: Michel Goossens / CN-AS                               %
%   Last Mod.: 20 October 1993 9:00 mg                            %
%                                                                 %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\Filename{H1Introduction}

\chapter{Introduction}
\label{HINTRO}

Data processing is an important aspect of particle physics experiments since the volume of data to be handled is quite large, a single LEP experiment producing of the order of a terabyte of data per year. As a result, every particle physics laboratory has a large data processing centre even though more than $50\%$ of the computation is actually carried out in universities or other research establishments. Particle physicists from various countries are in close contact on a continental and world wide basis, the information exchanged being mainly via preprints and conferences. The similarities in experimental devices and problems, and the close collaboration, favour the adoption of common software methodologies that sometimes develop into widely used standard packages. Examples are the histograming, fitting and data presentation package HBOOK, its graphic interface \HPLOT~\cite{bib-HIGZHPLOT} and the Physics Analysis Workstation (\PAW) system~\cite{bib-PAW}, which have been developed at CERN.

HBOOK is a subroutine package to handle statistical distributions (histograms and Ntuples) in a Fortran scientific computation environment. It presents results graphically on the line printer, and can optionally draw them on graphic output devices via the \HPLOT{} package. \PAW{} integrates the functionalities of the \HBOOK{} and \HPLOT{} (and other) packages into an interactive workstation environment and provides the user with a coherent and complete working environment, from reading a (mini)DST, via data analysis to preparing the final data presentation.

These packages are available from the CERN Program Library (see the copyright page for conditions). They are presently being used on several hundred different computer installations throughout the world.

\Filename{H2Data-processing-flow}

\Section{4cm}{Data processing flow in particle experiments}
\label{HDATPROC}

In the late sixties and early seventies a large fraction of particle physicists were active in bubble chamber physics. The number of events they treated varied from a few hundred (neutrino) to several tens of thousands (e.g. strong interaction spectroscopy). Normally users would reduce their raw ``measurement'' tapes \index{DST} after event reconstruction onto Data Summary Tapes (DST) and extract from there mini and micro DSTs, which would then be used for analysis. In those days a statistical analysis program SUMX~\cite{bib-SUMX} would read each event and compile information into histograms, two-dimensional scatter diagrams and `ordered lists'. Facilities were provided (via data cards) to select subsets of events according to criteria and the user could add routines for computing, event by event, quantities not immediately available. Although the idea and formalism of specifying cuts and selection criteria in this way were very attractive, the computer technology of those days only allowed the data to be analysed in batch mode on the CDC or IBM mainframes.
Therefore it was not always very practical to run several times through the data, and a more lightweight system HBOOK~\notHTML{\cite{bib-HBOOK1,bib-HBOOK2}}, easier to learn and use, was soon developed.

It was in the middle seventies, when larger proton and electron accelerators became available, that counter experiments definitively superseded bubble chambers, and with them the amount of data to be treated moved into the multi-megabyte range. Thousands of raw data tapes would be written, and huge reconstruction programs would extract interesting data from those tapes and transfer them to DSTs. Then, to make the analysis more manageable, various physicists would write their own mini-DST, with a reduced fraction of the information from the DST. They would run these (m,$\mu$)DSTs through HBOOK, whose functionality had increased substantially in the meantime~\notHTML{\cite{bib-HBOOK3,bib-HBOOK3R}}. Hence several tens of one- or two-dimensional histograms would be booked in the initialization phase, and the interesting parameters would be read sequentially from the DST and be binned in the histograms or scatter plots. Doing this was very efficient memory-wise (although 2-dim. histograms could still be very costly), but of course all correlations not explicitly plotted were lost.

HBOOK in those days still had its own memory management, but with version~4~\cite{bib-HBOOK4}, which became available in 1984, the ZEBRA data memory manager was introduced. This not only allowed the use of all memory management facilities of ZEBRA, but at the same time it became possible to use the sequential FZ and random access RZ~\cite{bib-ZEBRA} input-output possibilities of that system. This allows ``histograms'' to be saved and transferred to other systems in an easy way. At about the same time Ntuples, somewhat similar in functionality to ``events'' as written on a miniDST, were implemented. This way the complete correlation matrix between the various Ntuple elements can be reconstructed at will.

In the last few years multi-Mflop machines have become available on the desktop, and ``farms'' of analysis machines are being set up to ``interactively'' reconstruct events directly from the raw data as registered in the experimental setup, hence bypassing the ``batch'' reconstruction step. The first Ntuple implementation can be thought of as a static large two-dimensional array, one dimension representing the number of events and the other a number of characteristics (floating point numbers) stored for each event. With the present version of HBOOK Ntuples can contain complex substructures of different data types, which allow a certain dynamicity. Moreover tools have been developed to dynamically share data between various processes (Unix) or global sections (VMS). This makes it now possible to sample events as they are registered in the experimental setup or, when the computing power is available, to reconstruct, visualise and analyse events in real time as they are recorded in the experimental apparatus. It is expected that this will progressively eliminate the intermediate Batch/DST analysis step and allow, with the help of Monte Carlo events and calibration data, an (almost) immediate response to the data-taking needs of a large experiment.
\Filename{H2Output-options}

\Section{4cm}{HBOOK and its output options}
\label{HOUTOPTS}

The HBOOK system consists of a few hundred Fortran subroutines which enable the user to symbolically define, fill and output one- and two-dimensional density estimators, in the form of {\bf histograms}, {\bf scatter-plots} and {\bf tables}, and to handle Ntuples.
\index{histogram}
\index{scatter-plot}
\index{table}
\index{Ntuple}
Some interesting features of HBOOK are:
\begin{UL}
\item The basic operations require the knowledge of just a few subroutine calls that can be learned in half an hour, reading a few pages of documentation.
      The internal structure of the package is also such that the options that are not directly called by the user program are not loaded in memory.
\item Histograms and plots are represented on the line printer in a standard format that contains the picture and some numerical information.
      Several options are available to modify the presentation, mainly in the case of one dimensional histograms.
      By default, one histogram per page is printed, writing a possible common title, date, individual title, drawing the contour of the histogram between the minimum and maximum channel content, with the contents scale adjusted to fit in one page, followed by channel number, contents and scale, and some statistical information (entries, mean value, standard deviation and so on).
      If the number of channels is greater than 100, the histogram is printed on several pages.
\item Printing options permit adding or suppressing some information, choosing a different graphic presentation and modifying the mapping of histograms on output pages.
      Histograms can also be printed with channels oriented along rows instead of columns, to avoid splitting the ones with many channels.
      A logarithmic contents scale can be selected.
      Various alternative output choices are illustrated in the examples.
\end{UL}
About 120 subroutines are directly accessible to the user program, via Fortran calls of the type
\begin{center}
\fbox{\Lit{CALL H.....(P1,P2,..)}}
\end{center}
This is the only interface between the Fortran program and the dynamic data structure managed by HBOOK, which can remain hidden from the average user.

\subsection*{The functionality of HBOOK}

The various user routines of HBOOK can be subdivided by functionality as follows:
\begin{DL}{Random number generation}
\item[Booking] Declare a one- or two-dimensional histogram or a Ntuple.
\item[Projections] Project two-dimensional distributions onto both axes.
\item[Ntuples] A way of writing micro data-summary-files for further processing.
               This allows projections of individual variables or correlation plots.
               Selection mechanisms may be defined.
\item[Function representation] Associates a real function of 1 or 2 variables with a histogram.
\item[Filling] Enter a data value into a given histogram, table or Ntuple.
\item[Access to information] Transfer of numerical values from HBOOK-managed memory to Fortran variables and back.
\item[Arithmetic operations] On histograms and Ntuples.
\item[Fitting] Least squares and maximum likelihood fits of parametric functions to histogramed data.
\item[Monte Carlo testing] Fitting with finite Monte Carlo statistics.
\item[Differences between histograms] Statistical tests on the compatibility in shape between histograms using the Kolmogorov test.
\item[Parameterization] Expresses relationships between variables as linear combinations of elementary functions.
\item[Smoothing] Splines or other algorithms.
\item[Random number generation] Based on experimental distributions.
\item[Archiving] Information is stored on mass storage for further reference in subsequent programs.
\item[Editing] Choice of the form of presentation of the histogramed data.
\end{DL}

\Filename{H2What-you-should-know}

\Section{4cm}{What you should know before you start}

The basic data elements of HBOOK are the {\bf histogram} (one- and two-dimensional) and the {\bf Ntuple}. The user identifies his data elements using a {\bf single integer}. Each of the elements has a number of {\bf attributes} associated with it.
\index{histogram}
\index{Ntuple}
The package is organised as part of a {\bf library}, from which at load time unsatisfied externals are searched and loaded. In this way only those subroutines actually used will be loaded, therefore minimising the space occupied in memory by the code. Unfortunately, given the way Fortran works and although the package is structured as much as possible in the sense of selective loading, some unused subroutines will usually be present.
\newpage

HBOOK uses the ZEBRA \cite{bib-ZEBRA} data structure management package to manage its memory (see chapter \ref{HMEMORYM}). The working space of HBOOK is an array, allocated to the labelled common \Lit{/PAWC/}.
\index{common {\tt/PAWC/}}\index{PAWC@{\tt/PAWC/} common}
In ZEBRA terms this is a ZEBRA store. It is thus necessary to reserve as many locations as required with a declarative statement in the main program. The actual length of the common is defined most safely via a \Lit{PARAMETER} statement, as shown below:
\begin{center}
\begin{tabular}{|>{\tt}l|}
\hline
PARAMETER (NWPAWC = 50000)\\
COMMON /PAWC/ HMEMOR(NWPAWC)\\
\hline
\end{tabular}
\end{center}
\index{common {\tt/PAWC/}}\index{PAWC@{\tt/PAWC/} common}
Furthermore HBOOK must be informed of the storage limit via a call to \Rind{HLIMIT}. This is discussed in detail in section \vref{HMEMORYS}. In the case above this would correspond to
\begin{center}
\fbox{CALL HLIMIT(NWPAWC)}
\end{center}
\Rind[HLIMIT]{}
At execution time, when histograms are booked, they are accommodated in common \Lit{/PAWC/} in booking order, up to the maximum size available.
\index{common {\tt/PAWC/}}\index{PAWC@{\tt/PAWC/} common}
Note that a call to \Rind{HLIMIT} will automatically initialise the ZEBRA system via a call to the routine \Rind{MZEBRA}. If ZEBRA has already been initialised (\Rind{MZEBRA} has already been called), then \Rind{HLIMIT} should be called with a {\bf negative} number indicating the number of words required, e.g.
\begin{center}
\fbox{CALL HLIMIT(-NWPAWC)}
\end{center}
\Rind[HLIMIT]{}

\subsection{HBOOK parameter conventions}

\subsubsection*{Histogram or Ntuple Identifiers}

Histograms and Ntuples in HBOOK are identified by a positive or negative integer. Thus the histogram identifier \Lit{ID = 0} is illegal at {\bf booking} time. However it is a convenient way to specify that the option or operation applies to {\bf all} known histograms in the current working directory (e.g. output, input, printing). All routines for which a zero identifier is meaningful are mentioned explicitly.
\index{histogram!identifier}
\index{Ntuple!identifier}
\index{identifier}

\subsubsection*{Parameter types}

In agreement with the Fortran standard, when calling an HBOOK routine the type of each parameter must correspond to the one described in the routine's calling sequence in this manual.
Unless explicitly stated otherwise, parameters whose names start with \Lit{I, J, K, L, M} or \Lit{N} are {\bf integer}, the rest {\bf real}, with the exception of those beginning with the string \Lit{CH}, which correspond to character constants. \index{parameter type} \index{Fortran convention} \index{real type} \index{character type} \index{integer type} \index{type!parameter} \index{type!real} \index{type!character type} \index{type!integer} \subsubsection*{Data packing} \index{packing} \index{VMX@{\tt VMX}} All booking commands that reserve space for histograms or plots require the ``packing'' parameter \Lit{VMX}. It corresponds to the estimated maximum population of a single bin, on the basis of which a suitable number of bits per channel will be allocated. This allows several channels to be packed in one machine word, and thus to require less storage space (at the expense of packing and unpacking processing time). A value \Lit{VMX=0.0} signals that no packing is to be performed and that each histogram channel will occupy one machine word. \newpage \Filename{H2A-basic-example} \section{A basic example} \label{HSIMPLEXA} Below a simple example is given describing how to use HBOOK for booking, filling and printing simple histograms. After telling HBOOK the length of the \Lit{/PAWC/} common block \index{common {\tt/PAWC/}}\index{PAWC@{\tt/PAWC/} common} to be \Lit{10000} words with a call to \Rind{HLIMIT}, a global title to appear on all histograms is specified by calling \Rind{HTITLE}. Next a 100 bin one-dimensional histogram with identifier 10 is booked with a call to \Rind{HBOOK1}, followed by the booking using a call to \Rind{HBOOK2} of a two-dimensional histogram with identifier 20 and consisting of 100 times 40 cells. The \Lit{DO}-loop labelled 10 fills the one-dimensional histogram 10, while the nested \Lit{DO} loops labelled 20 and 30 look after filling the two-dimensional histogram 20. In both cases a call is made to routine \Rind{HFILL}. Finally a call to \Rind{HISTDO} writes an index with information about all histograms as well as a lineprinter representation of the histograms on standard output. \begin{XMPt}{Example of how to produce simple histograms} PROGRAM HSIMPLE * PARAMETER (NWPAWC = 10000) COMMON/PAWC/H(NWPAWC) *.___________________________________________ CALL HLIMIT(NWPAWC) * Set global title * CALL HTITLE('EXAMPLE NO = 1') * Book 1-dim histogram and scatter-plot * CALL HBOOK1(10,'EXAMPLE OF 1-DIM HISTOGRAM',100,1.,101.,0.) CALL HBOOK2(20,'EXAMPLE OF SCATTER-PLOT',100,0.,1.,40,1.,41.,30.) * Fill 1-dim histogram * DO 10 I=1,100 W=10*MOD(I,25) CALL HFILL(10,FLOAT(I)+0.5,0.,W) 10 CONTINUE * Fill scatter-plot * X=-0.005 DO 30 I=1,100 X=X+0.01 DO 20 J=1,40 Y=J IW=MOD(I,25)*MOD(J,10) IWMAX=J-MOD(I,25)+10 IF(IW.GT.IWMAX)IW=0 CALL HFILL(20,X,Y,FLOAT(IW)) 20 CONTINUE 30 CONTINUE * Print all histograms with an index * CALL HISTDO END \end{XMPt} \newpage%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \begin{Listing} EXAMPLE NO = 1 ............................................................................................................................. . . . HBOOK HBOOK CERN VERSION 4.13 HISTOGRAM AND PLOT INDEX 17/12/91 . . . ............................................................................................................................. . . . NO TITLE ID B/C ENTRIES DIM NCHA LOWER UPPER ADDRESS LENGTH . . . 
............................................................................................................................. . . . . . 1 EXAMPLE OF 1-DIM HISTOGRAM 10 32 100 1 X 100 0.100E+01 0.101E+03 79369 149 . . . . . . 2 EXAMPLE OF SCATTER-PLOT 20 5 4000 2 X 100 0.000E+00 0.100E+01 79217 760 . . Y 40 0.100E+01 0.410E+02 78482 726 . . . ............................................................................................................................. MEMORY UTILISATION MAXIMUM TOTAL SIZE OF COMMON /PAWC/ 80000 EXAMPLE NO = 1 -------------- EXAMPLE OF 1-DIM HISTOGRAM HBOOK ID = 10 DATE 17/12/91 NO = 1 250 240 - - - - 230 -I -I -I -I 220 -II -II -II -II 210 -I I -I I -I I -I I 200 -I I -I I -I I -I I 190 -I I -I I -I I -I I 180 -I I -I I -I I -I I 170 -I I -I I -I I -I I 160 -I I -I I -I I -I I 150 -I I -I I -I I -I I 140 -I I -I I -I I -I I 130 -I I -I I -I I -I I 120 -I I -I I -I I -I I 110 -I I -I I -I I -I I 100 -I I -I I -I I -I I 90 -I I -I I -I I -I I 80 -I I -I I -I I -I I 70 -I I -I I -I I -I I 60 -I I -I I -I I -I I 50 -I I -I I -I I -I I 40 -I I -I I -I I -I I 30 -I I -I I -I I -I I 20 -I I -I I -I I -I I 10 -I I -I I -I I -I I CHANNELS 100 0 1 10 0 1 2 3 4 5 6 7 8 9 0 1 1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 CONTENTS 100 111111111122222 111111111122222 111111111122222 111111111122222 10 123456789012345678901234 123456789012345678901234 123456789012345678901234 123456789012345678901234 1. 000000000000000000000000 000000000000000000000000 000000000000000000000000 000000000000000000000000 LOW-EDGE 100 1 10 1111111111222222222233333333334444444444555555555566666666667777777777888888888899999999990 1. 1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 * ENTRIES = 100 * ALL CHANNELS = 0.1200E+05 * UNDERFLOW = 0.0000E+00 * OVERFLOW = 0.0000E+00 * BIN WID = 0.1000E+01 * MEAN VALUE = 0.5433E+02 * R . M . 
S = 0.2854E+02 \newpage EXAMPLE NO = 1 -------------- EXAMPLE OF SCATTER-PLOT HBOOK ID = 20 DATE 17/12/91 NO = 2 CHANNELS 100 U 0 1 O 10 N 0 1 2 3 4 5 6 7 8 9 0 V 1 D 1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 E ************************************************************************************************************ OVE * * OVE 40 * * 40 39 * 9IR* 9IR* 9IR* 9IR* * 39 38 * 8GO** 8GO** 8GO** 8GO** * 38 37 * 7ELS* 7ELS* 7ELS* 7ELS* * 37 36 * 6CIOU* 6CIOU* 6CIOU* 6CIOU* * 36 35 * 5AFKPU* 5AFKPU* 5AFKPU* 5AFKPU* * 35 34 * 48CGKOS* 48CGKOS* 48CGKOS* 48CGKOS* * 34 33 * 369CFILORU 369CFILORU 369CFILORU 369CFILORU * 33 32 * 2468ACEGIKMOQS 2468ACEGIKMOQS 2468ACEGIKMOQS 2468ACEGIKMOQS * 32 31 * +23456789ABCDEFGHIJK +23456789ABCDEFGHIJK +23456789ABCDEFGHIJK +23456789ABCDEFGHIJK * 31 30 * * 30 29 * 9IR 9IR 9IR 9IR * 29 28 * 8GO* 8GO* 8GO* 8GO* * 28 27 * 7ELS 7ELS 7ELS 7ELS * 27 26 * 6CIOU 6CIOU 6CIOU 6CIOU * 26 25 * 5AFKP 5AFKP 5AFKP 5AFKP * 25 24 * 48CGKO 48CGKO 48CGKO 48CGKO * 24 23 * 369CFILO 369CFILO 369CFILO 369CFILO * 23 22 * 2468ACEGIK 2468ACEGIK 2468ACEGIK 2468ACEGIK * 22 21 * +23456789ABCDEF +23456789ABCDEF +23456789ABCDEF +23456789ABCDEF * 21 20 * * 20 19 * 9I 9I 9I 9I * 19 18 * 8GO 8GO 8GO 8GO * 18 17 * 7EL 7EL 7EL 7EL * 17 16 * 6CI 6CI 6CI 6CI * 16 15 * 5AFK 5AFK 5AFK 5AFK * 15 14 * 48CG 48CG 48CG 48CG * 14 13 * 369CF 369CF 369CF 369CF * 13 12 * 2468ACE 2468ACE 2468ACE 2468ACE * 12 11 * +23456789A +23456789A +23456789A +23456789A * 11 10 * * 10 9 * 9 9 9 9 * 9 8 * 8G 8G 8G 8G * 8 7 * 7E 7E 7E 7E * 7 6 * 6C 6C 6C 6C * 6 5 * 5A 5A 5A 5A * 5 4 * 48 48 48 48 * 4 3 * 369 369 369 369 * 3 2 * 2468 2468 2468 2468 * 2 1 * +2345 +2345 +2345 +2345 * 1 UND * * UND ************************************************************************************************************ LOW-EDGE 0 0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999 0 0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 * I I * ENTRIES = 4000 PLOT ---------I---------I--------- * SATURATION AT= 31 I 10488I * SCALE .,+,2,3,.,., A,B, STATISTICS ---------I---------I--------- * STEP = 1.00 * MINIMUM=0.000 I I \end{Listing} \newpage%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \Filename{H2HBOOK-batch} \section{HBOOK batch as the first step of the analysis} \begin{Fighere} \begin{center}\mbox{\epsfig{file=hbbatch.eps,width=13cm}}\end{center} \caption{Schematic presentation of the various steps in the data analysis chain} \label{FBATCH} \end{Fighere} Although it is possible to define histograms interactively in a PAW session, and then read the (many thousands of) events, in general for large data samples the relevant variables are extracted from the {\bf Data Summary Files {\rm or} DST}s and stored in {\bf histograms} \index{DST} or an {\bf Ntuple}. Histograms require to make a certain choice as to the range of values for the plotted parameter, because the {\bf binning}, or the coarseness, of the distribution has to be specified when the histogram is defined ({\bf booked}). Also only one- and two-dimensional histograms are possible, hence the correlations between various parameters can be difficult to study. Hence in many cases it is more appropriate to store the value of the important parameters for each event in an {\bf Ntuple}. 
Storing the parameters of each event in an Ntuple preserves the correlations
between them and allows selection criteria to be applied to the (reduced) data
sample at a later stage.
In general the time-consuming job of analysing all events available on tape is
run on a mainframe or CPU server, and the important event parameters are stored
in an Ntuple for further detailed study.
For convenience the Ntuple can be written to disk for each run; at a later
stage the Ntuples can then be {\bf merged} in order to allow a global
interactive analysis of the complete data sample (see Figure \ref{FBATCH}).

\vspace*{\baselineskip}

A typical batch job in which data are analysed offline and some of their
characteristics are stored in HBOOK is shown in Figure~\ref{FEX1IN}.
HBOOK is initialised by a call to~\Lit{HLIMIT}, which declares a length of
20000 words for the \Lit{/PAWC/} dynamic store.
Histograms 100 and 200 are then booked directly from the functions
\Lit{HTFUN1} and \Lit{HTFUN2} with \Lit{HBFUN1} and \Lit{HBFUN2}, so that
\Lit{HRNDM1} and \Lit{HRNDM2} can return random numbers distributed according
to these functions.
With these numbers the one- and two-dimensional histograms 110 and 210 are
filled, and the histograms are finally written to a newly created file
\Lit{HTEST.DAT}.
The output generated by the program is shown in Figure~\ref{FEX1OU}.
\begin{figure}[p]
\begin{XMPfont}{8}
      PROGRAM HTEST
      PARAMETER (NWPAWC=20000)
      COMMON/PAWC/H(NWPAWC)
      EXTERNAL HTFUN1,HTFUN2
*.------------------------------------------------------------
      CALL HLIMIT(NWPAWC)
*            Book histograms and declare functions
      CALL HBFUN1(100,'Test of HRNDM1',100,0.,1.,HTFUN1)
      CALL HBOOK1(110,'Filled according to HTFUN1',100,0.,1.,1000.)
      CALL HBFUN2(200,'Test of HRNDM2',100,0.,1.,40,0.,1.,HTFUN2)
      CALL HSCALE(200,0.)
      CALL HBOOK2(210,'Filled according to HTFUN2',100,0.,1.,40,0.,1.,30.)
*            Fill histograms
      DO 10 I=1,10000
         X=HRNDM1(100)
         CALL HFILL(110,X,0.,1.)
         CALL HRNDM2(200,X,Y)
         CALL HFILL(210,X,Y,1.)
  10  CONTINUE
*            Save all histograms on file HTEST.DAT
      CALL HRPUT(0,'HTEST.DAT','N')
      CALL HDELET(100)
      CALL HDELET(200)
      CALL HPRINT(0)
      END

      FUNCTION HTFUN2(X,Y)
*            Two-dimensional gaussian
      HTFUN2=HTFUN1(X)*HTFUN1(Y)
      RETURN
      END

      FUNCTION HTFUN1(X)
*            Constants for gaussians
      DATA C1,C2/1.,0.5/
      DATA XM1,XM2/0.3,0.7/
      DATA XS1,XS2/0.07,0.12/
*            Calculate the gaussians
      A1=-0.5*((X-XM1)/XS1)**2
      A2=-0.5*((X-XM2)/XS2)**2
      X1=C1
      X2=C2
      IF(ABS(A1).GT.0.0001)X1=C1*EXP(A1)
      IF(ABS(A2).GT.0.0001)X2=C2*EXP(A2)
*            Return function value
      HTFUN1=X1+X2
      RETURN
      END
\end{XMPfont}
\caption{Writing data to HBOOK with the creation of a HBOOK RZ file}
\label{FEX1IN}
\NODOC{\begin{minipage}[t]{.495\textwidth}}
\begin{XMPfrac}{3.3}

               Filled according to HTFUN1
 HBOOK ID =  110                     DATE  02/09/89    NO =   2

 [ 100-channel lineprinter histogram of the generated X values, showing the
   double-peaked shape of HTFUN1; character layout omitted ]

 * ENTRIES =  10000      * ALL CHANNELS = 0.1000E+05   * UNDERFLOW = 0.0000E+00   * OVERFLOW = 0.0000E+00
 * BIN WID = 0.1000E-01  * MEAN VALUE   = 0.4846E+00   * R . M . S = 0.2199E+00
\end{XMPfrac}
\NODOC{\end{minipage}\hfill}
\NODOC{\begin{minipage}[t]{.495\textwidth}}
\begin{XMPfrac}{3.3}

               Fill according to HTFUN2
 HBOOK ID =  210                     DATE  02/09/89    NO =   4

 [ 100 x 40 channel lineprinter scatter-plot of the generated (X,Y) pairs,
   distributed according to HTFUN2; character layout omitted ]

 * ENTRIES =  10000            * SATURATION AT= 255
 * SCALE   .,+,2,3,.,., A,B,   * STEP = 1              * MINIMUM=0
\end{XMPfrac}
\NODOC{\end{minipage}}
\caption{Output generated by job HTEST}
\label{FEX1OU}
\end{figure}
\clearpage
\Subsection{4cm}{Adding some data to the RZ file}
A second run, using program \Lit{HTEST1} below, shows how to add data to the
HBOOK~RZ~file created by the job \Lit{HTEST} (Figure~\ref{FEX1IN}).
The file \Lit{HTEST.DAT}, created in the previous run, is reopened in update
mode (option \Lit{'U'}) under the top directory name \Lit{EXAM2}, and a new
subdirectory \Lit{NTUPLE} is created, known as \Lit{//EXAM2/NTUPLE}, as can be
seen in the output of the \Lit{HLDIR} commands at the end of the job.
One-dimensional (10) and two-dimensional (20) histograms and an Ntuple (30)
are then booked.
Each Ntuple element or ``event'' is characterised by three {\bf variables}
(labelled \Lit{'X'}, \Lit{'Y'} and \Lit{'Z'}).
Whenever the initial buffer of \Lit{1000} words is exhausted, the Ntuple data
are written to the directory on disk specified in the call to \Lit{HBOOKN},
i.e. \Lit{//EXAM2/NTUPLE}, and the buffer in memory is reused for the
following events.
A one-dimensional projection of \Lit{X} and a two-dimensional projection of
\Lit{X} versus \Lit{Y} are then made onto histograms 10 and 20 respectively,
before these are printed and written to the HBOOK RZ file.
At the end the {\bf current} and {\bf parent} directories are listed.
The contents of the latter show that the data written by the first job
(\Lit{HTEST}) are indeed still present in the file under the top directory
\Lit{//EXAM2}.
The call to \Lit{RZSTAT} prints usage statistics about the RZ file.
\begin{XMPt}{Example of adding data to a HBOOK RZ file}
      PROGRAM HTEST1
      PARAMETER (NWPAWC=20000)
      COMMON/PAWC/H(NWPAWC)
      DIMENSION X(3)
      CHARACTER*8 CHTAGS(3)
      DATA CHTAGS/' X ',' Y ',' Z '/
*.----------------------------------------------------
      CALL HLIMIT(NWPAWC)
*            Reopen data base
      LRECL = 0
      CALL HROPEN(1,'EXAM2','HTEST.DAT','U',LRECL,ISTAT)
      CALL HMDIR('NTUPLE','S')
      CALL HBOOK1(10,'TEST1',100,-3.,3.,0.)
      CALL HBOOK2(20,'TEST2',30,-3.,3.,30,-3.,3.,250.)
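*            Book the Ntuple in the new directory //EXAM2/NTUPLE;
*            when the 1000-word buffer is full it is flushed to
*            that directory and reused for the following events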
      CALL HBOOKN(30,'N-TUPLE',3,'//EXAM2/NTUPLE',
     +            1000,CHTAGS)
*
      DO 10 I=1,10000
         CALL RANNOR(A,B)
         X(1)=A
         X(2)=B
         X(3)=A*A+B*B
         CALL HFN(30,X)
  10  CONTINUE
*
      CALL HPROJ1(10,30,0,0,1,999999,1)
      CALL HPROJ2(20,30,0,0,1,999999,1,2)
      CALL HPRINT(0)
      CALL HROUT(0,ICYCLE,' ')
      CALL HLDIR(' ',' ')
      CALL HCDIR('\bs',' ')
      CALL HLDIR(' ',' ')
      CALL RZSTAT(' ',999,' ')
      CALL HREND('EXAM2')
      END
\end{XMPt}
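The Ntuple and histograms stored by \Lit{HTEST1} can be exploited again in a
later job, either interactively with PAW or in batch. The fragment below is
only an outline of such a job (the file, directory and identifier conventions
are those of the example above; \Lit{HRIN}, the read counterpart of
\Lit{HROUT}, copies an object from the file into memory, and its exact calling
conventions should be taken from its reference description). It re-projects
the Ntuple onto a histogram whose binning is chosen only at this later stage,
which is precisely the flexibility gained by storing the raw values.
\begin{XMPt}{Outline: re-reading the Ntuple in a later job}
      PROGRAM HREAD
      PARAMETER (NWPAWC=20000)
      COMMON/PAWC/H(NWPAWC)
*.------------------------------------------------------------
      CALL HLIMIT(NWPAWC)
*            Reopen the file written by HTEST1, this time for reading
      LRECL = 0
      CALL HROPEN(1,'EXAM2','HTEST.DAT',' ',LRECL,ISTAT)
      CALL HCDIR('//EXAM2/NTUPLE',' ')
*            Copy the Ntuple (highest cycle) from the file into memory
      CALL HRIN(30,999999,0)
*            The binning of the projection is decided only now
      CALL HBOOK1(40,'X rebinned at analysis time',50,-4.,4.,0.)
      CALL HPROJ1(40,30,0,0,1,999999,1)
      CALL HPRINT(40)
      CALL HREND('EXAM2')
      END
\end{XMPt}
In most cases, however, this further analysis is done interactively from PAW,
as sketched in Figure \ref{FBATCH}.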
\begin{Fighere}
\NODOC{\begin{minipage}[t]{.525\textwidth}}
\begin{XMPfrac}{3.2}

       TEST1
 HBOOK ID =  10                      DATE  02/09/89    NO =   1

 [ 100-channel lineprinter histogram of the Ntuple variable X between
   -3. and 3.; character layout omitted ]

 * ENTRIES =  10000      * ALL CHANNELS = 0.9969E+04   * UNDERFLOW = 0.1200E+02   * OVERFLOW = 0.1900E+02
 * BIN WID = 0.6000E-01  * MEAN VALUE   =-0.3907E-02   * R . M . S = 0.9857E+00
\end{XMPfrac}
\NODOC{\end{minipage}\hfill
\begin{minipage}[t]{.465\textwidth}}
\begin{XMPfont}{6}

       TEST2
 HBOOK ID =  20                      DATE  02/09/89    NO =   2

 [ 30 x 30 channel lineprinter scatter-plot of Y versus X, both between
   -3. and 3.; character layout omitted ]

 * ENTRIES =  10000            * SATURATION AT= 255
 * SCALE   .,+,2,3,.,., A,B,   * STEP = 1              * MINIMUM=0
\end{XMPfont}
\NODOC{\end{minipage}}
\begin{XMP}
 ********************************************************
 * NTUPLE ID= 30       ENTRIES= 10000      N-TUPLE      *
 ********************************************************
 * Var numb * Name *     Lower      *      Upper        *
 ********************************************************
 *     1    *  X   *  -.422027E+01  *  0.386411E+01     *
 *     2    *  Y   *  -.411076E+01  *  0.378366E+01     *
 *     3    *  Z   *  0.485187E-04  *  0.179518E+02     *
 ********************************************************
 ===> Directory : //EXAM2/NTUPLE
          30 (N)   N-TUPLE
          10 (1)   TEST1
          20 (2)   TEST2
 ===> Directory : //EXAM2
         100 (1)   Test of HRNDM1
         110 (1)   Filled according to HTFUN1
         200 (2)   Test of HRNDM2
         210 (2)   Fill according to HTFUN2
  NREC  NWORDS  QUOTA(%)  FILE(%)  DIR. NAME
    34   34066       .21      .21  //EXAM2/NTUPLE
    41   40438       .26      .26  //EXAM2
\end{XMP}
\caption{Adding data to a HBOOK RZ file}
\label{FEX2IN}
\end{Fighere}
\newpage%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\Filename{H2HPLOT-interface}
\Section{18cm}{HPLOT interface for high quality graphics}
\label{HPLOTINT}
\index{HPLOT}
\index{HIGZ}
\HPLOT{} is a package of Fortran subroutines for producing \HBOOK{} output on
graphics devices or in PostScript.
It is designed to produce drawings and slides of a quality suitable for
presentations at conferences and for scientific publications.
It does not reproduce all the numerical information of the HBOOK output
routines, but it is not restricted by the line printer's poor resolution and
limited character set: it uses the full graphics capabilities of the targeted
output device.
HPLOT can access an HBOOK data structure and transform it
\index{HPLOT}
into drawings using the HIGZ graphics package.
Some of the available options are:
\begin{UL}
\item Predefined ISO standard paper sizes (A4, A3, etc.), horizontal or
      vertical orientation, with suitable margins. Other sizes are also
      possible.
\item Combination of several plots on the same page, either by windowing or by
      superimposition, or both, with different symbols to distinguish them.
\item Titles on the axes and text anywhere on the picture, using various
      fonts, containing e.g. Greek or special characters.
\item Three-dimensional surface representation for 2-dimensional histograms
      (with hidden-line and hidden-surface removal).
\item Colour (if the hardware allows it), hatching, grey levels,\ldots.
\end{UL}
As a simple example of the use of HPLOT, let us consider a program similar to
the one in Figure \ref{FEX2IN}.
After opening a file on unit 10 for the metafile output (Fortran \Lit{OPEN}
statement), the program books the histograms and the Ntuple, fills the Ntuple,
projects it onto the histograms and finally plots them.
The call to \Rind{HPLINT} initialises HPLOT and \Rind{HPLCAP} redirects the
metafile output to unit 10.
The parameters given to HPLOT instruct the program to output all histograms in
the current working directory to the metafile with the standard options, while
\Rind{HPLEND} closes the metafile.
The final call to \Lit{HINDEX} prints the one-page histogram index shown
below.
See the HPLOT user's guide~\cite{bib-HIGZHPLOT} for more details.
The result of the job and the resulting PostScript file can be compared to the
``lineprinter'' output in Figure \ref{FEX2IN}.
\begin{XMPt}{Example of a simple HPLOT program}
      PROGRAM HPTEST
      COMMON/PAWC/H(80000)
      DIMENSION X(3)
      CHARACTER*8 CHTAGS(3)
      DATA CHTAGS/' X ',' Y ',' Z '/
*.------------------------------------------------------------
      CALL HLIMIT(80000)
*            Open the metafile output file
      OPEN(UNIT=10,file='hplot.meta',form='formatted',status='unknown')
      CALL HBOOK1(10,'TEST1',100,-3.,3.,0.)
      CALL HBOOK2(20,'TEST2',30,-3.,3.,30,-3.,3.,250.)
      CALL HBOOKN(30,'N-TUPLE',3,' ',1000,CHTAGS)
*
      DO 10 I=1,10000
         CALL RANNOR(A,B)
         X(1)=A
         X(2)=B
         X(3)=A*A+B*B
         CALL HFN(30,X)
  10  CONTINUE
*
      CALL HPROJ1(10,30,0,0,1,999999,1)
      CALL HPROJ2(20,30,0,0,1,999999,1,2)
      CALL HPLINT(0)
      CALL HPLCAP(-10)
      CALL HPLOT(0,' ',' ',0)
      CALL HPLEND
      CALL HINDEX
      END
\end{XMPt}
\begin{Listing}
 Version 1.13/05 of HIGZ started

 ...........................................................................................................
 .                                                                                                         .
 .          HBOOK     HBOOK CERN    VERSION 4.13      HISTOGRAM AND PLOT INDEX            06/02/92         .
 .                                                                                                         .
 ...........................................................................................................
 .                                                                                                         .
 . NO    TITLE                   ID  B/C  ENTRIES  DIM  NCHA      LOWER       UPPER    ADDRESS   LENGTH    .
 .                                                                                                         .
 ...........................................................................................................
 .                                                                                                         .
 .  1    TEST1                   10   32    10000   1  X  100  -0.300E+01   0.300E+01    78388      144    .
 .                                                                                                         .
 .  2    TEST2                   20    8    10000   2  X   30  -0.300E+01   0.300E+01    78240      298    .
 .                                                     Y   30  -0.300E+01   0.300E+01    77963      268    .
 .                                                                                                         .
 .  3    N-TUPLE                 30    N                                                 77914       39    .
 .                                                                                                         .
 ...........................................................................................................

 MEMORY UTILISATION    MAXIMUM TOTAL SIZE OF COMMON /PAWC/      80000
\end{Listing}
\begin{center}
Output generated by HPLOT on printer with graphics capabilities
\end{center}
\epsfig{file=hbookc11.eps,width=\the\textwidth}
% Local Variables:
% mode: latex
% TeX-master: "hboomain"
% End: