\documentclass{article}
\usepackage[utf8]{inputenc}
\usepackage{amsmath}
\usepackage{bm}
\usepackage[a4paper, inner=2.5cm, outer=2.5cm, top=2.5cm, bottom=2.5cm]{geometry}
\usepackage{graphicx}
\graphicspath{{figures/}}
\title{Classical Laminate Theory}
\author{Andy Perez}
\date{February 2019}
\begin{document}
\setlength{\parindent}{0cm}
\renewcommand{\thefootnote}{\roman{footnote}}
\maketitle
This document provides a summarized walk-through of Classical Laminate Theory (CLT) using NASA Reference Publication 1351, \textit{Basic Mechanics of Laminate Composite Plates} \cite{nasa}, as a reference. Supplemental and original source information is also found in Jones' \textit{Mechanics of Composite Materials} \cite{jones}.
\section{Lamina Properties} \label{sec:lamina_properties}
Basic material properties are determined empirically and must be known in order to determine the lamina's characteristic matrices and the resulting loads and stresses. These values are:
\begin{center}
\begin{table}[h!]
\centering
\label{tbl:lamprop}
\vspace{1mm}
\begin{tabular}{cl}
Symbol & Description \\ \hline \hline
$\bm{E_{11}}$ & elastic modulus in the ply's 0$^{\circ}$-direction \\
$\bm{E_{22}}$ & elastic modulus in the ply's 90$^{\circ}$-direction \\
$\bm{\nu_{12}}$ & Poisson's ratio in the $2$-direction when the lamina is loaded in the $1$-direction \\
$\bm{G_{12}}$ & shear modulus in the $12$-plane \\
$\bm{t_{k}}$ & thickness of the lamina \\
$\bm{\alpha_{11}}$ & thermal expansion coefficient in the ply's 0$^{\circ}$-direction \\
$\bm{\alpha_{22}}$ & thermal expansion coefficient in the ply's 90$^{\circ}$-direction \\
$\bm{\beta_{11}}$ & hygral expansion coefficient in the ply's 0$^{\circ}$-direction \\
$\bm{\beta_{22}}$ & hygral expansion coefficient in the ply's 90$^{\circ}$-direction \\
$\bm{\theta}$ & the ply orientation (used for off-axis transformations)
\end{tabular}
\end{table}
\end{center}
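For readers implementing CLT in software, it is convenient to collect these empirical inputs in a small data structure before any matrices are computed. The following Python sketch is illustrative only (the class and field names are hypothetical and are not taken from the references); the later sketches in this document refer back to it:
\begin{verbatim}
# Illustrative container for the empirical lamina properties listed
# above. All names are hypothetical; units are up to the user, but they
# must be consistent (e.g. Pa and m throughout).
from dataclasses import dataclass

@dataclass
class Lamina:
    E11: float            # elastic modulus, ply 0-degree direction
    E22: float            # elastic modulus, ply 90-degree direction
    nu12: float           # major Poisson's ratio
    G12: float            # in-plane shear modulus
    t: float              # ply thickness
    theta: float = 0.0    # ply orientation, degrees
    alpha11: float = 0.0  # thermal expansion, 0-degree direction
    alpha22: float = 0.0  # thermal expansion, 90-degree direction
    beta11: float = 0.0   # hygral expansion, 0-degree direction
    beta22: float = 0.0   # hygral expansion, 90-degree direction

# Example with made-up (hypothetical) carbon/epoxy values:
ply = Lamina(E11=135e9, E22=10e9, nu12=0.30, G12=5e9, t=0.125e-3, theta=45.0)
\end{verbatim}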
\section{Characteristic Matrices of the Lamina} \label{sec:lamina_matrices}
Note that, in general, the subscript $k$ is used to denote that a value is a lamina value. The characteristic matrices for each lamina can be calculated as follows:
\begin{enumerate}
\item The \emph{reduced stiffness matrix} $\bm{Q_{k}}$ describes the elastic behavior of the ply in in-plane loading.\footnote{$Q_{k}$ is derived from Staab \cite{staab}, Eq. 3.9, while the values for $Q_{11}$, $Q_{22}$, and $Q_{12}$ are from Nettles \cite{nasa}, Eq. (10).}
\begin{equation} \label{var:Qk} % Staab Eq. 3.9
\bm{Q_{k}} = \left[ \begin{array}{ccc} Q_{11} & Q_{12} & 0 \\ Q_{12} & Q_{22} & 0 \\ 0 & 0 & Q_{66} \end{array} \right]
\end{equation}
where
$$ Q_{11} = \frac{E_{11}^{2}}{\left(E_{11} - \nu_{12}^{2} E_{22}\right)} $$
\vspace{1mm}
$$ Q_{22} = \frac{E_{11} E_{22}}{\left(E_{11} - \nu_{12}^2 E_{22}\right)} $$
\vspace{1mm}
$$ Q_{12} = \frac{\nu_{12} E_{11} E_{22}}{\left(E_{11} - \nu_{12}^2 E_{22}\right)} $$
\vspace{1mm}
$$ Q_{66} = G_{12} $$
\item The \emph{strain transformation matrix} $\bm{T_{\varepsilon}}$ is used to transform other characteristic matrices into the laminate coordinate system.\footnote{Staab \cite{staab}, Eq. 2.1}
\begin{equation} \label{var:Tepsilon} % Staab Eq. 2.1
\bm{T_{\varepsilon}} = \left[ \begin{array}{ccc} m^{2} & n^{2} & mn \\ n^{2} & m^{2} & -mn \\ -2mn & 2mn & m^{2} - n^{2} \\ \end{array} \right]
\end{equation}
where
$$m=\cos\theta$$
$$n=\sin\theta$$
$\theta$ is the relative orientation of the lamina with respect to the laminate coordinate system shown in Figure~\ref{fig:ply_axes}\footnote{\emph{ibid.}, Fig. 3.3}.
\begin{figure}[h] % Staab Figure 3.3
\centering
\includegraphics[scale=.25]{staab_fig_3-3.png}
\caption{On- and Off-axis Ply Orientations}
\label{fig:ply_axes}
\end{figure}
\item The \emph{stress transformation matrix} $\bm{T_{\sigma}}$, by comparison, is calculated by the following equation.\footnote{\emph{ibid.}, Eq. 2.3}
\begin{equation} \label{var:Tsigma}
\bm{T_{\sigma}} = \left[ \begin{array}{ccc} m^{2} & n^{2} & 2mn \\ n^{2} & m^{2} & -2mn \\ -mn & mn & m^{2} - n^{2} \\ \end{array} \right]
\end{equation}
It should be noted that if tensor notation (rather than engineering notation) is used for the strains, then the stress and strain transformation matrices are equal, $\bm{T_{\varepsilon}} = \bm{T_{\sigma}}$.
\item The \emph{transformed reduced stiffness matrix} $\bm{\overline{Q}_{k}}$ is calculated by modifying $Q_{k}$ with $T_{\sigma}$ and $T_{\varepsilon}$.\footnote{\emph{ibid.}, Section 3.2.2}
\begin{equation} \label{var:Qbar}
\bm{\overline{Q}_{k}} = \bm{T_{\sigma}}^{-1} \bm{Q_{k}} \bm{T_{\varepsilon}}
\end{equation}
\end{enumerate}
\section{Determining the $\bm{A}$, $\bm{B}$, and $\bm{D}$ Matrices}
When laminae are bonded together to form a laminate, there exist three matrices that characterize the stiffness of the laminate. These are the \emph{extensional stiffness matrix} $\bm{A}$, the \emph{extension-bending coupling matrix} $\bm{B}$, and the \emph{bending stiffness matrix} $\bm{D}$.
\subsection{The Extensional Stiffness Matrix}
The \emph{extensional stiffness matrix} $\bm{A}$ characterizes the axial, in-plane stiffness of the laminate and is defined
\begin{equation} \bm{A} = \left[ \begin{array}{ccc} A_{11} & A_{12} & A_{16} \\ A_{12} & A_{22} & A_{26} \\ A_{16} & A_{26} & A_{66} \\ \end{array} \right] \end{equation}
where
\begin{equation} A_{ij} = \sum_{k=1}^{n}\big[\overline{Q}_{ij}\big]_{k}\big(z_{k} - z_{k-1}\big) \end{equation}
\subsection{The Extension-Bending Coupling Matrix}
The \emph{extension-bending coupling matrix} $\bm{B}$ couples the extensional stiffness and the bending stiffness matrices. It is defined:
\begin{equation} \bm{B} = \left[ \begin{array}{ccc} B_{11} & B_{12} & B_{16} \\ B_{12} & B_{22} & B_{26} \\ B_{16} & B_{26} & B_{66} \\ \end{array} \right] \end{equation}
where
\begin{equation} B_{ij} = \frac{1}{2}\sum_{k=1}^{n}\big[\overline{Q}_{ij}\big]_{k}\big(z_{k}^{2} - z_{k-1}^{2}\big) \end{equation}
\subsection{The Bending Stiffness Matrix}
The \emph{bending stiffness matrix} $\bm{D}$ characterizes the stiffness of the laminate when subjected to bending loads and is defined:
\begin{equation} \bm{D} = \left[ \begin{array}{ccc} D_{11} & D_{12} & D_{16} \\ D_{12} & D_{22} & D_{26} \\ D_{16} & D_{26} & D_{66} \\ \end{array} \right] \end{equation}
where
\begin{equation} D_{ij} = \frac{1}{3}\sum_{k=1}^{n}\big[\overline{Q}_{ij}\big]_{k}\big(z_{k}^{3} - z_{k-1}^{3}\big) \end{equation}
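Before combining these three matrices, it may help to see the summations written out in code. The sketch below is illustrative only: it assumes NumPy is available, the function and variable names are hypothetical, and each ply's transformed reduced stiffness $\bm{\overline{Q}_{k}}$ is taken as a precomputed $3\times3$ array.
\begin{verbatim}
# Illustrative assembly of A, B and D from a ply stack.
# qbars: list of 3x3 NumPy arrays (one transformed reduced stiffness
# matrix per ply, bottom ply first); thicknesses: matching ply thicknesses.
import numpy as np

def stiffness_matrices(qbars, thicknesses):
    h = sum(thicknesses)
    z = [-h / 2.0]             # z-coordinates of the ply interfaces,
    for t in thicknesses:      # measured from the laminate midplane
        z.append(z[-1] + t)

    A = np.zeros((3, 3))
    B = np.zeros((3, 3))
    D = np.zeros((3, 3))
    for k, qbar in enumerate(qbars, start=1):
        A += qbar * (z[k] - z[k - 1])
        B += qbar * (z[k]**2 - z[k - 1]**2) / 2.0
        D += qbar * (z[k]**3 - z[k - 1]**3) / 3.0
    return A, B, D
\end{verbatim}
For a symmetric lay-up this construction gives $\bm{B}\approx\bm{0}$, which is a useful sanity check on an implementation.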
\subsection{The $\bm{ABD}$ Matrix}
Together, all three stiffness matrices fully characterize the laminate stiffness and can be used to relate applied loads to the resulting strains on a laminate and in its lamina.
This relationship is defined as
\begin{equation} \left\{\begin{array}{c} \bm{N} \\ \hline \bm{M} \end{array} \right\} = \left[\begin{array}{c|c} \bm{A} & \bm{B} \\ \hline \bm{B} & \bm{D} \end{array} \right] \left\{ \begin{array}{c} \bm{\varepsilon^{0}} \\ \hline \bm{\kappa} \end{array} \right\} \end{equation}
which, when expanded, becomes
\begin{equation} \left\{\begin{array}{c} N_{xx}'\\ N_{yy}'\\ N_{xy}'\\ \hline M_{xx}'\\ M_{yy}'\\ M_{xy}'\\ \end{array} \right\} = \left[ \begin{array}{ccc|ccc} A_{11} & A_{12} & A_{16} & B_{11} & B_{12} & B_{16} \\ A_{21} & A_{22} & A_{26} & B_{21} & B_{22} & B_{26} \\ A_{61} & A_{62} & A_{66} & B_{61} & B_{62} & B_{66} \\ \hline B_{11} & B_{12} & B_{16} & D_{11} & D_{12} & D_{16} \\ B_{21} & B_{22} & B_{26} & D_{21} & D_{22} & D_{26} \\ B_{61} & B_{62} & B_{66} & D_{61} & D_{62} & D_{66} \\ \end{array} \right] \left\{ \begin{array}{c} \varepsilon_{xx}^{0} \\ \varepsilon_{yy}^{0} \\ \gamma_{xy}^{0} \\ \hline \kappa_{xx} \\ \kappa_{yy} \\ \kappa_{xy} \end{array}\right\} \end{equation}
\section{Creating the ABD Matrix}
The ABD matrix is a $6\times6$ matrix that serves as a connection between the applied loads and the associated strains in the laminate. It essentially defines the elastic properties of the entire laminate. To assemble the ABD matrix, follow these steps:
\begin{enumerate}
\item Calculate the reduced stiffness matrix $\bm{Q_{k}}$ for each material used in the laminate (if a laminate uses only one type of composite material, there will be only one stiffness matrix). The stiffness matrix describes the elastic behavior of the ply in in-plane loading:
\begin{equation} \bm{Q_{k}} = \left[ \begin{array}{ccc} Q_{11} & Q_{12} & 0 \\ Q_{21} & Q_{22} & 0 \\ 0 & 0 & Q_{66} \end{array} \right] \end{equation}
where
$$ Q_{11} = \frac{E_{11}^{2}}{\left(E_{11} - \nu_{12}^{2} E_{22}\right)} $$
\vspace{1mm}
$$ Q_{12} = Q_{21} = \frac{\nu_{12} E_{11} E_{22}}{\left(E_{11} - \nu_{12}^2 E_{22}\right)} $$
\vspace{1mm}
$$ Q_{22} = \frac{E_{11} E_{22}}{\left(E_{11} - \nu_{12}^2 E_{22}\right)} $$
\vspace{1mm}
$$ Q_{66} = G_{12} $$
\item Calculate the transformed reduced stiffness matrix $\bm{\overline{Q}_{k}}$ for each ply based on the reduced stiffness matrix and fiber angle (a numerical sketch follows this list).
\begin{equation} \bm{\overline{Q}_{k}} = \left[ \begin{array}{ccc} \overline{Q}_{11} & \overline{Q}_{12} & \overline{Q}_{16} \\ \overline{Q}_{21} & \overline{Q}_{22} & \overline{Q}_{26} \\ \overline{Q}_{61} & \overline{Q}_{62} & \overline{Q}_{66} \end{array} \right] \end{equation}
where \\
$$ \overline{Q}_{11} = Q_{11}\cos^{4}(\theta) + 2\big(Q_{12} + 2Q_{66}\big)\cos^{2}(\theta)\cdot\sin^{2}(\theta) + Q_{22}\sin^{4}(\theta) $$
$$ \overline{Q}_{12} = \overline{Q}_{21} = Q_{12}\big(\cos^{4}(\theta) + \sin^{4}(\theta)\big) + \big(Q_{11} + Q_{22} - 4Q_{66}\big)\cos^{2}(\theta)\sin^{2}(\theta) $$
$$ \overline{Q}_{16} = \overline{Q}_{61} = \big(Q_{11} - Q_{12} - 2Q_{66}\big)\cos^{3}(\theta)\sin(\theta) - \big(Q_{22} - Q_{12} - 2Q_{66}\big)\cos(\theta)\sin^{3}(\theta) $$
$$ \overline{Q}_{22} = Q_{11}\sin^{4}(\theta) + 2\big(Q_{12} + 2Q_{66}\big)\cos^{2}(\theta)\cdot\sin^{2}(\theta) + Q_{22}\cos^{4}(\theta) $$
$$ \overline{Q}_{26} = \overline{Q}_{62} = \big(Q_{11} - Q_{12} - 2Q_{66}\big)\cos(\theta)\sin^{3}(\theta) - \big(Q_{22} - Q_{12} - 2Q_{66}\big)\cos^{3}(\theta)\sin(\theta) $$
$$ \overline{Q}_{66} = \big(Q_{11} + Q_{22} - 2Q_{12} - 2Q_{66}\big)\cos^{2}(\theta)\sin^{2}(\theta) + Q_{66}\big(\cos^{4}(\theta) + \sin^{4}(\theta)\big) $$
\item Calculate the laminate \emph{extensional stiffness matrix}, $\bm{A}$:
\begin{equation} \bm{A} = \left[\begin{array}{ccc} A_{11} & A_{12} & A_{16} \\ A_{21} & A_{22} & A_{26} \\ A_{61} & A_{62} & A_{66}\end{array}\right] \end{equation}
The individual terms of $\bm{A}$ are calculated by
\begin{equation} A_{ij} = \sum_{k=1}^{n} \big[\overline{Q}_{ij}\big]_{k} \left(z_{k} - z_{k-1}\right) \end{equation}
where $z_{k}$ and $z_{k-1}$ are the distances from the laminate midplane to the top and bottom faces of ply $k$.
\item Calculate the laminate \emph{coupling stiffness matrix}, $\bm{B}$:
\begin{equation} \bm{B} = \left[\begin{array}{ccc} B_{11} & B_{12} & B_{16} \\ B_{21} & B_{22} & B_{26} \\ B_{61} & B_{62} & B_{66}\end{array}\right] \end{equation}
where
\begin{equation} B_{ij} = \frac{1}{2} \sum_{k=1}^{n} \big[\overline{Q}_{ij}\big]_{k} \left(z_{k}^{2} - z_{k-1}^{2}\right) \end{equation}
\item Calculate the laminate \emph{bending stiffness matrix}, $\bm{D}$:
\begin{equation} \bm{D} = \left[\begin{array}{ccc} D_{11} & D_{12} & D_{16} \\ D_{21} & D_{22} & D_{26} \\ D_{61} & D_{62} & D_{66}\end{array}\right] \end{equation}
where
\begin{equation} D_{ij} = \frac{1}{3} \sum_{k=1}^{n} \big[\overline{Q}_{ij}\big]_{k} \left(z_{k}^{3} - z_{k-1}^{3}\right) \end{equation}
\item Assemble the $\bm{ABD}$ matrix:
\begin{equation} \bm{ABD} = \left[\begin{array}{c|c} \bm{A} & \bm{B} \\ \hline \bm{B} & \bm{D} \end{array} \right] \end{equation}
\end{enumerate}
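The trigonometric expressions in step 2 are easy to mistype, so a direct numerical transcription is useful for checking them. The following Python sketch is illustrative only (NumPy is assumed, the function names are hypothetical, and the inputs could be taken from the \texttt{Lamina} fields sketched earlier); its output is exactly what the $\bm{A}$, $\bm{B}$, and $\bm{D}$ summations consume:
\begin{verbatim}
# Illustrative computation of Q and Qbar for a single ply, following
# steps 1 and 2 above. All names are hypothetical.
import numpy as np

def reduced_stiffness(E11, E22, nu12, G12):
    den = E11 - nu12**2 * E22
    Q11 = E11**2 / den
    Q22 = E11 * E22 / den
    Q12 = nu12 * E11 * E22 / den
    return np.array([[Q11, Q12, 0.0],
                     [Q12, Q22, 0.0],
                     [0.0, 0.0, G12]])

def transformed_reduced_stiffness(Q, theta_deg):
    c = np.cos(np.radians(theta_deg))
    s = np.sin(np.radians(theta_deg))
    Q11, Q12, Q22, Q66 = Q[0, 0], Q[0, 1], Q[1, 1], Q[2, 2]
    Qb11 = Q11*c**4 + 2*(Q12 + 2*Q66)*c**2*s**2 + Q22*s**4
    Qb22 = Q11*s**4 + 2*(Q12 + 2*Q66)*c**2*s**2 + Q22*c**4
    Qb12 = Q12*(c**4 + s**4) + (Q11 + Q22 - 4*Q66)*c**2*s**2
    Qb16 = (Q11 - Q12 - 2*Q66)*c**3*s - (Q22 - Q12 - 2*Q66)*c*s**3
    Qb26 = (Q11 - Q12 - 2*Q66)*c*s**3 - (Q22 - Q12 - 2*Q66)*c**3*s
    Qb66 = (Q11 + Q22 - 2*Q12 - 2*Q66)*c**2*s**2 + Q66*(c**4 + s**4)
    return np.array([[Qb11, Qb12, Qb16],
                     [Qb12, Qb22, Qb26],
                     [Qb16, Qb26, Qb66]])
\end{verbatim}
A quick check on an implementation: with $\theta = 0$ the transformed matrix should reduce to $\bm{Q_{k}}$ itself.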
\section{Laminate Properties}
Overall laminate properties can be calculated from the $\bm{ABD}$ matrix.
\begin{equation} \left\{\begin{array}{c} \bm{N} \\ \hline \bm{M} \end{array} \right\} = \left[\begin{array}{c|c} \bm{A} & \bm{B} \\ \hline \bm{B} & \bm{D} \end{array} \right] \left\{ \begin{array}{c} \bm{\varepsilon^{0}} \\ \hline \bm{\kappa} \end{array} \right\} \end{equation}
\begin{equation} \left\{ \begin{array}{c} Q_{x} \\ Q_{y} \end{array} \right\} = \left[ \begin{array}{cc} A_{55} & A_{45} \\ A_{45} & A_{44} \end{array} \right]_{k} \left\{ \begin{array}{c} \gamma_{xz} \\ \gamma_{yz} \end{array} \right\}_{k} \end{equation}
where
\begin{equation} A_{ij} = c \sum_{k=1}^{n} \left[ \overline{Q}_{ij} \right]_{k} \left\{ \big(z_{k} - z_{k-1} \big) - \frac{4}{3h^{2}} \big(z_{k}^3 - z_{k-1}^3 \big) \right\} \end{equation}
where $i,j=4,5$ and $c=6/5$ for a rectangular section.
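As with the in-plane terms, this shear-corrected summation can be transcribed directly into code. The sketch below is illustrative only (NumPy is assumed, the function name is hypothetical, and each ply's transverse shear stiffnesses $\overline{Q}_{44}$, $\overline{Q}_{45}$, and $\overline{Q}_{55}$ must be supplied by the user):
\begin{verbatim}
# Illustrative evaluation of the shear-corrected transverse stiffness
# terms A44, A45 and A55; c = 6/5 applies to a rectangular section.
import numpy as np

def transverse_shear_stiffness(qbar_shear, thicknesses, c=6.0/5.0):
    # qbar_shear: list of 2x2 arrays [[Q55, Q45], [Q45, Q44]], one per ply
    h = sum(thicknesses)
    z = [-h / 2.0]
    for t in thicknesses:
        z.append(z[-1] + t)

    A_shear = np.zeros((2, 2))
    for k, q in enumerate(qbar_shear, start=1):
        A_shear += c * q * ((z[k] - z[k - 1])
                            - 4.0 / (3.0 * h**2) * (z[k]**3 - z[k - 1]**3))
    return A_shear
\end{verbatim}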
Generally speaking, the stiffness terms ($\overline{Q}_{44}$, $\overline{Q}_{55}$, etc.) associated with $Q_{x}$ and $Q_{y}$ are difficult to determine experimentally and are, therefore, approximated.
\begin{equation} \left\{\begin{array}{c} N_{xx}'\\ N_{yy}'\\ N_{xy}'\\ \hline M_{xx}'\\ M_{yy}'\\ M_{xy}'\\ \end{array} \right\} = \left[ \begin{array}{ccc|ccc} A_{11} & A_{12} & A_{16} & B_{11} & B_{12} & B_{16} \\ A_{21} & A_{22} & A_{26} & B_{21} & B_{22} & B_{26} \\ A_{61} & A_{62} & A_{66} & B_{61} & B_{62} & B_{66} \\ \hline B_{11} & B_{12} & B_{16} & D_{11} & D_{12} & D_{16} \\ B_{21} & B_{22} & B_{26} & D_{21} & D_{22} & D_{26} \\ B_{61} & B_{62} & B_{66} & D_{61} & D_{62} & D_{66} \\ \end{array} \right] \left\{ \begin{array}{c} \varepsilon_{xx}^{0} \\ \varepsilon_{yy}^{0} \\ \gamma_{xy}^{0} \\ \hline \kappa_{xx} \\ \kappa_{yy} \\ \kappa_{xy} \end{array}\right\} \end{equation}
where $N'$ and $M'$ are the total running loads, including thermal and hygral effects:
\begin{equation} N_{ij}' = N_{ij} + N_{ij}^T + N_{ij}^M \end{equation}
\begin{equation} M_{ij}' = M_{ij} + M_{ij}^T + M_{ij}^M \end{equation}
\section{Calculating Individual Ply Strains and Stresses}
Having calculated the $\bm{ABD}$ matrix for the laminate, it is then possible to calculate individual ply strains and stresses based on the loads, temperature change, and moisture change applied to the laminate.
\begin{enumerate}
\item Calculate the thermal expansion coefficients for each ply:
\begin{equation} \alpha_{xx} = \alpha_{11} \cos^{2}(\theta) + \alpha_{22} \sin^{2}(\theta) \end{equation}
\begin{equation} \alpha_{yy} = \alpha_{11} \sin^{2}(\theta) + \alpha_{22} \cos^{2}(\theta) \end{equation}
\begin{equation} \alpha_{xy} = 2\cos(\theta)\sin(\theta)\big(\alpha_{11} - \alpha_{22}\big) \end{equation}
\item Calculate the hygral expansion coefficients for each ply:
\begin{equation} \beta_{xx} = \beta_{11} \cos^{2}(\theta) + \beta_{22} \sin^{2}(\theta) \end{equation}
\begin{equation} \beta_{yy} = \beta_{11} \sin^{2}(\theta) + \beta_{22} \cos^{2}(\theta) \end{equation}
\begin{equation} \beta_{xy} = 2\cos(\theta)\sin(\theta)\big(\beta_{11} - \beta_{22}\big) \end{equation}
\item Calculate the thermal running loads:
\begin{equation} N_{xx}^{T} = \Delta T \sum_{k=1}^{n} \Big\{\big[\overline{Q_{11}}\alpha_{xx} + \overline{Q_{12}}\alpha_{yy} + \overline{Q_{16}}\alpha_{xy}\big]_{k}\big(z_{k} - z_{k-1}\big)\Big\} \end{equation}
\begin{equation} N_{yy}^{T} = \Delta T \sum_{k=1}^{n} \Big\{\big[\overline{Q_{12}}\alpha_{xx} + \overline{Q_{22}}\alpha_{yy} + \overline{Q_{26}}\alpha_{xy}\big]_{k}\big(z_{k} - z_{k-1}\big)\Big\} \end{equation}
\begin{equation} N_{xy}^{T} = \Delta T \sum_{k=1}^{n} \Big\{\big[\overline{Q_{16}}\alpha_{xx} + \overline{Q_{26}}\alpha_{yy} + \overline{Q_{66}}\alpha_{xy}\big]_{k}\big(z_{k} - z_{k-1}\big)\Big\} \end{equation}
\begin{equation} M_{xx}^{T} = \frac{\Delta T}{2} \sum_{k=1}^{n} \Big\{\big[\overline{Q_{11}}\alpha_{xx} + \overline{Q_{12}}\alpha_{yy} + \overline{Q_{16}}\alpha_{xy}\big]_{k}\big(z_{k}^{2} - z_{k-1}^{2}\big)\Big\} \end{equation}
\begin{equation} M_{yy}^{T} = \frac{\Delta T}{2} \sum_{k=1}^{n} \Big\{\big[\overline{Q_{12}}\alpha_{xx} + \overline{Q_{22}}\alpha_{yy} + \overline{Q_{26}}\alpha_{xy}\big]_{k}\big(z_{k}^{2} - z_{k-1}^{2}\big)\Big\} \end{equation}
\begin{equation} M_{xy}^{T} = \frac{\Delta T}{2} \sum_{k=1}^{n} \Big\{\big[\overline{Q_{16}}\alpha_{xx} + \overline{Q_{26}}\alpha_{yy} + \overline{Q_{66}}\alpha_{xy}\big]_{k}\big(z_{k}^{2} - z_{k-1}^{2}\big)\Big\} \end{equation}
\item Calculate the hygral expansion running loads:
\begin{equation} N_{xx}^{M} = \Delta M \sum_{k=1}^{n} \Big\{\big[\overline{Q_{11}}\beta_{xx} + \overline{Q_{12}}\beta_{yy} + \overline{Q_{16}}\beta_{xy}\big]_{k}\big(z_{k} - z_{k-1}\big)\Big\} \end{equation}
\begin{equation} N_{yy}^{M} = \Delta M \sum_{k=1}^{n} \Big\{\big[\overline{Q_{12}}\beta_{xx} + \overline{Q_{22}}\beta_{yy} + \overline{Q_{26}}\beta_{xy}\big]_{k}\big(z_{k} - z_{k-1}\big)\Big\} \end{equation}
\begin{equation} N_{xy}^{M} = \Delta M \sum_{k=1}^{n} \Big\{\big[\overline{Q_{16}}\beta_{xx} + \overline{Q_{26}}\beta_{yy} + \overline{Q_{66}}\beta_{xy}\big]_{k}\big(z_{k} - z_{k-1}\big)\Big\} \end{equation}
\begin{equation} M_{xx}^{M} = \frac{\Delta M}{2} \sum_{k=1}^{n} \Big\{\big[\overline{Q_{11}}\beta_{xx} + \overline{Q_{12}}\beta_{yy} + \overline{Q_{16}}\beta_{xy}\big]_{k}\big(z_{k}^{2} - z_{k-1}^{2}\big)\Big\} \end{equation}
\begin{equation} M_{yy}^{M} = \frac{\Delta M}{2} \sum_{k=1}^{n} \Big\{\big[\overline{Q_{12}}\beta_{xx} + \overline{Q_{22}}\beta_{yy} + \overline{Q_{26}}\beta_{xy}\big]_{k}\big(z_{k}^{2} - z_{k-1}^{2}\big)\Big\} \end{equation}
\begin{equation} M_{xy}^{M} = \frac{\Delta M}{2} \sum_{k=1}^{n} \Big\{\big[\overline{Q_{16}}\beta_{xx} + \overline{Q_{26}}\beta_{yy} + \overline{Q_{66}}\beta_{xy}\big]_{k}\big(z_{k}^{2} - z_{k-1}^{2}\big)\Big\} \end{equation}
where $\Delta M$ is the change in moisture content of the laminate (the hygral loads are driven by moisture uptake rather than by the temperature change $\Delta T$).
\item Calculate the inverse $\bm{ABD}$ matrix, $\bm{abd}$:
\begin{equation} \bm{abd} = \left[ \begin{array}{c|c} \bm{a} & \bm{b} \\ \hline \bm{b} & \bm{d} \end{array} \right] = \left[ \begin{array}{c|c} \bm{A} & \bm{B} \\ \hline \bm{B} & \bm{D} \end{array} \right]^{-1} \end{equation}
This inverse matrix is the \emph{compliance matrix} of the laminate.
\item Calculate midplane strains and curvatures induced in the laminate using the relationship between strain, compliance, and the applied load (a numerical sketch of steps 6--8 follows this list).
\begin{equation} \bm{\varepsilon} = \bm{abd}\cdot\bm{N} \end{equation}
Expanded, this equation becomes:
$$ \left[\begin{array}{c} \varepsilon_{xx}^{0} \\ \varepsilon_{yy}^{0} \\ \gamma_{xy}^{0} \\ \hline \kappa_{xx} \\ \kappa_{yy} \\ \kappa_{xy} \end{array}\right] = \left[\begin{array}{ccc|ccc} a_{11} & a_{12} & a_{16} & b_{11} & b_{12} & b_{16} \\ a_{21} & a_{22} & a_{26} & b_{21} & b_{22} & b_{26} \\ a_{61} & a_{62} & a_{66} & b_{61} & b_{62} & b_{66} \\ \hline b_{11} & b_{12} & b_{16} & d_{11} & d_{12} & d_{16} \\ b_{21} & b_{22} & b_{26} & d_{21} & d_{22} & d_{26} \\ b_{61} & b_{62} & b_{66} & d_{61} & d_{62} & d_{66} \\\end{array}\right] \cdot \left[\begin{array}{c} N_{xx} + N_{xx}^T + N_{xx}^M \\ N_{yy} + N_{yy}^T + N_{yy}^M \\ N_{xy} + N_{xy}^T + N_{xy}^M \\ \hline M_{xx} + M_{xx}^T + M_{xx}^M \\ M_{yy} + M_{yy}^T + M_{yy}^M \\ M_{xy} + M_{xy}^T + M_{xy}^M \\ \end{array}\right] $$
\item Individual ply strains are then calculated in the laminate $xy$-coordinate system by the equation:
\begin{equation} \left\{\begin{array}{c} \varepsilon_{xx} \\ \varepsilon_{yy} \\ \gamma_{xy} \end{array}\right\} = \left\{\begin{array}{c} \varepsilon_{xx}^{0} \\ \varepsilon_{yy}^{0} \\ \gamma_{xy}^{0} \end{array}\right\} + z \left\{\begin{array}{c} \kappa_{xx} \\ \kappa_{yy} \\ \kappa_{xy} \end{array}\right\} \end{equation}
\item Individual ply stresses are similarly calculated in the $xy$-coordinate system by the corresponding equation:
\begin{equation} \left[\begin{array}{c} \sigma_{xx} \\ \sigma_{yy} \\ \tau_{xy} \end{array}\right] = \left[ \begin{array}{ccc} \overline{Q}_{11} & \overline{Q}_{12} & \overline{Q}_{16} \\ \overline{Q}_{21} & \overline{Q}_{22} & \overline{Q}_{26} \\ \overline{Q}_{61} & \overline{Q}_{62} & \overline{Q}_{66} \end{array} \right] \cdot \left\{\begin{array}{c} \varepsilon_{xx} - \Delta T \alpha_{xx} - \Delta M \beta_{xx}\\ \varepsilon_{yy} - \Delta T \alpha_{yy} - \Delta M \beta_{yy}\\ \gamma_{xy} - \Delta T \alpha_{xy} - \Delta M \beta_{xy} \end{array}\right\} \end{equation}
\item Interlaminar shear forces on each ply are calculated.
\end{enumerate}
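As a closing illustration of steps 6 through 8, the following Python sketch recovers the midplane strains and then the ply-level strains and stresses. It is illustrative only: NumPy is assumed, the names are hypothetical, and the $6\times6$ $\bm{ABD}$ matrix and per-ply $\bm{\overline{Q}_{k}}$ arrays are taken to be available from the earlier sketches.
\begin{verbatim}
# Illustrative recovery of midplane strains/curvatures and ply stresses.
import numpy as np

def midplane_strains(ABD, loads):
    # loads: {Nxx', Nyy', Nxy', Mxx', Myy', Mxy'} including any thermal
    # and hygral contributions; returns {eps0 (3 terms), kappa (3 terms)}.
    abd = np.linalg.inv(ABD)                  # laminate compliance matrix
    return abd @ np.asarray(loads, dtype=float)

def ply_stress(qbar, eps_kappa, z, alpha=(0, 0, 0), beta=(0, 0, 0),
               dT=0.0, dM=0.0):
    # alpha, beta: (xx, yy, xy) expansion coefficients of this ply in the
    # laminate system; z: height of the evaluation point above the midplane.
    eps0 = np.asarray(eps_kappa[:3], dtype=float)
    kappa = np.asarray(eps_kappa[3:], dtype=float)
    eps = eps0 + z * kappa                                  # total strain
    free = dT * np.asarray(alpha) + dM * np.asarray(beta)   # free expansion
    return qbar @ (eps - free)              # {sigma_xx, sigma_yy, tau_xy}
\end{verbatim}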
\bibliographystyle{ieeetr}
\bibliography{clt}
\end{document}
{ "alphanum_fraction": 0.5379056848, "avg_line_length": 49.0594262295, "ext": "tex", "hexsha": "6df9f195da9c6ad3e254d003880cc619ed6b0bd7", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "84bcb6ea0b2583df4918fedcaeb8c8557cdc3473", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "sharkweek/brokkr", "max_forks_repo_path": "docs/ref/Classical Laminate Theory/Classical Laminate Theory.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "84bcb6ea0b2583df4918fedcaeb8c8557cdc3473", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "sharkweek/brokkr", "max_issues_repo_path": "docs/ref/Classical Laminate Theory/Classical Laminate Theory.tex", "max_line_length": 327, "max_stars_count": 3, "max_stars_repo_head_hexsha": "84bcb6ea0b2583df4918fedcaeb8c8557cdc3473", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "sharkweek/brokkr", "max_stars_repo_path": "docs/ref/Classical Laminate Theory/Classical Laminate Theory.tex", "max_stars_repo_stars_event_max_datetime": "2021-06-26T20:34:29.000Z", "max_stars_repo_stars_event_min_datetime": "2019-05-12T11:04:30.000Z", "num_tokens": 8465, "size": 23941 }
\documentclass[double]{amsart}
\usepackage[margin=3cm]{geometry} % See geometry.pdf to learn the layout options. There are lots.
\geometry{letterpaper} % ... or a4paper or a5paper or ...
%\geometry{landscape} % Activate for rotated page geometry
\usepackage[parfill]{parskip} % Activate to begin paragraphs with an empty line rather than an indent
\usepackage{float}
\usepackage{graphicx}
\usepackage{amssymb}
\usepackage{epstopdf}
\usepackage{setspace}
\usepackage{sidecap}
\usepackage[table,xcdraw]{xcolor}
\DeclareGraphicsRule{.tif}{png}{.png}{`convert #1 `dirname #1`/`basename #1 .tif`.png}
\title{Lab 8: Diffraction Interference}
\author{ Caspar \textsc{Lant}} % Author name
\date{\today} % Date for the report
\begin{document}
\bigskip
\maketitle % Insert the title, author and date
\begin{center}
Intermediate Experimental Physics I\\
\vspace{1.5cm}
\begin{tabular}{l r}
Section: & 002\\ \\
Date Performed: & November 18, 2015 \\ % Date the experiment was performed
Date Due: & December 2, 2015\\ \\
Partner: & Sam P. Meier \\ % Partner names
Professor: & Prof. Andrew Kent\\
Instructor: & David Mykytyn % Instructor/supervisor
\end{tabular}
\end{center}
\vspace{50mm}
\pagebreak
{\setstretch{1.3}
\paragraph{\textbf{The Objective} of this week's experiment is to prove that light is a wave by means of observing constructive and destructive interference between two rays of coherent light, as well as to explore the relationship between the angle of light incident on a screen, the distance from the screen at which diffraction occurs, and the pattern projected onto the screen.}
\section{Theoretical Background/ Abstract}
\paragraph{Way back in the seventeenth century, Christiaan Huygens (whom we remember from our studies of pendular motion) proposed that light--that glowy thing which we cannot do without--was a sort of wave. In the following series of experiments, we will prove him right.}
\paragraph{A pair of light waves, like all pairs of waves, can interfere with each other in a manner either constructive or destructive. Two light waves of equal wavelength, if out of phase, will interfere with one another \textit{periodically}. Consistent with its own waviness, light experiences diffraction when met with an aperture, or slit in this case, whose width is comparable to the wavelength of the incident light. After diffraction takes place, the once-parallel fronts of a wash of light become crescent-shaped. These crescent-shaped fronts can be drawn as radial rays emanating from the diffraction slit, which interfere with each other at regular intervals. Take two waves that begin at opposite ends of the slit, and hit the screen at the same point. The difference in the distance that these two waves travel must be an integer multiple of their common wavelength for maximum constructive interference to occur. It is at these points on the screen where we see our bands of greatest intensity. Similarly, ``dark points'' on the screen represent positions at which the difference in distance is an odd multiple of a half-wavelength; where the waves are 180$^{\circ}$ out of phase. For single-slit diffraction, these periodic areas of minima and maxima lack definition and decay quickly. The central maximum is far brighter than either of its neighboring maxima.
This can be seen in the following schematic and equations: \\ }
\paragraph{}
\begin{figure}[H]
\begin{minipage}{.49\textwidth}
\centering
\includegraphics[width=5.5cm]{sinslit.png}
\end{minipage}
%
\begin{minipage}{.49\textwidth}
\begin{equation} I = I_0 \left(\dfrac{\sin [\pi a \sin \theta / \lambda]}{\pi a \sin \theta / \lambda}\right)^{2} \end{equation}
\begin{equation} \sin\theta = \dfrac{m\lambda}{a} \qquad (m = \pm1, \pm2, ...) \end{equation}
\end{minipage}
\end{figure}
\paragraph{Our value for wavelength can be derived through the following: $\sin\theta = \frac{m\lambda}{a} \rightarrow \lambda = \frac{a\sin\theta}{m}$, where $m =1$ for the first two maxima. $\theta$ is given by $\tan^{-1}\left(\dfrac{d_{bands}}{D}\right)$, so $\lambda = a\sin\left[\tan^{-1}\dfrac{d_{bands}}{D}\right]$}
\newpage
\paragraph{Taking $\theta = 0$ in equation (1), to calculate the intensity of light at the central maximum, yields an undefined answer. With some clever math, we can take the derivative of the numerator and the denominator of the fraction, and see that the value for intensity at $\theta = 0$ is in fact $I_0$. This is justified by L'H\^{o}pital's Rule, which states that if both the numerator and the denominator go to 0 or $\pm\infty$, then the limit of the fraction is equal to the limit of the derivative of the numerator divided by the derivative of the denominator.}
\paragraph{In the case of the double-slit diffraction experiment, the bands of light projected onto the screen are much more uniform, as seen in the diagram below. The relationship between the angle of light incident on a screen, the distance from the screen at which diffraction occurs, and the intensity of the light incident on the corresponding section of screen can also be seen below.}}
\begin{figure}[H]
\begin{minipage}{.49\textwidth}
\centering
\includegraphics[width=8cm]{doubsli.png}
\end{minipage}
%
\begin{minipage}{.49\textwidth}
\begin{equation} I = I_0 \cos^2\left[\pi d \sin\theta / \lambda\right] \end{equation}
\begin{equation} I = I_0 \cos^2\left[\pi d \sin \theta / \lambda \right]\left(\dfrac{\sin [\pi a \sin\theta/\lambda]}{\pi a \sin\theta / \lambda}\right)^{2} \end{equation}
\end{minipage}
\end{figure}
\paragraph{The derivation for wavelength in a setup with two slits goes as follows: $\sin\theta = \frac{\text{path difference}}{\text{distance between slits}}$, which for a maximum (point of constructive interference) is equal to the wavelength divided by the distance between slits, or $\lambda/d$. We know from trigonometry that $\tan\theta = \frac{d_\text{bands}}{D_\text{screen}}$, so for small values of $\theta$, $\lambda = \frac{d\cdot d_{\text{bands}}}{D}$. You'll notice that this value for wavelength does not depend on the width of the slits. }
\section{Experimental Procedure}
\begin{enumerate}
\item Attach the laser to the supplied optical bench, as in the previous lab experiment. Turn it on.
\item Mount a screen on the opposing end of the optical bench, making sure that it is stationary and plumb to the laser beam.
\item Carefully measure the distance from the laser's aperture to the screen you have just erected.
\item Place the slide populated with single slits on the optical bench between the laser and screen, such that the beam shines through the narrowest slit.
\item You should see a diffraction pattern projected onto the screen. Measure the distance between the middle of the central maximum (brightest point) and the first dark band. This distance is equivalent to the distance between any neighboring maxima.
\item Reposition the slide such that the laser shines through the second slit. Measure and record the distance between maxima.
\item Repeat step 6 for the third and fourth slits.
\item Once finished, mount the slide with four pairs of slits in the path of the laser.
\item Record the values for distance between central maxima for each pair of slits, as well as the distance between ``sub maxima'' between each pair of bands, if you can.
\end{enumerate}
\section{Data Tables and Analysis}
\begin{table}[H]
\centering
\caption{Single Slit}
\label{tbl:single-slit}
\begin{tabular}{ >{\columncolor[HTML]{FFFFFF}}c | >{\columncolor[HTML]{FFFFFF}}c | >{\columncolor[HTML]{FFFFFF}}c | >{\columncolor[HTML]{EFEFEF}}c | >{\columncolor[HTML]{EFEFEF}}c }
{\color[HTML]{333333} Width of Slits (mm)} & {\color[HTML]{333333} Distance Between Bands (mm)} & Error in Distance (mm) & $\lambda$ (nm) & $\lambda_2$ (nm) \\ \hline
0.020 & 17.50 & $\pm$0.50 & 667.6 & 667.9 \\
0.040 & 8.20 & $\pm$0.50 & 625.9 & 626.0 \\
0.080 & 4.10 & $\pm$0.50 & 625.9 & 626.0 \\
0.160 & 1.90 & $\pm$0.50 & 580.1 & 580.2
\end{tabular}
\\ Distance Between Slits and Screen: 542 mm $\pm$ 5 mm
\end{table}
{\setstretch{1.3}
\paragraph{Computing the values for wavelength using the formula derived in section 1 gives us the wavelengths in the table above. Also included are values computed using the paraxial assumption: $\sin\theta\approx\tan\theta\approx\theta$. The range of values for red light, according to the French Institute of Wavelength Specification (Institut fran\c{c}ais de longueur d'onde Sp\'{e}cification), is between 620 and 750 nm, which wraps nicely around all but one of our values for wavelength.}
\paragraph{}
\begin{table}[H]
\centering
\caption{Double Slit}
\label{tbl:double-slit}
\begin{tabular}{c|c|c|c}
Distance Between Slits (mm) & Width of Slits (mm) & Distance Between Bands (mm) & Error (mm) \\ \hline
0.250 & 0.04 & 9.20 & $\pm$0.50 \\
0.500 & 0.04 & 8.50 & $\pm$0.50 \\
0.250 & 0.04 & 4.00 & $\pm$0.50 \\
0.500 & 0.04 & 3.90 & $\pm$0.50
\end{tabular}
\\ Distance Between Slits and Screen: 515 mm $\pm$ 5 mm
\end{table}
\section{Error Analysis}
\paragraph{We incurred substantial error in the calculation of the laser's wavelength from the double-slit data. Using the paraxial approximation, we can compute the error using the following formula, which gives us the table below. As you can see, most of our values for wavelength fall within the prescribed range of red light. The standard deviation in our first set of values, after corrections made for error, was 18.5 nanometers, or 2.87\% of the average value for wavelength. Our sources of error are hard to pin down, and few fall outside the inaccuracy of measurement, which isn't very exciting to talk about. You'll notice that our error in wavelength tends to increase with slit width, at least in the single-slit experiment.
The precision of our lab equipment has some finite value, but we treat given quantities like slit width and distance as ``errorless,'' because we are not given the value of the manufacturing tolerance.}
\paragraph{}
\begin{figure}[H]
\begin{minipage}{.43\textwidth}
\begin{equation} \delta\lambda = |\lambda||d_{\text{slits}}|\sqrt{\left( \dfrac{\delta d_\text{bands}}{d_\text{bands}} \right)^2+ \left(\dfrac{\delta D}{D}\right)^2} \end{equation}
\end{minipage}
%
\begin{minipage}{.27\textwidth}
\begin{table}[H]
\centering
Single Slit
\begin{tabular}{c|c|c}
$\delta\lambda$ (nm) & $\lambda + \delta\lambda$ (nm) & $\lambda - \delta\lambda$ (nm) \\ \hline
0.4 & 668.0 & 667.2 \\
1.5 & 627.4 & 624.3 \\
6.1 & 632.1 & 619.8 \\
24.4 & 604.6 & 555.7
\end{tabular}
\end{table}
\end{minipage}
%
\begin{minipage}{.27\textwidth}
\begin{table}[H]
\centering
Double Slit
\begin{tabular}{c|c|c}
$\delta\lambda$ (nm) & $\lambda + \delta\lambda$ (nm) & $\lambda - \delta\lambda$ (nm) \\ \hline
6.2 & 452.7 & 440.4 \\
24.5 & 849.7 & 800.5 \\
6.1 & 200.3 & 188.1 \\
24.3 & 403.0 & 354.3
\end{tabular}
\end{table}
\end{minipage}
\end{figure}
\paragraph{You'll also notice that our propensity for error increased in the measurement of wavelength from the double-slit experiment. This could be due to our use of the paraxial approximation, but is more likely a result of measuring the distance between light maxima. Measuring this quantity was a difficult task, as the markings on our measuring device were spaced on a similar scale as the light bands were. To mitigate error in future trials, I would recommend using a more accurate measuring device, as well as devising a system which didn't rely on the experimenter's ability to draw a straight line without moving the screen.}
\section{Questions}
\begin{enumerate}
%
\item{\textit{Note that the forward direction, $\theta$ = 0, is maximum in intensity, not a zero. Why?}
\begin{quote} We can use L'H\^{o}pital's Rule to compute the limit of the above equation as the angle goes to zero: $$\lim_{\theta\to0} \left( \dfrac{\lambda\sin\left[\frac{\pi a \sin\theta}{\lambda}\right]}{\pi a \sin\theta}\right)^2 = \lim_{\theta\to0} \left( \dfrac{\frac{\partial}{\partial \theta}\lambda\sin\left[\frac{\pi a \sin\theta}{\lambda}\right]}{\frac{\partial}{\partial \theta} \pi a \sin\theta}\right)^2 = \ \ $$$$\lim_{\theta\to0} \left(\dfrac{\pi a \cos\theta \cos[\frac{\pi a \sin\theta}{\lambda}]}{\pi a \cos\theta}\right)^2 =\lim_{\theta\to0} \cos^2\left(\dfrac{\pi a \sin\theta}{\lambda}\right) = 1$$ \ \ \ \ \ \ \ \ \ \ \ \ \ \ $ \huge\therefore I = I_0\biggr\rvert_{\theta = 0}$ \end{quote}}
\medskip
\item {\textit{For what angles is the intensity zero?}
\begin{quote} $$I = I_0 \cos^2\left[\pi d \sin \theta / \lambda \right]\left(\dfrac{\sin [\pi a \sin\theta/\lambda]}{\pi a \sin\theta / \lambda}\right)^{2}$$ The intensity is zero when $\sin [ \pi a \sin \theta / \lambda] = 0 $, or when $\theta = \sin^{-1}\left(\frac{m \lambda}{a}\right)$, as well as when $\cos^2\left[\pi d \sin \theta / \lambda \right] = 0$. $\cos x = 0$ when $x$ is an odd multiple of $\pi /2 $, so $d \sin \theta/\lambda = 4a \sin\theta/\lambda = \frac{\lambda(2m -1)}{a}$.
\[ \boxed{ \theta = \sin^{-1}\left(\dfrac{\lambda(2m-1)}{a} \right) } \]
\end{quote}}
\end{enumerate}
\paragraph{}
}
\end{document}
{ "alphanum_fraction": 0.6770536224, "avg_line_length": 68.7450980392, "ext": "tex", "hexsha": "86525f8d5e6d7e412eb87b61fc2766657363e6d1", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "1b4d45d9e915a84ecb80a39498850463bbc2d3be", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "caspar/PhysicsLab", "max_forks_repo_path": "08_DiffractionInterference/DiffractionInterference.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "1b4d45d9e915a84ecb80a39498850463bbc2d3be", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "caspar/PhysicsLab", "max_issues_repo_path": "08_DiffractionInterference/DiffractionInterference.tex", "max_line_length": 1437, "max_stars_count": 1, "max_stars_repo_head_hexsha": "1b4d45d9e915a84ecb80a39498850463bbc2d3be", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "caspar/PhysicsLab", "max_stars_repo_path": "08_DiffractionInterference/DiffractionInterference.tex", "max_stars_repo_stars_event_max_datetime": "2016-05-08T19:42:20.000Z", "max_stars_repo_stars_event_min_datetime": "2016-05-08T19:42:20.000Z", "num_tokens": 3893, "size": 14024 }
\subsection{Skills} \begin{longtable} {r|p{13cm}} \textsc{Hardware} & Design of Schematics, Footprints and Digital \& Analog Circuits\\ \textsc{Embedded} & Development of Baremetal Firmware, IoT \& Embedded Linux\\ \textsc{Robotics} & Development of AI, ML, Computer Vision \& Motion Control Systems\\ \textsc{Mechanics} & Design of Manufacturable Electro-Mechanical Systems\\ \textsc{Fabrication} & Additive \& Subtractive Manufacturing of Wood, Plastic \& Metal\\ \textsc{Manufacturing} & Design for Manufacturing, Assembly, Certification \& Cost\\ \textsc{Software} & Development of Desktop, Web, Mobile \& Cloud Applications\\ \textsc{DevOps} & Deployment of Build, Test, Monitoring \& Runtime Infrastructure\\ \textsc{Multimedia} & Design of Graphics \& Audio-Visual-Haptic Art Installations\\ \textsc{Deep Tech} & Sensor Design, BioTech, Bioinformatics \& NanoTech (Graphene)\\ \textsc{Entrepreneurship} & Business \& Financial Modeling, Investment, Recruitment \newline Performance Marketing, Networking \& Company Culture\\ \textsc{Leadership} & Design Thinking, Product Development Lifecycle, Agile Project \newline \& Team Management (SCRUM \& KANBAN), Team Building, \newline Goal-setting, Coaching (NLP) \& Expectation Management\\ \textsc{Social} & Community Building, Mentoring, Empowerment \& Sustainability\\ \end{longtable}
{ "alphanum_fraction": 0.7523739956, "avg_line_length": 76.0555555556, "ext": "tex", "hexsha": "7b053234f0b15a971cf28d89d0cac2537957159c", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "07f2bbb4403553b3d588150a8e8e051833430725", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "AravinthPanch/aravinth.info", "max_forks_repo_path": "aravinth-cv/content/work-skills.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "07f2bbb4403553b3d588150a8e8e051833430725", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "AravinthPanch/aravinth.info", "max_issues_repo_path": "aravinth-cv/content/work-skills.tex", "max_line_length": 213, "max_stars_count": null, "max_stars_repo_head_hexsha": "07f2bbb4403553b3d588150a8e8e051833430725", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "AravinthPanch/aravinth.info", "max_stars_repo_path": "aravinth-cv/content/work-skills.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 378, "size": 1369 }
\section{Winged Demon} asdf
{ "alphanum_fraction": 0.7333333333, "avg_line_length": 10, "ext": "tex", "hexsha": "ff7b44dc5f623bece8bc8c41e22978c0a103c089", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "73781f7cd7035b927a35199af56f9da2ad2c2e95", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "NTrixner/RaggedLandsPenAndPaper", "max_forks_repo_path": "npcs/fiends/wingeddemon.tex", "max_issues_count": 155, "max_issues_repo_head_hexsha": "73781f7cd7035b927a35199af56f9da2ad2c2e95", "max_issues_repo_issues_event_max_datetime": "2022-03-03T13:49:05.000Z", "max_issues_repo_issues_event_min_datetime": "2018-03-18T13:19:57.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "NTrixner/RaggedLandsPenAndPaper", "max_issues_repo_path": "npcs/fiends/wingeddemon.tex", "max_line_length": 23, "max_stars_count": 6, "max_stars_repo_head_hexsha": "73781f7cd7035b927a35199af56f9da2ad2c2e95", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "NTrixner/RaggedLandsPenAndPaper", "max_stars_repo_path": "npcs/fiends/wingeddemon.tex", "max_stars_repo_stars_event_max_datetime": "2022-02-03T09:32:08.000Z", "max_stars_repo_stars_event_min_datetime": "2018-03-13T09:33:31.000Z", "num_tokens": 8, "size": 30 }
% Sample file for KONA
% For ASCII pLaTeX2e
% This file requires article.cls
% 2017.01.13 by Nakanishi Printing Co., Ltd
% 2017.06.26 modified by Hao Shi, MSM, University of Twente, the Netherlands
% 2019.01.10 modified by Nakanishi Printing Co., Ltd
\documentclass[twocolumn, 10pt]{article}
\usepackage{amsmath,amssymb}
\usepackage{graphicx}
\usepackage[format=hang,labelfont=bf,textfont=small,singlelinecheck=false,justification=raggedright,margin={12pt,12pt},figurename=Fig.]{caption}
\usepackage{titlesec}
\usepackage{textcomp}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\makeatletter
\def\affiliation#1{\gdef\@affiliation{#1}}
\def\abstract#1{\gdef\@abstract{#1}}
\def\graphabst#1{\gdef\@graphabst{#1}}
\def\keywords#1{\gdef\@keywords{#1}}
\def\corresp#1{\gdef\@corresp{#1}}
\def\bioauthor#1#2{\centerline{\textbf{#1}}\par #2}
\newcommand{\MakeTitle}{
\newpage
\null
\vskip 2em%
\begin{center}%
\Large \@title\par
\vskip 1em%
\large \@author
\end{center}
\noindent\@affiliation\par
\vskip 1em%
\noindent\@corresp\par
\vskip 1em%
\noindent\@abstract\par
\noindent\@graphabst\par
\vskip 1em%
\noindent\@keywords\par
}
\makeatother
\setlength{\columnsep}{0.8cm}
\newcommand*{\TitleFont}{%
\usefont{\encodingdefault}{\rmdefault}{}{n}%
\fontsize{18}{12}%
\selectfont}
\titleformat{\section} {\normalfont\fontsize{10}{11}\bfseries}{\thesection.}{2pt}{}
\titlespacing*{\section}{0pt}{12pt}{6pt}
\titleformat{\subsection} {\normalfont\fontsize{10}{10}\bfseries}{\thesubsection.}{2pt}{}
\titlespacing*{\subsection}{0pt}{6pt}{0pt}
\titleformat{\subsubsection} {\normalfont\fontsize{10}{10}\bfseries}{\thesubsubsection.}{2pt}{}
\titlespacing*{\subsubsection}{0pt}{6pt}{0pt}
\renewcommand{\baselinestretch}{1.10}\normalsize
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\title{\TitleFont Type here your title, times new roman 18, centred. To prepare your paper use directly this template and simply replace this text by your text}
\author{Ling N. CUI$^{\, 1}$, Toyokazu YOKOYAMA$^{2\ast }$}
\affiliation{$^{1}$Affiliation, Address, \\ $^{2}$Affiliation if different from 1, Address if different from 1}
\corresp{$^{\ast }$Corresponding author: [email protected], Tel.:$+$81-72-867-1686; fax: $+$81-72-867-1658}
\abstract{\textbf{Abstract}: This document contains formatting instructions for preparing a paper for KONA. These formatting instructions comply with the rules set by Hosokawa Powder Technology Foundation (HPTF) for the publication of the papers in the series: KONA Powder and Particle Journal.\\ The manuscript title must be in ``Title Case'', i.e., when writing a name or a title, you should use capital letters only for the first word.\\ The title should be followed by the list of authors in the format given above, denoting the corresponding author with an asterisk. The author affiliations should be given next. Below that, the email address and the telephone and fax numbers of the corresponding author should be provided.\\ Start this abstract paragraph which should summarize the scope, aims, results and conclusions of the work, and should not exceed 200 words.
Below that, a graphical abstract should be provided, which should be a concise, visual summary of the article and will be displayed in the contents list both online and in print.\\ If you need assistance, please do not hesitate to contact the KONA editorial secretariat, or any further address you may have received for this purpose.}
\graphabst{
\begin{center}
%%%%\includegraphics[scale=0.65,angle=0]{Graphical_Abstract/KONA_graphical_abstract_v4.pdf}
\end{center}
}
\keywords{\textbf{Keywords:} powder, particle, the appropriate number of keywords is 5 or 6.}
\begin{document}
\onecolumn
\MakeTitle
\twocolumn
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Format And Type Fonts}
To prepare your paper, use directly this template and simply replace this text by your text. These instructions are to be followed strictly, and it is strongly advised to use the styles indicated in this document between square brackets. It is strongly advised NOT to use formatting or styles in your paper different from the ones mentioned here.
\subsection{Format}
The book size will be in A4 (210 x 297 mm). Left Margin 25 mm, Right Margin 20 mm, Top Margin 25 mm and Bottom Margin 25 mm. Please make sure that you do not exceed the indicated type area. The structure of manuscripts should follow this order: title, authors, affiliations, abstract, Graphical Abstract, keywords, main text, (acknowledgement), (appendix), (nomenclature), references. The items with parentheses are not mandatory. The maximum number of pages printed in KONA is supposed to be 15 for an original paper and 25 for a review paper. Do NOT include page numbers. Do NOT add Headers or Footers.
\subsection{Type font and type size}
The prescribed font is Times New Roman, 10 points, with an 11 pt line spacing (1.1 multiple lines), 1 column. However, if your text contains complicated mathematical expressions or chemical formulae, you may need to increase the line spacing. Running text should be justified.
\section{Section headings}
The way chapter titles and other headings are displayed in these instructions is meant to be followed in your manuscript. Level 1: Times New Roman, 11, Bold, 12 pt spacing before heading, 6 pt spacing below heading. Successive Levels: Times New Roman, 10, Bold, 6 pt spacing before heading, NO spacing below heading. Do NOT begin a new section directly at the bottom of the page, but transfer the heading to the top of the next page.
\section{(Foot)notes}
It is requested to minimize the usage of footnotes. All references should be in the References. Explanations should preferably be included in the text. (Foot)notes placed at the bottom of the page should fit within the type area. Separate them clearly from the text by adding two line spaces. Use Times New Roman 8 pt.
\section{Symbols and units, numbers }
If symbols are defined in a nomenclature section, symbols and units should be listed in alphabetical order with their definition and dimensions in SI units. In principle, variables are to be presented in italics. Please use the SI set of units as much as possible. Wherever the application domain uses a different set of units widely, please minimize the use of non-standard units or non-standard symbols for those units. As examples, the use of ``a'' for year (annum) is deprecated and the use of ``y'' is encouraged instead. Similarly, ``h'' should be used for hours instead of ``hr'' and ``t'' instead of ``ton'' or ``tonne''. It is important to take care with the capitalization of measurement units when they are typed. E.g.
``Km'' does not mean ``kilometers'', but ``Kelvin-meters''. Powers of e are often more conveniently denoted by exp. When providing numerical values followed by measurement units, please leave a regular space or non-breaking space between each value and the measurement unit. This also includes percentages and degrees Celsius (e.g. 42~{\%} or 35 {\%}, 234 \textdegree C, 504 K). This rule also applies to the unit for litre, which is recommended to be capital ``L''. The authors are encouraged to render the numbers according to the International rules, specifying the dot as a decimal separator and the comma as a thousand's separator. \section{Equations} Make sure that placing and numbering of equations is consistent throughout your manuscript. \begin{gather} \label{eq1} \gamma_{\mbox{T}} \frac{\mbox{d}x}{\mbox{d}t}\,=\,F_{\mbox{d}} \,\cos \varphi \,+\,\xi_{x} \mbox{(}t\mbox{)} \\ \label{eq2} \overline {\mbox{C}} (t)=\frac{1}{N}\sum\limits_{i=1}^N {C_{i} } (t) \end{gather} Left align the equation and put the number of the equation flush-right, using a Right Tab on the right margin. Please reference equations in the text by writing: Eqn. .. (do not use Equation ..) In principle, variables are to be presented in italics. \section{Figures and tables} \subsection{General} Figures and tables should be originals or sharp prints. Please use the SI set of units as much as possible. Figures and tables should be centered and placed either at the top or at the bottom of the page. Please do not render tables as pictures and please do not use too small font sizes in the illustrations. Please use the following fonts in your illustrations: Times New Roman, Symbol, or use fonts that look similar. If your figures and tables are created in a Microsoft Office application (Word, PowerPoint, Excel) then please supply 'as is' in the native format, too. Regardless of the application used other than Microsoft Office, when your electronic artwork is finalized, please 'Save as' or convert the images to one of the following formats (note the resolution requirements for line drawings, halftones, and line/halftone combinations given below): EPS (or PDF): Vector drawings, embed all used fonts. TIFF (or JPEG): Color or grayscale photographs (halftones), keep to a minimum of 300 dpi. TIFF (or JPEG): Bitmapped (pure black {\&} white pixels) line drawings, keep to a minimum of 1000 dpi. TIFF (or JPEG): Combinations bitmapped line/half-tone (color or grayscale), keep to a minimum of 500 dpi. The colour figures will appear in colour both on the Web (\underline{http://www.kona.or.jp}) and in the paper version. Authors are responsible for obtaining permission from the copyright holders to reproduce any figures, tables and photos for which copyright exists. And the copyright and permission notice should appear in table footnotes and figure captions. \subsection{Tables} Set table number and title flush left above table. Horizontal lines should be placed above and below table headings and at the bottom of the table. Vertical lines should be avoided. Title should use Times New Roman 10, italic, with 12 pt before and 4 pts after the paragraph, left justified at the top of the table. Tables have to be included into the text. If a table is too long to fit one page, the table number and heading should be repeated on the next page before the table is continued. Alternatively the table may be spread over two consecutive pages (first an even numbered, then an odd-numbered page) turned by 90\textdegree , without repeating the heading. 
\textbf{Table 1} Table title should be placed above the table and adjust text to table width. \begin{table}[htbp] \begin{center} \begin{tabular}{lll} \hline heading1 & heading2& heading3 \\ \hline Table size & can be & edited \\ \hline \end{tabular} \label{tab1} \end{center} $^{a}$ Remarks or references regarding fields or data in the table. Use and adjust text to table width. $^{b }$Remarks or references regarding fields or data in the table. Use [Style: KONA\textunderscore Footnote] and adjust text to table width. \end{table} \subsection{Figure captions [Style: KONA Caption]} \textbf{Fig. 1 } Captions should be placed below each illustration, font Times New Roman, 9 pts, with 12 pt before and 12 pts after the paragraph. Figures and figure captions should be placed flush-left; two narrow figures may be placed side-by-side. Please reference figures in the text by writing: \textbf{Fig. }(do not use Figure)\textellipsis , reprinted with permission from Ref. (Tsuji et al., 1992). Copyright: (1992) Elsevier B.V. \section{Concerning references } In order to give our readers a sense of continuity, we encourage you to identify KONA articles of similar research in your papers. Please, do a literature check of the papers published in KONA in recent years at www.kona.or.jp. Concerning references type, the alphabetical system should be adopted. Please use reference management softwares such as all products that support \underline{Citation Style Language styles} {\small (http://citationstyles.org/)}, such as Mendeley and Zotero, as well as \underline{EndNote} {\small (https://endnote.com/)} to manage references as far as possible. \underline{Citation Style Language styles} {\small (https://www.\linebreak[2]zotero.org/styles?q=id\%3Akona-powder-and-particle-journal)} (supported by all reference management softwares written in CSL, such as Mendeley, and Zotero, Papers, and many others) arranged for KONA journal is recommended to use for the preparation of the paper. \underline{Endnote Style} {\small (https://endnote.com/style\_download/\linebreak[2]kona-powder-and-particle-journal/)} (within Endnote-reference management software) arranged for KONA journal is recommended to use for the preparation of the paper. Citation in the text to literature, is given by the surname and initial of the author(s) followed by the year of publication, e.g. "Tsuji Y. (1993) has reported ..., which was recently confirmed (Mori Y. and Fukumoto Y., 2002)." For references with more than two authors, text citations should be shortened to the first author followed by "et al.'', e.g. "Hidaka J. et al. (1995) have recently shown ...." However, in the list of References the names and initials of all authors should be mentioned. Just ``et al.'' is neither ethical nor politically correct. Two or more references by the same author published in the same year are differentiated by the letters a, b, c, etc. immediately after the year. The references should be listed in alphabetical order in the list of References. The articles in press should be used only if they have been accepted and have already allocated their DOI (Digital Object Identifier). When you are referencing conference proceedings, page numbers should be provided. If proceedings are not available, the lecture identification -- e.g. lecture number should be provided instead. When you are referencing websites, an author or authoring institution should be provided. The date of the last access should be provided as well. 
The hyperlinks (blue colour and underlining) should be removed from email addresses and web references. You do not need to repeat http:// as modern browsers do not require it. However the date of the last access should be always provided. \section*{Acknowledgements} This scientific work was partly financed from the budget for sciences in the years 2010-2013 as Research Project No. NN209023739, and was also supported by Cummins Filtration Ltd. \newpage \section*{Nomenclature} Symbols and units should be listed in alphabetical order with their definition and dimensions in SI units. \noindent \begin{tabular}{lp{17em}} AIT & Auto-Ignition Temperature (usually Minimum Auto-Ignition Temperature) \\ CCPS & Center for Chemical Process Safety (USA) \\ ISD & Inherently Safer Design \\ LOC & Limiting Oxygen Concentration (below which explosion is not possible) \\ PVC & Polyvinylchloride \\ STP & Standard Temperature and Pressure \\ $A$ & surface of filter sample (mm$^{2})$ \\ $D$ & particle size ($\mu$m) \\ $l$ & length (m) \\ $m$ & mass (kg) \\ $P$ & pressure (Pa) \\ $t$ & time (s) \\ $\Delta t$ & time duration of explosion \\ $T$ & temperature (K) \\ $V$ & volume of a vessel (L) \\ $\alpha$ & filter average packing density (-) \\ $\varepsilon$ & aggregate porosity (-) \\ $\varepsilon_{F}$ & filter porosity (-) \\ $\lambda$ & gas mean free path (m) \\ $\mu$ & gas viscosity (Pa s) \\ $\rho$ & solid concentration in the cluster (-) \\ $\rho_{g }$ & gas density (kg m$^{-3})$ \\ \end{tabular} \section*{References } Bonzel H.P., Bradshaw A.M., Ertl G., Eds., Physics and Chemistry of Alkali Metal Adsorption, Elsevier, Amsterdam, 1989. Iwakura Y., Fujisawa Y., Toyonaga N., Oral disintegrant tablet containing risperidone and method for producing the same, JP Patent, (2013) JP2013060392A. Hosokawa K., Yokoyama T., Kondo A., Naito M., Application 19---Mechanical Synthesis of Composite Oxide and Its Application for SOFC Cathode, in: Naito M., Yokoyama T., Hosokawa K., Nogi K. (Eds.), Nanoparticle Technology Handbook (Third Edition), Elsevier, 2018, pp.505-510, ISBN: 9780444641106. DOI: 10.1016/B978-0-444-64110-6.00026-3 Rico-Ramirez V., Napoles-Rivera F., Gonzalez-Alatorre G., Diwekar U., Stochastic optimal control for the treatment of a pathogenic disease, Chemical Engineering Transactions, 21 (2010) 217--222. DOI: 10.3303/CET11226001 Tsuji Y., Tanaka T., Ishida T., Lagrangian numerical simulation of plug flow of cohesionless particles in a horizontal pipe, Powder Technology, 71 (1992) 239--250. DOI: 10.1016/0032-5910(92)88030-L WWF (World Wide Fund for Nature), 2002, Living planet report <www.wwf.de> accessed 20.01.2011. Zhao X., Wang S., Yin X., Yu J., Ding B., Slip-effect functional air filter for efficient purification of PM2.5, Scientific Reports, 6 (2016) 35472. DOI: 10.1038/srep35472 \end{document}
{ "alphanum_fraction": 0.7544186606, "avg_line_length": 56.771331058, "ext": "tex", "hexsha": "03361f6a40fbe4bf55c1855770edd2b5c15eb8cc", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "28c9c88198a047c32996c9515f3e60675b42be23", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "rubenandrebarreiro/tex-paper-articles-journals-templates", "max_forks_repo_path": "LaTeX/Overleaf/Academic Journals and Articles/KONA-Powder-and-Particle-Journal/Files/kona_template-2019.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "28c9c88198a047c32996c9515f3e60675b42be23", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "rubenandrebarreiro/tex-paper-articles-journals-templates", "max_issues_repo_path": "LaTeX/Overleaf/Academic Journals and Articles/KONA-Powder-and-Particle-Journal/Files/kona_template-2019.tex", "max_line_length": 667, "max_stars_count": 6, "max_stars_repo_head_hexsha": "28c9c88198a047c32996c9515f3e60675b42be23", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "rubenandrebarreiro/tex-paper-articles-journals-templates", "max_stars_repo_path": "LaTeX/Overleaf/Academic Journals and Articles/KONA-Powder-and-Particle-Journal/Files/kona_template-2019.tex", "max_stars_repo_stars_event_max_datetime": "2022-03-11T05:33:59.000Z", "max_stars_repo_stars_event_min_datetime": "2019-05-12T00:51:08.000Z", "num_tokens": 4290, "size": 16634 }
\documentclass[../thesis.tex]{subfiles}
\begin{document}

\chapter{Introduction}
\label{chap:Introduction}

It is probably a good idea to split your thesis up into several files, so you can work on them individually. This is an example of such a file, which ends up in the main file but can be compiled by itself without all the other chapters. This will save you tons of time.

\end{document}
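% A minimal sketch of the matching main file, shown here for context only.
% It assumes the subfiles package and a layout with thesis.tex one directory
% above a content/ folder holding the chapters; both are assumptions, not
% part of this template.
\documentclass{book}
\usepackage{subfiles} % lets each chapter compile on its own or with the whole thesis
\begin{document}
\subfile{content/introduction} % pulls in the chapter above
% further chapters are added the same way, e.g. \subfile{content/conclusions}
\end{document}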
{ "alphanum_fraction": 0.7493857494, "avg_line_length": 29.0714285714, "ext": "tex", "hexsha": "1deb5288f3aef450ca3455728f6ee76bb10802c8", "lang": "TeX", "max_forks_count": 5, "max_forks_repo_forks_event_max_datetime": "2022-02-04T12:51:10.000Z", "max_forks_repo_forks_event_min_datetime": "2016-04-07T18:53:10.000Z", "max_forks_repo_head_hexsha": "94717e0b34c1b39721e67f85ba15e70f0236057e", "max_forks_repo_licenses": [ "CC0-1.0" ], "max_forks_repo_name": "sp0500/latex-thesis-template", "max_forks_repo_path": "content/introduction.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "94717e0b34c1b39721e67f85ba15e70f0236057e", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "CC0-1.0" ], "max_issues_repo_name": "sp0500/latex-thesis-template", "max_issues_repo_path": "content/introduction.tex", "max_line_length": 75, "max_stars_count": 13, "max_stars_repo_head_hexsha": "94717e0b34c1b39721e67f85ba15e70f0236057e", "max_stars_repo_licenses": [ "CC0-1.0" ], "max_stars_repo_name": "sp0500/latex-thesis-template", "max_stars_repo_path": "content/introduction.tex", "max_stars_repo_stars_event_max_datetime": "2022-01-19T17:56:55.000Z", "max_stars_repo_stars_event_min_datetime": "2015-02-11T15:02:59.000Z", "num_tokens": 106, "size": 407 }
\chapter{Conclusions and Further Work}
\label{chap:concl}

\section*{}

We have developed a framework capable of predicting software defects from repositories, with a web-based graphical report. The creation of a learning mode for Schwa based on genetic algorithms gives researchers the ability to evaluate new features extracted from repositories, making Schwa a convenient framework for studying Mining Software Repositories (MSR). Schwa should be combined with other techniques, since it is not completely accurate. Code review is an example of an activity that can benefit from this tool, allowing developers to focus on the most important components. The use of Python allowed fast prototyping of ideas due to its simplicity and the existence of useful libraries. Mining Software Repositories is a time-consuming activity, so research in this area can benefit from the use of computing clusters.

\section{Goals satisfaction}

We successfully created a defect prediction technique based on MSR approaches that is capable of learning features, down to the method granularity for Java projects. Our initial goal of generalizing feature weights was refuted by the experimental results, which showed that the weights differ from project to project. Although we did not improve the accuracy of Barinel, we have come up with an alternative technique that computes defect probabilities in less time. For example, whereas Barinel can take 2 hours to run MLE for Joda Time, with Schwa this phase now takes less than 1 minute, which is a substantial improvement.

\section{Further work}

The technique used in Schwa for learning features can be improved with optimizations in the binary representation and with code parallelization. There are plenty of improvements that can still be made to Schwa:

\begin{itemize}
    \item Support more programming languages;
    \item Improve extraction performance by developing a Python module in C;
    \item Add charts for the evolution of revisions, fixes, and authors to the visualization, to support the results with more reasoning;
    \item Develop a SaaS platform for Schwa, similar to Codeclimate and Codacy.
\end{itemize}

MSR research could benefit from new techniques that reduce noise in the classification of bug-fixing commits, for instance by exploiting issue trackers. Schwa would benefit from reducing this noise. With more computational power, we could evaluate with more examples the gain of using Schwa in Crowbar, by finding cases where the diagnostic cost decreases.
{ "alphanum_fraction": 0.8192574459, "avg_line_length": 51.0625, "ext": "tex", "hexsha": "09aac3b3eaf1232a498641bfada8ab8428aa218e", "lang": "TeX", "max_forks_count": 9, "max_forks_repo_forks_event_max_datetime": "2021-02-07T02:53:17.000Z", "max_forks_repo_forks_event_min_datetime": "2015-05-14T09:31:15.000Z", "max_forks_repo_head_hexsha": "d09660e4b5bb665114c35ebe291e5620e59f4c4c", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "XiaoxueRenS/schwa", "max_forks_repo_path": "thesis/conclusions.tex", "max_issues_count": 5, "max_issues_repo_head_hexsha": "d09660e4b5bb665114c35ebe291e5620e59f4c4c", "max_issues_repo_issues_event_max_datetime": "2021-07-20T08:29:16.000Z", "max_issues_repo_issues_event_min_datetime": "2021-01-12T09:57:36.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "XiaoxueRenS/schwa", "max_issues_repo_path": "thesis/conclusions.tex", "max_line_length": 82, "max_stars_count": 9, "max_stars_repo_head_hexsha": "d09660e4b5bb665114c35ebe291e5620e59f4c4c", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "SBST-DPG/schwa", "max_stars_repo_path": "thesis/conclusions.tex", "max_stars_repo_stars_event_max_datetime": "2020-11-06T22:21:03.000Z", "max_stars_repo_stars_event_min_datetime": "2015-05-21T10:13:27.000Z", "num_tokens": 495, "size": 2451 }
There are language features in modern object-oriented programming languages, which do not exist in \eo{}, for example: multiple inheritance, annotations, encapsulation and information hiding, mixins and traits, constructors, classes, assertions, static blocks, aspects, NULL references, generics, lambda functions, exception handling, reflection, type casting, and so on. We assume that all of them may be represented with the primitive language features of \eo{}. There is no complete mapping mechanism implemented yet, but there are a few examples in this section that demonstrate how some features may be mapped from Java to \eo{}.

\subsection{Inheritance}

This Java code utilizes inheritance in order to reuse the functionality provided by the parent class \ff{Shape} in the child class \ff{Circle}:

\begin{twocols}
\begin{ffcode}
abstract class Shape { |$\label{ln:java-shape}$|
  private float height;
  Shape(float h) {
    height = h;
  }
  float volume() {
    return square() * height;
  }
  abstract float square();
} |$\label{ln:java-shape-end}$|
final class Circle extends Shape { |$\label{ln:java-cicle}$|
  private float radius;
  Circle(float h, float r) {
    super(h);
    radius = r;
  }
  float square() {
    return 3.14 * radius * radius;
  }
}; |$\label{ln:java-circle-end}$|
\end{ffcode}
\end{twocols}

The method \ff{volume} relies on the functionality provided by the abstract method \ff{square}, which is not implemented in the parent class \ff{Shape}: this is why the class is declared as \ff{abstract} and the method \ff{square} also has the modifier \ff{abstract}. It is impossible to make an instance of the class \ff{Shape}. A child class has to be defined, which will inherit the functionality of \ff{Shape} and implement the missing abstract method. The class \ff{Circle} does exactly that: it \ff{extends} the class \ff{Shape} and implements the method \ff{square} with the functionality that calculates the square of the circle using the radius. The method \ff{volume} is present in the \ff{Circle} class, even though it is implemented in the parent class.

This code would be represented in \eo{} as the following:

\begin{ffcode}
[child height] > shape
  [] > volume
    child.square.mul ^.height

[height radius] > circle |$\label{ln:eo-circle}$|
  shape $ height > @
  [] > square
    3.14.mul radius.mul radius |$\label{ln:eo-circle-end}$|
\end{ffcode}

There is no mechanism of inheritance in \eo{}, but decorating replaces it with a slight modification of the structure of objects: the parent object \ff{shape} has an additional attribute \ff{child}, which was not explicitly present in Java. This attribute is the link to the object that inherits \ff{shape}. Once \ff{volume} is used, the attribute refers to the child object and the functionality from \ff{circle} is used. The same mechanism is implemented in Java ``under the hood'': \eo{} makes it explicitly visible.

\subsection{Classes and Constructors}

There are no classes in \eo{} but only objects. Java, on the other hand, is a class-oriented language. In the snippet at the lines~\lrefs{java-shape}{java-circle-end}, \ff{Shape} is a class, and a better way of mapping it to \eo{} would be the following:

\begin{ffcode}
[] > shapes
  [c h] > new
    # Some extra functionality here, which
    # stays in the class constructor in Java
    []
      c > child
      h > height
      [] > volume
        child.square.mul ^.height
\end{ffcode}

Here, \ff{shapes} is the representation of the Java class \ff{Shape}. It technically is a factory of objects.
In order to make a new object, its attribute \ff{new} must be used, which is similar to the operator \ff{new} in Java. The functionality of a Java constructor may also be implemented in the attribute \ff{new}, such as a validation of inputs or an initialization of local variables not passed through the constructor.

\subsection{Mutability}

All objects in \eo{} are immutable, which means that their attributes can't be changed after an object is created. Java, on the other hand, enables mutability. For example, both \ff{height} and \ff{radius} in the lines~\lrefs{java-shape}{java-circle-end} are mutable attributes, which can be modified after an object is instantiated. However, the attribute \ff{radius} of the \eo{} object \ff{circle} at the lines~\lrefs{eo-circle}{eo-circle-end} can't be modified. This may be fixed by using the object \ff{memory}:

\begin{ffcode}
[height r] > circle
  memory r > radius
  shape $ height > @
  [] > square
    3.14.mul radius.mul radius
\end{ffcode}

An instance of the object \ff{memory} is created when the object \ff{circle} is created, with the initial value of \ff{r}. Then, replacing the object stored in the \ff{memory} is possible through its attribute \ff{write}:

\begin{ffcode}
circle 1.5 42.0 > c
c.radius.write 45.0
\end{ffcode}

This code makes an instance of \ff{circle} with the radius of \ff{42.0}. Then, the radius is replaced with \ff{45.0}.

\subsection{Type Reflection}

There are no types in \eo{}, while Java not only has at least one type for each object, but also enables the retrieval of this information at runtime. For example, it is possible to detect the type of the shape with this code:

\begin{ffcode}
if (s instanceof Circle) {
  System.out.println("It's a circle!");
}
\end{ffcode}

In \eo{} this meta-information about objects must be stored explicitly in an object attribute, in order to enable similar reflection on types:

\begin{ffcode}
[height radius] > circle
  "circle" > type
  # The rest of the object
\end{ffcode}

Now, checking the type of the object is as easy as reading the value of its attribute \ff{type}. The mechanism can be extended with additional information during the transition from Java to \eo{}, such as information about attributes, decoratee, etc.

\subsection{Exception Handling}

There are no exceptions in \eo{}, but there are objects that can't be dataized. Traditional Java \ff{try/catch/finally} statements may be represented by an object \ff{try} provided by the \eo{} runtime. For example, consider this Java code:

\begin{ffcode}
try {
  Files.write(file, data);
} catch (IOException e) {
  System.out.println("Can't write to file");
} finally {
  System.out.println("This happens anyway");
}
\end{ffcode}

It may be translated to \eo{}:

\begin{twocols}
\begin{ffcode}
try
  []
    files.write > @
      file
      data
  []
    stdout > @
      "Can't write to file"
  []
    stdout > @
      "This happens anyway"
\end{ffcode}
\end{twocols}

Now, throwing an exception is returning an object that can't be dataized, and handling the exception is checking whether the object has a $\varphi$ attribute or not. All of this is done by the object \ff{try}.

\subsection{Control Flow Statements}

Java has a few control flow statements, such as \ff{for}, \ff{while}, \ff{do}, \ff{if}, \ff{continue}, and \ff{break}. They don't exist in \eo{}.
However, \eo{} may have objects that implement the required functionality at runtime, often with the help of mutable objects:

\begin{ffcode}
while (i < 100) {
  if (i % 2 == 0) {
    System.out.println("even!");
  }
  i++;
}
\end{ffcode}

This code may be translated to \eo{} as the following:

\begin{ffcode}
[]
  memory > i
  while. > @
    i.less 100
    seq
      if.
        eq. (i.mod 2 0)
        stdout "even!"
      i.write (i.add 1)
\end{ffcode}

Here, \ff{while} and \ff{if} are the objects referred to as attributes of the object \ff{bool}, while \ff{i} is a mutable object.
{ "alphanum_fraction": 0.7166887417, "avg_line_length": 30.0796812749, "ext": "tex", "hexsha": "d0070c3ed9e384d0584374d24f3631b058e81ff2", "lang": "TeX", "max_forks_count": 71, "max_forks_repo_forks_event_max_datetime": "2021-01-14T08:21:09.000Z", "max_forks_repo_forks_event_min_datetime": "2016-11-08T14:54:36.000Z", "max_forks_repo_head_hexsha": "903535c625fe575a035a37d76c6d6c16e106f604", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "IngeniariusSoftware/eo", "max_forks_repo_path": "paper/sections/mappings.tex", "max_issues_count": 469, "max_issues_repo_head_hexsha": "903535c625fe575a035a37d76c6d6c16e106f604", "max_issues_repo_issues_event_max_datetime": "2022-03-30T12:48:11.000Z", "max_issues_repo_issues_event_min_datetime": "2021-01-14T14:39:58.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "IngeniariusSoftware/eo", "max_issues_repo_path": "paper/sections/mappings.tex", "max_line_length": 83, "max_stars_count": 493, "max_stars_repo_head_hexsha": "903535c625fe575a035a37d76c6d6c16e106f604", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "IngeniariusSoftware/eo", "max_stars_repo_path": "paper/sections/mappings.tex", "max_stars_repo_stars_event_max_datetime": "2021-01-14T10:48:30.000Z", "max_stars_repo_stars_event_min_datetime": "2016-11-07T08:22:37.000Z", "num_tokens": 1998, "size": 7550 }
\documentclass[notoc,notitlepage]{tufte-book}
% \nonstopmode % uncomment to enable nonstopmode

\usepackage{classnotetitle}
\title{Linear Algebra}
\author{Johnson Ng}
\subtitle{Personal Study Notes}
\credentials{BMath (Hons), Pure Mathematics major, Actuarial Science Minor}
\institution{University of Waterloo}

\input{latex-classnotes-preamble.tex}

\begin{document}

\hypersetup{pageanchor=false}
\maketitle
\hypersetup{pageanchor=true}

\tableofcontents

\chapter*{\faBook \enspace List of Definitions}
\addcontentsline{toc}{chapter}{List of Definitions}
\theoremlisttype{all}
\listtheorems{defn}

\chapter*{\faCoffee \enspace List of Theorems}
\addcontentsline{toc}{chapter}{List of Theorems}
\theoremlisttype{allname}
\listtheorems{axiom,lemma,thm,crly,propo}

\chapter*{Foreword}%
\addcontentsline{toc}{chapter}{Foreword}
\label{chp:foreword}
% chapter foreword
\begin{fullwidth}
  This is my attempt to revise an old topic with which I should be familiar, but am not.
\end{fullwidth}
% chapter foreword (end)

\appendix

\backmatter
\pagestyle{plain}
\nobibliography*
\bibliography{references}
\printindex

\end{document}
{ "alphanum_fraction": 0.786407767, "avg_line_length": 20.9814814815, "ext": "tex", "hexsha": "e0559f0fb72395be1af1cede24f316a0a82f43f9", "lang": "TeX", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2017-09-27T20:55:58.000Z", "max_forks_repo_forks_event_min_datetime": "2017-09-27T20:55:58.000Z", "max_forks_repo_head_hexsha": "5814c8682addc5dd6f9a323758f87e4c4ca57b8e", "max_forks_repo_licenses": [ "Unlicense" ], "max_forks_repo_name": "japorized/TeX_notes", "max_forks_repo_path": "LinearAlgebra/classnotes.tex", "max_issues_count": 1, "max_issues_repo_head_hexsha": "5814c8682addc5dd6f9a323758f87e4c4ca57b8e", "max_issues_repo_issues_event_max_datetime": "2021-03-29T17:58:51.000Z", "max_issues_repo_issues_event_min_datetime": "2021-03-29T17:58:51.000Z", "max_issues_repo_licenses": [ "Unlicense" ], "max_issues_repo_name": "japorized/TeX_notes", "max_issues_repo_path": "LinearAlgebra/classnotes.tex", "max_line_length": 86, "max_stars_count": 5, "max_stars_repo_head_hexsha": "5814c8682addc5dd6f9a323758f87e4c4ca57b8e", "max_stars_repo_licenses": [ "Unlicense" ], "max_stars_repo_name": "japorized/TeX_notes", "max_stars_repo_path": "LinearAlgebra/classnotes.tex", "max_stars_repo_stars_event_max_datetime": "2020-11-21T01:41:27.000Z", "max_stars_repo_stars_event_min_datetime": "2017-09-28T21:23:05.000Z", "num_tokens": 337, "size": 1133 }
\section{Experiments}
\label{sec:experiments}

In this section, we evaluate our model on the train and dev sets. We use glove.6B.100d as our word embedding and train our model for 140 epochs. We test our model on MSParS \cite{MSParS}. MSParS is a question-answering dataset; as we understand it, the task requires extracting named entities and then identifying the relations between these entities. After the experiments, we obtain 66.08\% BLEU and 93.0\% accuracy on the train set, and 65.63\% BLEU and 84.7\% accuracy on the dev set.
{ "alphanum_fraction": 0.7529880478, "avg_line_length": 50.2, "ext": "tex", "hexsha": "2795540fc44fceeceac208965a144f482ada3971", "lang": "TeX", "max_forks_count": 3, "max_forks_repo_forks_event_max_datetime": "2020-04-13T05:51:26.000Z", "max_forks_repo_forks_event_min_datetime": "2019-12-30T00:59:21.000Z", "max_forks_repo_head_hexsha": "dd1f050d57ea831320ab6e4c2b827714cc13dfe3", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "iofu728/Task", "max_forks_repo_path": "semantic/task3/final_paper/sections/experiment.tex", "max_issues_count": 1, "max_issues_repo_head_hexsha": "dd1f050d57ea831320ab6e4c2b827714cc13dfe3", "max_issues_repo_issues_event_max_datetime": "2019-05-23T14:37:42.000Z", "max_issues_repo_issues_event_min_datetime": "2019-05-23T11:10:50.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "iofu728/Task", "max_issues_repo_path": "semantic/task3/final_paper/sections/experiment.tex", "max_line_length": 170, "max_stars_count": 2, "max_stars_repo_head_hexsha": "dd1f050d57ea831320ab6e4c2b827714cc13dfe3", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "iofu728/Task", "max_stars_repo_path": "semantic/task3/final_paper/sections/experiment.tex", "max_stars_repo_stars_event_max_datetime": "2020-03-21T13:42:22.000Z", "max_stars_repo_stars_event_min_datetime": "2019-05-23T10:46:51.000Z", "num_tokens": 147, "size": 502 }
% !TeX program = pdfLaTeX
\documentclass[12pt]{article}
\usepackage{amsmath}
\usepackage{graphicx,psfrag,epsf}
\usepackage{enumerate}
\usepackage{natbib}
\usepackage{textcomp}
\usepackage[hyphens]{url} % not crucial - just used below for the URL
\usepackage{hyperref}

\providecommand{\tightlist}{%
  \setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}}

%\pdfminorversion=4
% NOTE: To produce blinded version, replace "0" with "1" below.
\newcommand{\blind}{0}

% DON'T change margins - should be 1 inch all around.
\addtolength{\oddsidemargin}{-.5in}%
\addtolength{\evensidemargin}{-.5in}%
\addtolength{\textwidth}{1in}%
\addtolength{\textheight}{1.3in}%
\addtolength{\topmargin}{-.8in}%

%% load any required packages here
\usepackage{amsfonts}
\usepackage{booktabs}
\usepackage{makecell}
\usepackage[usenames, dvipsnames]{color}
\usepackage{multirow}
\usepackage{comment}
\usepackage{longtable}
\usepackage{array}
\usepackage{wrapfig}
\usepackage{float}
\usepackage{colortbl}
\usepackage{pdflscape}
\usepackage{tabu}
\usepackage{threeparttable}
\usepackage{threeparttablex}
\usepackage[normalem]{ulem}
\usepackage{xcolor}

\newcommand{\beginsupplement}{\setcounter{table}{0}
  \renewcommand{\thetable}{S\arabic{table}}\setcounter{figure}{0}
  \renewcommand{\thefigure}{S\arabic{figure}}}

\begin{document}

\def\spacingset#1{\renewcommand{\baselinestretch}%
{#1}\small\normalsize} \spacingset{1}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\if0\blind
{
  \title{\bf Population pyramids yield accurate estimates of total fertility rates}
  \author{
    Mathew E. Hauer \thanks{All data and code for reproducing this analysis are available in Section 6. We thank A. Bronikowski, R. Lawler, and S. Alberts for their assistance with their primate data, and B. Jarosz and K. Devivo for feedback on earlier versions. Thanks y'all!} \\
    Department of Sociology, Florida State University\\
    and \\
    Carl P. Schmertmann \\
    Department of Economics, Florida State University\\
  }
  \maketitle
} \fi

\if1\blind
{
  \bigskip
  \bigskip
  \bigskip
  \begin{center}
    {\LARGE\bf Population pyramids yield accurate estimates of total fertility rates}
  \end{center}
  \medskip
} \fi

\bigskip
\begin{abstract}
The primary fertility index for a population, the total fertility rate (TFR), cannot be calculated for many areas and time periods because it requires disaggregation of births by mother's age. Here we discuss a flexible framework for estimating TFR using inputs as minimal as a population pyramid. We develop five variants, each with increasing complexity and data requirements. To evaluate accuracy we test using more than 2,400 fertility schedules with known TFR values, across a diverse set of data sources -- including the Human Fertility Database, Demographic and Health Surveys, U.S. counties, and nonhuman species. We show that even the simplest and least accurate variant has a median error of only 0.09 births/woman over 2,400 fertility schedules, suggesting accurate TFR estimation over a wide range of demographic conditions. We anticipate that this framework will extend fertility analysis to new subpopulations, time periods, geographies, and even species. To demonstrate the framework's utility in new applications, we produce subnational estimates of African fertility levels, reconstruct historical European TFRs for periods up to 150 years before the collection of detailed birth records, and estimate TFR for the U.S. conditional on race and household income.
\end{abstract} \noindent% {\it Keywords:} indirect estimation, total fertility, Bayesian models \vfill \newpage \spacingset{1.45} % DON'T change the spacing! \hypertarget{introduction}{% \section{Introduction}\label{introduction}} Fertility is the primary engine of global population change \citep{gerland2014} and is central to the United Nations' Sustainable Development Goals for female education, child and maternal mortality, gender equality, and reproductive health \citep{abel16}. The total fertility rate (TFR) is a critical component of population change, and scientists and practitioners use it in a wide range of applications. The conventional technique for calculating \(TFR\) is straightforward, but requires data on births disaggregated by age of mother. This makes \(TFR\) incalculable for: (i) countries and regions that lack detailed birth records or quality survey data, (ii) historical populations that predate vital event registration, (iii) small-area populations for which reporting agencies mask birth records for privacy reasons, and (iv) any subpopulation not identified on official birth records, such as women in a specific income decile, religion, tribe, or occupation. The need for disaggregation of births by mother's age thus limits fertility analysis -- mainly to large populations in contemporary countries with good vital registration systems or country-periods with quality survey data on fertility. Demographers have proposed various indirect estimation techniques to circumvent these limitations \citep{bogue64, rele67}. However, these methods often rely on variables (mean age at marriage, percent of women ever married, etc.) that may be absent from census or survey data. Thus existing indirect methods, much like direct calculation of \(TFR\), are typically limited to areas, time periods, and populations with sufficiently detailed data. In addition, relationships between fertility and social indices can differ over time and over populations, making indirect methods error-prone when applied outside of the context from which regression coefficients were derived \citep{tuchfeld74, hauer13}. Here we discuss a flexible framework for estimating \(TFR\). We derive a suite of five \(TFR\) estimators that overcome the limitations above. Two of these estimators are based on previous work \citep{hauer13, schmertmann2019bayesian}; three derivations are new. Our framework uses census or survey counts of population by age and sex as inputs, and it exploits demographic relationships between \(TFR\) and population pyramids. Its principles are straightforward and well known. In previous research, we demonstrated that errors for this method tend to be smaller than those for other indirect methods \citep{hauer13} and that minor modifications using commonly-available data lead to further improvements \citep{schmertmann2019bayesian}. In this article we present several new variants and offer a robust evaluation of the entire framework across a wide range of demographic situations. We describe the framework's derivation and evaluate the accuracy of several variants over a wide variety of databases. We test the accuracy using known \(TFR\)s for 2,403 fertility schedules, spanning 124 years, across various scales, mortality regimes, and fertility levels. We also offer examples of \(TFR\) estimation for new types of data. 
\hypertarget{methods-and-materials}{% \section{Methods and Materials}\label{methods-and-materials}} \hypertarget{demographic-relationships-between-tfr-and-age-sex-distributions}{% \subsection{\texorpdfstring{Demographic Relationships between \(TFR\) and age-sex distributions}{Demographic Relationships between TFR and age-sex distributions}}\label{demographic-relationships-between-tfr-and-age-sex-distributions}} Using \(f(a)\) to denote the density of fertility at exact age \(a\), the period total fertility rate is \(TFR= \int_{\alpha}^{\beta}\,f(a)\,da\), which is usually approximated as \begin{equation} TFR = \: n\cdot \sum_{a=\alpha}^{\beta-n}F_a =\: n\cdot \sum_{a=\alpha}^{\beta-n}\frac{B_a}{W_a}, \end{equation} \noindent where \([\alpha,\beta)\) is the reproductive age range, \(W_a\) is the mid-year population of women in the \(n\)-year age interval \([a,a+n)\) (hereafter called \textit{age group a}), \(B_a\) is the annual number of births to those women, and \(F_a\) is their average fertility rate. Demographers commonly use \((\alpha,\beta,n)=(15,50,5)\), in which case there are seven age groups with fertility rates \(F_{15},F_{20},\ldots F_{45}\), and \(TFR=5\,\cdot\,\sum F_a\). Data for population pyramids is also reported for age groups, usually with \(n=5\). Analysis of relationships between \(TFR\) and the relative numbers of women and children by age group requires consideration of several demographic factors. First, not all children born during the previous \(n\) years will still be alive at the time a population is enumerated. Second, not all women who gave birth over the past \(n\) years will still be alive to be counted. Third, surviving women in a given \(n\)-year age group at the time of enumeration were only in that age group for a fraction of the past \(n\) years. These are all familiar considerations for demographers. As we demonstrate in another paper \citep{schmertmann2019bayesian}, a slight rearrangement of standard Leslie matrix formulas \cite[e.g.~][]{wachter2014essential} for age groups of width \(n=5\) shows that the expected number of surviving children under five, per surviving woman in age group \(a\) at the end of a five-year period is \begin{eqnarray} \label{eq:Ca-from-components} C_{a}\, & = & \,\,\left[\frac{L_{a-5}}{L_{a}}\,\cdot F_{a-5}\;+\; F_{a}\right]\,\,\frac{L_{0}}{2}\label{eq:Ca}\\ & = & \,\, TFR\,\cdot\frac{L_{0}}{5}\cdot\,\frac{1}{2}\,\left(\frac{L_{a-5}}{L_{a}}\,\cdot\phi_{a-5}\;+\;\phi_{a}\right)\nonumber \\ & = & \,\, TFR\,\cdot s\cdot\, p_{a}\nonumber \end{eqnarray} \noindent where \(\phi_a = \tfrac{5 F_a}{TFR}\) is the fraction of lifetime fertility occurring in age group \(a\) for a synthetic cohort subject to current period rates; \(L_a\) is expected person-years lived in age group \(a\) in a life table with a radix \(l_0=1\); \(s=\tfrac{L_0}{5}\) is the expected fraction still alive among children born in the past five years; \(W_a\) is the observed women in age group \(a\); and \(W\) is the total number of women enumerated at childbearing ages \([15, 50)\).\footnote{We assume that fertility is zero outside of this range, so $F_{10}=0$ in equation (\ref{eq:Ca-from-components}) when $a=15$.} \(C_a\) is the product of three multiplicative factors: \(TFR\), child survival \(s\), and an age-specific term \(p_a\) that represents the proportion of lifetime fertility experienced over the past five years by females in age group \(a\). 
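As a purely illustrative reading of equation (\ref{eq:Ca}) -- the numbers here are hypothetical and not drawn from any dataset used in this paper -- suppose $TFR = 2.0$, child survival $s = 0.99$, and $p_a = 0.20$ for some age group $a$. Then
\begin{equation*}
C_a \,=\, TFR \cdot s \cdot p_a \,=\, 2.0 \times 0.99 \times 0.20 \,\approx\, 0.40,
\end{equation*}
that is, roughly 0.40 surviving children under five per woman enumerated in that age group.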
The expected total number of surviving 0-4 year olds is therefore \begin{equation} C\,=\,\sum_{a=15}^{45}\, W_{a}\, C_{a}\,=\, W \cdot p \cdot s \cdot TFR \label{eq:expected total C} \end{equation} \noindent where \(p=\tfrac{1}{W}{\sum W_a p_a}\) is the population-weighted mean of \(p_a\) values. A more intuitive version of equation (\ref{eq:expected total C}) in terms of units is \begin{equation*} \underbrace{\text{Children 0--4}}_{C}\,=\, \underbrace{\text{women}}_{W} \cdot \underbrace{\tfrac{\text{births in last 5 yrs}}{\text{lifetime births}}}_{p} \cdot \underbrace{\tfrac{\text{surviving children 0--4}}{\text{births in last 5 yrs}}}_{s} \cdot \underbrace{\tfrac{\text{lifetime births}}{\text{woman}}}_{TFR} \label{eq:expected total C in words} \end{equation*} \noindent We then rearrange equation (\ref{eq:expected total C}) as an expression for TFR: \begin{equation} TFR\,=\,\frac{1}{s}\cdot\frac{1}{p}\cdot\frac{C}{W} \label{eq:s0TFR} \end{equation} \hypertarget{interpretation}{% \subsection{Interpretation}\label{interpretation}} The most fundamental assumption behind any estimators of period \(TFR\) derived from equation (\ref{eq:s0TFR}) is that the number of young children observed in an age pyramid can, after suitable correction for child mortality, serve as a proxy for recent births to the women who are counted in that same age pyramid. The ``age pyramid'' in question could come from a national census or from a regional disaggregation of national populations. But it could also come from a survey in which one can identify households in a chosen category -- for example by education of householder, geographic location, or total income. In the latter cases we count the young children and the reproductive-age women within households that belong to the chosen category. As long as women and their own children are counted in the same age pyramid, changes caused by internal migration (or more generally, caused by any kind of change of category) are not a major concern. The relationships in equation (\ref{eq:s0TFR}) still hold for open populations. Changes of category or location can affect the \textit{interpretation} of \(TFR\), however. Our measures use fertility over a five-year period, and cross-sectional input data comes from the \textit{end} of that period. If changes of status are related to fertility, then indices that condition on end-of-period status may not always capture the rates that we would most like to have. For example, if single women are likely to marry quickly after having children, then an estimator based on cross-sectional data of unmarried women would accurately estimate ``period fertility of women who end up unmarried'' but not ``period fertility of women while unmarried''. Similar issues of interpretation would arise if central city residents tended to move to suburbs a short time after becoming parents. These difficulties are conceptual rather than empirical, but for stratified subpopulations it is important to understand how conditioning on end-of-period status affects interpretation of \(TFR\): indices derived from equation (\ref{eq:s0TFR}) describe the \textit{recent} fertility of those in the chosen category \textit{at the time of the cross-sectional census or survey}. \hypertarget{five-tfr-estimators}{% \subsection{\texorpdfstring{Five \(TFR\) estimators}{Five TFR estimators}}\label{five-tfr-estimators}} We derive five methods for estimating period \(TFR\) from the demographic relationship in equation (\ref{eq:s0TFR}), each with different data inputs. 
\textbf{\autoref{methoddeployment}} shows the input data for each variant (called \(iTFR\), \(xTFR\), \(iTFR^+\), \(xTFR^+\), and \(bTFR\)). It is possible to use the \(iTFR\) and \(xTFR\) variants with age pyramid data only. If \(q_5\) estimates are also available, then it is possible to use \(iTFR^+\), \(xTFR^+\), and \(bTFR\). \textbf{iTFR} The simplest approximation to equation (\ref{eq:s0TFR}) assumes that child mortality is close to zero (\(s\approx 1\)) over the first \(n\) years of life, and that women are uniformly distributed over 35 years of reproductive ages (\(p\approx \tfrac{n}{\beta-\alpha}=\tfrac{5}{35}=\tfrac{1}{7}\)). Following \citep{hauer13} we call the resulting estimator the \emph{implied total fertility rate (iTFR)}: \begin{equation} \label{eq:iTFR} iTFR = \frac{\beta - \alpha}{n}\cdot \frac{C}{W} = 7\cdot\frac{C}{W}\nonumber \end{equation} \noindent For human populations divided into five-year age groups (\((\alpha,\beta,n)=(15,50,5)\)) then \(iTFR=7\cdot\tfrac{C}{W}\). For other species or other age combinations, the \(\tfrac{C}{W}\) multiplier may differ based on differences in \((\alpha,\beta,n)\). \begin{table}[] \centering \caption{\textbf{Characteristics of alternative $TFR$ estimators.} bTFR is a probabilistic, Bayesian version using statistical distributions for unknown demographic quantities. Other estimators are deterministic.} \label{methoddeployment} \begin{tabular}{llccl} & & \multicolumn{2}{c}{Adjust for child mortality?} & \\ & & No & Yes & \\ \cline{3-4} \multicolumn{1}{c}{Use age distribution detail} & \multicolumn{1}{l|}{No} & \multicolumn{1}{c|}{iTFR} & \multicolumn{1}{c|}{iTFR\textsuperscript{+}} & \\ \cline{3-4} \multicolumn{1}{c}{for women 15-49?} & \multicolumn{1}{l|}{Yes} & \multicolumn{1}{c|}{xTFR} & \multicolumn{1}{c|}{xTFR\textsuperscript{+}, bTFR} & \\ \cline{3-4} & & \multicolumn{1}{l}{} & \multicolumn{1}{l}{} & \\ \cline{1-5} \end{tabular} \end{table} \textbf{xTFR} Our second estimator uses details from the population pyramid to improve the approximation of the \(\frac{1}{p}\) term in equation (\ref{eq:s0TFR}). The \(iTFR\) formula uses \(\tfrac{1}{p}=7\), which is correct if women enumerated in the age-sex pyramid experienced a (mortality-adjusted) average of one-seventh of lifetime fertility over the previous five years. In practice this is not exactly true, because reproductive-age women may be concentrated in high- or low-fertility age groups. For example, if the age pyramid has a high concentration of women in their late 20s and early 30s, then typical age patterns of human fertility make it likely that they have just passed through five especially high-fertility ages, that \(p>\tfrac{1}{7}\), and that the multiplier \(\tfrac{1}{p}<7\). Conversely, a high concentration of women 40--49 in the age pyramid implies a multiplier \(\tfrac{1}{p}>7\). Although \(\tfrac{1}{p}=7\), as in \(iTFR\), often leads to small errors, it is possible to improve the estimator by using additional details from the population pyramid. In previous research \citep{schmertmann2019bayesian} we noted that the necessary adjustments can be large for small populations like U.S. counties that have substantial variations in the age distributions of reproductive-age women. In order to learn about the \(\tfrac{1}{p}\) multipliers, we examined 1,804 fertility schedules in the \citet[HFD,~][]{HFD} for which the true \(TFR\) is known. 
For each country \(c\) and time \(t\) we calculated the average \(TFR\) over the five previous years, \(TFR^\ast_{ct} = \tfrac{1}{5}\sum_{k=0}^4\,TFR_{c,t-k}\), and the empirical values of \(TFR^\ast\) divided by observed child-woman ratios \(\tfrac{C_{ct}}{W_{ct}}\). In other words, we calculated the multipliers necessary to convert \(\tfrac{C}{W}\rightarrow TFR^\ast\) under the assumption of negligible child mortality. The \(iTFR\) formula assumes that this multiplier equals seven. In the HFD these multipliers are within 10\% of seven (6.3-7.7) in 88.6\% of country-years. As in previous empirical examples \citep{schmertmann2019bayesian}, there is a notable correlation between the age distribution of women and the multiplier. Using the proportion of women 25--34 among those who are 15--49 (\(\pi_{25-34}\)) as a predictor in a simple regression with HFD data produces the approximation \(TFR^\ast_{ct}/({\tfrac{C_{ct}}{W_{ct}} ) } \approx 10.65 -12.55 \,\pi_{25-34}\), which we use to define the \emph{extended TFR} or \(xTFR\) estimator: \begin{equation} xTFR \, = \, \left( 10.65 -12.55\, \pi_{25-34}\right) \cdot \frac{C}{W} \label{eq:xTFR} \end{equation} \(xTFR\) adjusts for non-uniform distributions of women within reproductive ages\footnote{The coefficient values 10.65 and -12.55 are appropriate when $W$ includes women $[15,50)$. Researchers could use a similar regression procedure with HMD data to produce different coefficients for other definitions of the reproductive age span.}. For any given child-woman ratio, \(xTFR\) produces a lower estimate for lifetime fertility when women are more concentrated in high-fertility age groups. \begin{comment} As summarized in Table \ref{xTFR-vs-iTFR}, replacing $iTFR$ with $xTFR$ yields small but meaningful improvements in predicting total fertility. Over HFD schedules the interquartile range of estimation errors is narrower for $xTFR$, and the mean absolute percentage error falls from a respectable 5.4\% for $iTFR$ to an even better 3.9\% for $xTFR$. The numbers in the table below are from [xTFR vs iTFR.R] in the SIDE-ANALYSIS directory \begin{table} \centering \caption{Distribution of $TFR$ estimation Errors over HFD Schedules} \label{xTFR-vs-iTFR} \begin{tabular}{rrrrr} Percentile & \multicolumn{2}{r} Arithmetic Errors & \multicolumn{2}{r} Percent Errors \\ & $iTFR$ & $xTFR$ & $iTFR$ & $xTFR$ \\ \hline Minimum & -0.77 & -0.78 & -20.6 & -21.5 \\ 25\%ile & -0.08 & -0.05 & -3.9 & -2.3 \\ Median & 0.01 & 0.01 & 0.5 & 0.7 \\ 75\%ile & 0.09 & 0.06 & 5.5 & 3.6 \\ Maximum & 0.56 & 0.37 & 28.1 & 15.5 \\ Mean Absolute Error & 0.11 & 0.09 & 5.4 & 3.9 \\ \hline \end{tabular} \end{table} \end{comment} \textbf{\(\mathbf{iTFR^+}\) and \(\mathbf{xTFR^+}\)} Our derivations for \(iTFR\) and \(xTFR\) assume no child mortality, so that the survival multiplier in Equation \ref{eq:s0TFR} is (\(\frac{1}{s}\approx 1\)). These simple formulations are parsimonious and generally accurate for populations with low to moderate mortality. However, when mortality levels are higher it is logical that \(TFR\) would be underestimated and that errors would increase. Our third and fourth estimators, denoted \(iTFR^+\) and \(xTFR^+\), approximate the \(\tfrac{1}{s}\) component in equation (\ref{eq:s0TFR}) using estimated under-five mortality. Specifically, we use \(\tfrac{1}{s}\approx \tfrac{1}{1-0.75\,q_5}\), which is based on both logical and empirical relationships between life table variables. 
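As a purely illustrative calculation with a hypothetical mortality level: an estimate of $q_5 = 0.04$ implies $\hat{s} = 1 - 0.75(0.04) = 0.97$, so the mortality correction is $\tfrac{1}{\hat{s}} \approx 1.03$, raising the estimated $TFR$ by about three percent relative to the uncorrected $iTFR$ or $xTFR$.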
In 266 years of Swedish HMD data, with under-five survival rates ranging from 0.661 (in 1773) to 0.998 (in 2014), the approximation \(\hat{s}=1-0.75\,q_5\) had a correlation 0.999 with \(s\). Over these 266 years replacing true multipliers \(\tfrac{1}{s}\) with approximations \(\tfrac{1}{\hat{s}}\) in Equation (\ref{eq:s0TFR}) would produce very slight underestimates of \(TFR\), ranging from -3.3\% to -0.03\% with a median underestimate of -0.93\%. We call the variants that include simple adjustments for child mortality \(iTFR^+\) and \(xTFR^+\). Specifically, \begin{eqnarray} iTFR^+ \, & = & \left( \frac{7}{1-0.75\,q_5} \right) \cdot \frac{C}{W} \\ xTFR^+\, & = & \left( \frac{10.65 -12.55 \pi_{25-34}}{1-0.75\,q_5} \right) \cdot \frac{C}{W} \end{eqnarray} \textbf{A probabilistic model: \(\mathbf{bTFR}\)} Our fifth estimator, \(bTFR\), is a fully probabilistic Bayesian model. It uses details from the population pyramid about female age structure within reproductive ages, and it requires an estimate of under-five mortality. The Bayesian approach treats the number of children as a Poisson random variable, and treats the demographic quantities in Equations (\ref{eq:Ca-from-components}) and (\ref{eq:expected total C}) as uncertain. We derive prior distributions for the fertility and mortality patterns that determine \(p\) and \(s\) from large demographic databases \citep{HFD, HMD}. We define the \(bTFR\) estimator as the median of the marginal posterior distribution of a population's \(TFR\), conditional on observed \(C\) and \((W_{15}\ldots W_{45})\). Because they are probabilistic, \(bTFR\) estimators automatically produce uncertainty measures as well as point estimates. We describe this method in detail in another paper \citep{schmertmann2019bayesian}. We briefly summarize here. \emph{Fertility Parameters} The proportion of lifetime fertility experienced by women in the five years before a census or survey depends on their age distribution (which is observed), and on the relative levels of fertility in different age groups (which are uncertain). In order to model this uncertainty we decompose the fertility schedule for 5-year age groups into level and shape components, \((F_{15},...,F_{45})= \frac{TFR}{5} \cdot (\phi_{15},...,\phi_{45})\). As in \citep{schmertmann2019bayesian} we model the proportions \(\phi_{15}...\phi_{45}\) in terms of log odds, \(\gamma_{a}=\ln(\frac{\phi_a}{\phi_{15}})\) for \(a=15...45\) and then translate as \(\phi_a(\gamma)= \frac{\exp(\gamma_{a})}{\sum_{z}\exp(\gamma_{z})}\). By construction, these seven \(\phi_a\) values are positive and sum to one. Our model for the \(\gamma\) indices is \(\gamma=m+X\beta\) where \(m=(\begin{smallmatrix} 0 &1.39 &1.59 &1.23 &0.45 &-0.89 &-3.44\end{smallmatrix})^\prime\) and \(X= \left(\begin{smallmatrix}0 & 0.27 & 0.54 & 0.73 & 0.88 & 1.04 & 1.52\\0 & 0.32 & 0.51 & 0.51 & 0.35 & 0.05 & -0.72 \end{smallmatrix}\right)^{\prime}\) are constants derived from empirical data and \(\beta\) are unknown shape parameters.\footnote{The highest values in $m$ correspond to age groups $[20,25)$ and $[25,30)$, so that those age groups represent the highest shares of lifetime fertility. Because the first column of $X$ has monotonically increasing values, the first element of $\beta$ affects the mean age of childbearing: higher values raise the log odds of late relative to early fertility. 
Similarly, the second element of $\beta$ affects the variance of age-specific fertility, with higher $\beta_2$ causing higher concentration of fertility in the 20s and lower variance.} See Schmertmann and Hauer \citeyearpar{schmertmann2019bayesian} for details. When combined with a prior distribution \(\beta\sim N(0,I_2)\) this model assigns higher prior probabilities to fertility age patterns that are more similar to those in the HFD and the U.S. Census Bureau's International Database \citep{CensusIDB}. We use an uninformative prior for \(TFR\): \(TFR\sim \text{Uniform}(0,20)\). This allows the level of total fertility to be determined almost completely by the data, rather than by prior assumptions. \emph{Mortality Parameters} We model child and adult mortality with a relational mortality model \citep{wilmoth2012flexible}, in which two parameters describe the complete pattern of mortality rates by age: the probability of death before age five (\(q_5\)), and a shape parameter \(k\) with typical values between -2 and +2. The model uses fixed constants \(\left\{a_x,b_x,c_x,v_x\right\}\) derived from mortality schedules in the Human Mortality Database: \begin{equation} \ln\,\mu_{x}(q_{5},k) = a_{x}+b_{x}\,\left[\ln\, q_{5}\right]+c_{x}\,\left[\ln\, q_{5}\right]^{2}+v_{x}\, k\quad,\quad x=0,1,5,10 \ldots 45 \end{equation} \noindent We use standard demographic calculations to convert these log mortality rates into the \(L_{a}\) values in Equation (\ref{eq:Ca}). To account for possible errors in estimated mortality, we use a beta distribution for the true value of \(q_5\). Our prior is \(q_5 \sim Beta(a,b)\) with \(a\) and \(b\) such that \(P[q_{5}<\tfrac{1}{2}\,\min(\hat{q}_{5})]\:=\: P[q_{5}>2\,\max(\hat{q}_{5})]\:=\:.05\). This prior allows for a considerable amount of possible error in the \(q_5\) estimate used as input: it assigns a 90\% prior probability that the true value of \(q_5\) is between one-half and twice its estimated value. For the shape parameter \(k\) our prior is \(k \sim N(0,1)\). We assume \emph{a priori} that mortality parameters \(q_5\) and \(k\) are independent. \emph{Complete bTFR Model} Any set of parameters (\(TFR\), \(\beta\), \(q_5\), \(k\)) implies specific values \(C_a\) in equation (\ref{eq:Ca}). The expected number of surviving children to the \(W_{a}\) women observed in age group \(a\) is \(W_{a}C_{a}\), and the observed number of surviving children to these women is a Poisson random variable with mean \(W_{a}C_{a}.\)\footnote{A Poisson model assumes equality between the mean and variance of the number of surviving children. This strong assumption generally does not hold in practice \cite[e.g.~][Figure 1]{Barakat_36_26}. Individual heterogeneity in age-specific rates would tend to produce overdispersion (variance $>$ mean), while strong social norms about childbearing might tend to produce underdispersion (variance $<$ mean). In a comprehensive empirical study of cohort parity, Barakat \citeyear{bilal_barakat_revisiting_2014} provides evidence for effects in both directions. Underdispersion is more common at low parities, and vice versa. 
The good performance of the $bTFR$ estimator in our empirical tests suggests that the Poisson model is adequate for estimating $TFR$ from age-sex distributions.} Assuming statistical independence across maternal age groups, the total number of children also has a Poisson distribution: \begin{equation} C|W,TFR,\beta,q_5,k \sim Poisson \Bigg[ \sum_{a=15}^{45}W_a\, C_a(TFR,\beta,q_5,k) \Bigg] \label{eq:poisson distribution of C} \end{equation} The posterior distribution of parameters conditional on age pyramid data is therefore \begin{equation} P(TFR,\beta, q_5,k | C,W) \propto L(C|W,TFR,\beta,q_5,k) f_\beta(\beta)f_q(q_5)f_k(k) \label{eq:joint posterior} \end{equation} \noindent where the likelihood \(L\) is Poisson and the \(f\) functions represent the prior densities for unknown parameters. The flat prior for \(TFR\) does not affect the posterior distribution over the range \(TFR\in[0,20]\). The marginal posterior for TFR provides the relative probabilities of alternative fertility levels, given the number of children \(C\) and the counts of women \(W_{15}\ldots W_{45}\) in the observed age pyramid. We sample from the joint posterior distribution (\ref{eq:joint posterior}) using Markov Chain Monte Carlo (MCMC) methods, programmed in \textit{Stan} \citep{stanjss2017} using the \textit{rstan} package in \textit{R} \citep{rstanpackage, Rlanguage}, and we estimate the marginal posterior of \(TFR\) using the empirical density of sampled \(TFR\) values. \hypertarget{data}{% \section{Data}\label{data}} In order to evaluate the accuracy of the five estimators over as many different data situations as possible, we use four data sources that together comprise 2,403 fertility schedules. \hypertarget{human-fertility-databasehuman-mortality-database}{% \subsection{Human Fertility Database/Human Mortality Database}\label{human-fertility-databasehuman-mortality-database}} We first benchmark the estimators against the \citet{HFD}, the most complete and accurate dataset available on current and historical patterns of human fertility. The HFD covers fertility schedules for 31 countries between 1891 and 2015, containing 1,958 country-years of age-specific and total fertility rates. These are listed in \textbf{Supplementary Table 1}. For each country-year in the HFD, we link the corresponding population data (i.e., the age pyramid), and the \(q_5\) value from the \citet{HMD}. When joined, the HFD/HMD data includes true target values for \(TFR\) and input data (\(C, W_{15}\ldots W_{45}, q_5\)) for the five estimators. \hypertarget{demographic-and-health-surveys}{% \subsection{Demographic and Health Surveys}\label{demographic-and-health-surveys}} HFD/HMD data is highly accurate, but it covers a fairly narrow range of demographic conditions. HFD populations are mainly in contemporary developed countries with relatively low fertility and mortality rates. To evaluate the estimators under a broader set of conditions, we also test against Demographic and Health Survey (DHS) data. DHS data includes nationally representative household surveys for monitoring population health in 47 countries and will produce TFR estimates based on the sample. For 118 country-periods we use the DHS API \citep{DHSAPIcite} to download estimated \(TFR\), number of women aged 15-49, number of children under age 5, and \(q_5\). We use IPUMS-DHS \citep{IPUMSDHS} to download the corresponding numbers of women by five-year age group. 
The combined DHS data includes a (noisy but unbiased) estimate of the target value for \(TFR\), and the (\(C, W_{15}\ldots W_{45}, q_5\)) input data for the five estimators.

\hypertarget{u.s.-county-fertility}{%
\subsection{U.S. County Fertility}\label{u.s.-county-fertility}}

Combining HFD/HMD and DHS data allows us to evaluate our methods at the national level. To evaluate the estimators at sub-national geographies, we use age-sex distributions from the 2010 Decennial Census to produce \(xTFR\) and \(iTFR\) estimates for every county in the U.S. We evaluate the accuracy of these estimates by comparing to published county-level \(TFR\) values from the National Center for Health Statistics (NCHS), obtained via the Centers for Disease Control and Prevention (CDC)'s Wide Ranging Online Data for Epidemiological Research (WONDER) tool. The NCHS publishes highly accurate, sub-national fertility information in the United States. However, for privacy reasons, the NCHS does not publish fertility information for U.S. counties with populations less than 100,000. As a result, we can compare estimates to true county-level \(TFR\) values for only 524 of the approximately 3000 U.S. counties.

\hypertarget{nonhuman-data}{%
\subsection{Nonhuman data}\label{nonhuman-data}}

The basic \(iTFR\) variant only requires information on a population's age-sex structure. Population pyramids are not limited to humans, so \(iTFR\) could in principle be used to estimate total fertility in nonhuman populations if age-sex distributions were accurate. To test this proposition we assemble age-specific fertility and population data for eleven nonhuman species (eight wild and one captive primate species, one wild lion species, and one wild seal species) reported in \textbf{\autoref{nonhuman-datasources}}. These populations vary substantially, allowing us to assess whether the method works across species with very different levels and age patterns of fertility. In contrast to humans, for whom scientists typically demarcate menarche and menopause at ages 15 and 50 years, respectively, menarche among the eleven species ranges from a low of age 2 for Sifakas (\emph{P. verreauxi}) and African lions (\emph{P. leo}) to a high of 11 years for Chimpanzees (\emph{P. troglodytes}). Reproductive age spans range from a low of 7 years for Thomas Langurs (\emph{P. thomasi}) to a high of 38 years for Chimpanzees. These species display reproductive age spans, fertility schedules, and TFRs that differ greatly from humans. Demographic data for nonhuman populations is collected very differently from human data. Living individuals must be directly observed, their ages must be estimated, and their sex may or may not be known. Despite these differences, it is interesting to evaluate the effectiveness of estimating lifetime fertility from age structure for other species.

\begingroup
\renewcommand{\arraystretch}{0.75}
\begin{table}[]
\caption{\textbf{Nonhuman fertility data.} $\alpha$ and $\beta$ are the first and last ages with observed non-zero fertility rates for each species. $_{1}P_0$ is the number of individuals less than one year old. $_{\beta-\alpha}W_{\alpha}$ is the number of females of reproductive age. $TFR$ is observed total fertility.
Sources are (a) \citet{bronikowski16,bronikowskidata}, (b) \citet{wich2007demography}, (c) \citet{ha2000demographic}, (d) \citet{barlow1991modeling}, (e) \citet{packer1998reproductive}.}
\label{nonhuman-datasources}
\begin{tabular}{lrrrrrr}
\cline{1-7}
Species and Source & Location & $\alpha$ & $\beta$ & $_1P_0$ & $_{\beta-\alpha}W_\alpha$ & TFR \\
\cline{1-7}
Sifaka (\textit{P. verreauxi})\textsuperscript{a} & Madagascar & 2 & 31 & 708 & 2073 & 10.67 \\
African Lion (\textit{P. leo})\textsuperscript{e} & Tanzania & 2 & 17 & 2643 & 3819 & 10.38 \\
Capuchin (\textit{C. capucinus})\textsuperscript{a} & Costa Rica & 5 & 24 & 189 & 423 & 9.82 \\
Baboon (\textit{P. cynocephalus})\textsuperscript{a} & Kenya & 4 & 23 & 1098 & 2465 & 9.37 \\
Muriqui (\textit{B. hypoxanthus})\textsuperscript{a} & Brazil & 7 & 40 & 376 & 1347 & 8.89 \\
Blue Monkey (\textit{C. mitis})\textsuperscript{a} & Kenya & 4 & 30 & 486 & 1542 & 8.69 \\
Gorilla (\textit{G. beringei})\textsuperscript{a} & Rwanda & 8 & 40 & 258 & 1049 & 8.00 \\
Macaque (\textit{M. nemestrina})\textsuperscript{c} & captive & 3 & 22 & 2368 & 7494 & 6.91 \\
Chimpanzee (\textit{P. troglodytes})\textsuperscript{a} & Tanzania & 11 & 49 & 189 & 1195 & 6.29 \\
Northern Fur Seal (\textit{C. ursinus})\textsuperscript{d} & -- & 3 & 19 & 2096 & 6436 & 6.10 \\
Thomas Langur (\textit{P. thomasi})\textsuperscript{b} & Indonesia & 4 & 11 & 161 & 305 & 3.67 \\
\cline{1-7}
\end{tabular}\label{nonhumantable}
\end{table}
\endgroup

\hypertarget{evaluation}{%
\section{Evaluation}\label{evaluation}}

\hypertarget{overall-error}{%
\subsection{Overall Error}\label{overall-error}}

We begin by investigating \(TFR\) estimation errors over all available test cases for human data. \textbf{\autoref{comparison}} reports the 10th, 50th, and 90th percentiles of the absolute error (\(\left| est-obs \right|\)) for all methods in the HMD/HFD, DHS, and U.S. county datasets. All methods produce quite accurate results using all three data sources, for almost all of the populations under study. Only the \(xTFR\) variant for DHS data produces estimates with the median absolute error above 0.25 births/woman. Median absolute errors are less than 0.10 births/woman in most datasets. In general, more sophisticated estimators have lower average errors. Ordering by median errors shows that the simplest estimator, \(iTFR\), has the poorest (although still very good) performance, that adding age structure detail and/or mortality corrections (\(xTFR\), \(iTFR^+\), \(xTFR^+\)) reduces errors further, and that the most complex model (\(bTFR\)) has the smallest average errors.

There is one important exception to the general ``fancier is better'' rule. In populations with high mortality and fertility rates -- exemplified in our data by DHS countries -- adding details about the age distribution of reproductive-age women while ignoring child mortality (i.e., changing \(iTFR \rightarrow xTFR\)) actually makes estimates worse. Why? Recall from Equation (\ref{eq:s0TFR}) that the true \(\tfrac{C}{W}\rightarrow TFR\) multiplier has two parts: \(\tfrac{1}{s}\) for child mortality correction, and \(\tfrac{1}{p}\) for age structure correction. \(iTFR\) assumes \(\tfrac{1}{s}=1\) and \(\tfrac{1}{p}=7\). In a high-mortality, high-fertility population the true values of the two components actually tend to be greater than 1 (more than one birth per living child) and less than 7 (children born recently represent less than 1/7 of the population's lifetime births, because there are relatively few old women).
In such situations \(iTFR\) underestimates one component of the multiplier (\(\tfrac{1}{s}\)) and overestimates the other (\(\tfrac{1}{p}\)), while \(xTFR\) underestimates (\(\tfrac{1}{s}\)) and gets (\(\tfrac{1}{p}\)) about right. Compensating errors for \(iTFR\) mean that it actually performs better than the more sophisticated \(xTFR\) approach in a high-mortality, high-fertility setting. This leads to one important caveat. If mortality estimates are unavailable, then \(iTFR\) is preferable to \(xTFR\) in a population with suspected high mortality.

\begin{figure}
\centering
\includegraphics{manuscript_files/figure-latex/plot-comparisons-over-all-data-1.png}
\caption{\textbf{Absolute error (births/woman) in TFR over alternative methods and datasets.} Solid dots are at the median error for each method and dataset. Horizontal bars extend from the 10th to the 90th percentile of error. Numbers in parentheses indicate the count of schedules for which it is possible to use each method. \label{comparison}}
\end{figure}

\begin{figure}
{\centering \includegraphics[width=0.6\linewidth]{manuscript_files/figure-latex/plot-nonhuman-estimates-1} }
\caption{\textbf{Fertility estimates for animal populations.} $iTFR$ estimates from nonhuman age-sex distributions. Observed TFR versus $iTFR$ estimates for the species listed in \autoref{nonhumantable}. Estimates match observations along the 45-degree line; dashed lines represent $\pm$10\% errors.\label{primates}}\label{fig:plot-nonhuman-estimates}
\end{figure}

\hypertarget{errors-for-nonhuman-species}{%
\subsection{Errors for Nonhuman Species}\label{errors-for-nonhuman-species}}

To test the generalizability of the method, we examine the accuracy of the \(iTFR\) estimator in the 11 nonhuman populations described in \textbf{\autoref{nonhumantable}}. We find that \(iTFR\) accurately estimates total fertility among these species (\textbf{\autoref{primates}}). These results suggest the method captures fundamental properties that govern mammalian fertility, and that it could be valuable in other studies with nonhuman populations.

\hypertarget{sensitivity-to-tfr-level-population-size-and-time-period}{%
\subsection{Sensitivity to TFR Level, Population Size, and Time Period}\label{sensitivity-to-tfr-level-population-size-and-time-period}}

\begin{figure}
\centering
\includegraphics{manuscript_files/figure-latex/plot-multipanel-summary-1.png}
\caption{\textbf{Estimated TFR from Population Pyramids.} Performance of three variants in HFD and DHS data. (a, d, g) use \(iTFR\); (b, e, h) use \(xTFR\); (c, f, i) use \(bTFR\); (a, b, c) plot estimated TFR against the observed 5-year average TFR. The solid line is \(Y=X\), and the dashed lines are \(\pm 10\%\). (d, e, f) illustrate percent error against population size. The dashed lines represent errors of \(\pm 10\%\). (g, h, i) plot percent errors against the year in which the population pyramid is observed. (j) plots the distribution of algebraic errors for each method (\(est-obs\)). (k) plots the distribution of absolute algebraic errors. (l) plots the distribution of absolute percent errors. For all variants, estimates are accurate over many scales and times. \label{multipanel-summary}}
\end{figure}

\textbf{\autoref{multipanel-summary}} reports the accuracy of \(iTFR\), \(xTFR\), and \(bTFR\) estimators for the HFD/HMD and DHS data combined for different levels of TFR, different population sizes, and different historical periods.
\textbf{\autoref{error-table}} reports summary measures of the accuracy for all five estimators using the same combined HFD/HMD and DHS data as well as U.S. counties. Overall, we find good agreement between estimated and observed TFRs for all five estimators (\textbf{\autoref{multipanel-summary}}, a, b, c and \textbf{\autoref{error-table}}). Demographic estimators are typically more accurate for larger populations (due to the law of large numbers) and for more recent time periods (due to improved data collection practices). However, we find that error rates are independent of population size (\textbf{\autoref{multipanel-summary}}, d, e, f) and are fairly stable across time (\textbf{\autoref{multipanel-summary}}, g, h, i), suggesting scale and temporal independence uncommon in other indirect methods. Even the simplest and least accurate of the five variants, \(iTFR\), predicts the total fertility rate with absolute errors of less than 0.09 births/woman in half of the HFD and DHS populations, and less than 0.26 births/woman in 90\% of the populations (\textbf{\autoref{error-table}}). Absolute percent errors for \(iTFR\) are also quite small relative to most indirect demographic estimators: 50\% of errors are within \(4.6\)\% of the true TFR, and 90\% are within \(10.8\)\%. As shown in \textbf{\autoref{multipanel-summary}} and \textbf{\autoref{error-table}}, the additional information contained in the \(xTFR\) and \(bTFR\) estimators produces even smaller errors. In short, we find that for national populations in countries with accurate data and (mostly) low mortality, accurate \(TFR\) estimates from population pyramids are possible. \begin{table}[t] \caption{\label{tab:unnamed-chunk-2}\label{error-table}\textbf{Summary statistics for the five variants using data from the HMD/HFD, DHS, and U.S. counties.} APE is the Absolute Percent Error.} \centering \resizebox{\linewidth}{!}{ \begin{tabular}{llrllll} \toprule Data & Method Family & n & 50\%ile Absolute Error & 90\%ile Absolute Error & 50\%ile APE & 90\%ile APE\\ \midrule & iTFR & 118 & 0.23 & 0.52 & 4.7\% & 10.5\%\\ & iTFR+ & 93 & 0.20 & 0.53 & 4.0\% & 11.1\%\\ & xTFR & 43 & 0.39 & 0.90 & 8.4\% & 14.0\%\\ & xTFR+ & 43 & 0.15 & 0.29 & 2.7\% & 6.6\%\\ \multirow{-5}{*}{\raggedright\arraybackslash DHS} & bTFR & 43 & 0.13 & 0.29 & 2.3\% & 5.3\%\\ \cmidrule{1-7} & iTFR & 1750 & 0.09 & 0.24 & 4.6\% & 10.7\%\\ & iTFR+ & 1750 & 0.08 & 0.22 & 4.4\% & 10.3\%\\ & xTFR & 1750 & 0.06 & 0.19 & 3.0\% & 8.2\%\\ & xTFR+ & 1750 & 0.06 & 0.17 & 2.9\% & 7.4\%\\ \multirow{-5}{*}{\raggedright\arraybackslash HMD/HFD} & bTFR & 1750 & 0.05 & 0.15 & 2.6\% & 6.6\%\\ \cmidrule{1-7} & iTFR & 524 & 0.13 & 0.26 & 6.5\% & 12.8\%\\ \multirow{-2}{*}{\raggedright\arraybackslash US counties} & xTFR & 524 & 0.07 & 0.21 & 3.3\% & 10.1\%\\ \bottomrule \end{tabular}} \end{table} \hypertarget{errors-in-subnational-estimates}{% \subsection{Errors in Subnational Estimates}\label{errors-in-subnational-estimates}} It is possible that coefficients derived at the national level could prove ineffective in estimating fertility at the subnational level (e.g.~\citet{tuchfeld74}). \textbf{\autoref{error-table}} reports the errors associated with both the \(iTFR\) and \(xTFR\) methods for U.S. counties that also have corresponding observed total fertility rates. These errors are on par with the errors observed using the HMD/HFD data, demonstrating important consistency in low error rates across a variety of scales.
\hypertarget{sensitivity-to-child-mortality}{% \subsection{Sensitivity to Child Mortality}\label{sensitivity-to-child-mortality}} \begin{figure} \centering \includegraphics{manuscript_files/figure-latex/plot-errors-by-q5-1.png} \caption{\textbf{Absolute Percent Errors against \(q_5\) values in the HMD and DHS.} We compare the performance of the five variants against observed \(q_5\) mortality rates. As \(q_5\) values increase, APE also increases in the \(iTFR\) and \(xTFR\) variants. This is corrected in the \(iTFR^+\), \(xTFR^+\), and \(bTFR\) variants, which incorporate estimated child mortality. \label{errors-by-q5}} \end{figure} Our derivations for the \(iTFR\) and \(xTFR\) assume that child mortality is negligible, and those estimators are likely to underestimate fertility when it is not. We show this relationship in \textbf{\autoref{errors-by-q5}} using data from the HMD/HFD and DHS, where \(iTFR\) and \(xTFR\) produce increasing errors as child mortality increases. Notice that the \(iTFR^+\), \(xTFR^+\), and \(bTFR\) variants largely correct for higher mortality levels. Thus, in countries or periods with high infant and child mortality, the \(iTFR^+\), \(xTFR^+\), and \(bTFR\) variants are more appropriate. \hypertarget{sec:extensions}{% \subsection{Extensions}\label{sec:extensions}} Accurate estimation of \(TFR\) from age-sex pyramids greatly expands our ability to estimate fertility across varying geographies, time periods, and subpopulations. To demonstrate the flexibility of the method, we produce TFR estimates for three cases in which direct \(TFR\) estimation is impossible. \emph{Subnational Fertility in Africa} We use data from WorldPop's gridded population age-structure data for the year 2015 \citep{tatem2013millennium} in conjunction with \(q_5\) estimates from the United Nations World Population Prospects 2017 \citep{united2017world} to produce subnational estimates of \(iTFR^+\) for Africa. Scholars have increasingly published subnational estimates of demographic indicators to monitor progress towards the UN's Sustainable Development Goals (SDGs) \citep{osgood2018mapping, golding2017mapping, graetz2018mapping} and our framework allows a nearly unprecedented level of geographic detail regarding fertility (\textbf{\autoref{WorldPop}}). \begin{figure} \centering \includegraphics{manuscript_files/figure-latex/plot-africa-map-1.png} \caption{\textbf{Estimating subnational fertility rates.} We use the \(iTFR^+\) method to estimate subnational total fertility rates for the African continent for 2010-2015 using data from WorldPop and the United Nations WPP 2017. \label{WorldPop}} \end{figure} \emph{Historical Fertility in Europe} We also extend our analysis of the Human Mortality Database by producing fertility estimates for all 2955 country-years of data in the HMD (an additional 1000 country-years' of estimates prior to the collection of detailed birth records). From this large historic volume of data, we highlight our findings in four example countries: France, Italy, the Netherlands, and Sweden (\textbf{\autoref{Historic}}). Sweden began tabulating the detailed birth records necessary for \(TFR\) calculation in 1891, France in 1946, the Netherlands 1950, and Italy in 1954. However, these countries collected both mortality and age-sex data considerably earlier (1751 for Sweden, 1816 for France, 1850 for the Netherlands, and 1872 for Italy). 
By using the \(bTFR\) method, we can reconstruct historical TFRs to create a time series of fertility data well before age-specific birth collection began, significantly expanding our ability to explore historical fertility patterns from up to 250 years ago. \begin{figure} \centering \includegraphics{manuscript_files/figure-latex/plot-historical-hmd-estimates-1.png} \caption{\textbf{Estimating historical fertility.} \(bTFR\) estimates of period fertility rates in four European countries using HMD historical age-sex and child mortality data. Shaded regions represent 90 percent posterior probability intervals; open circles are observed TFRs from the HFD; vertical dashed lines refer to the earliest data in the HFD. Red stars are average TFRs over the preceding five-year period for the earliest series with a vital records source from the Human Fertility Collection \citep{HFC}.\label{Historic}} \end{figure} \emph{Fertility by Income and Race in the United States} The complex connections between fertility and household income have always interested demographers \citep{becker1960economic, jones2008fertility}. However, analysis is limited because birth certificates do not include economic information, and very few surveys include both income and fertility questions. Estimation of fertility levels from population composition can expand our ability to learn about fertility-income correlations in different social groups. (\textbf{\autoref{Income}}) provides an example. We use the \(xTFR\) method to estimate TFRs conditional on both race (White/Non-White) and household income level in the United States, using data on the age-sex composition of households in different income strata from the Current Population Survey (CPS). Data for this figure comes from aggregated 2010--2018 March Economic Supplements, downloaded from IPUMS \citep{IPUMSCPS}. Indirect methods based on age-sex data allow us to produce estimates of \(TFR\) by income groups, and to further disaggregate by race. Among both Whites and Non-Whites, there is a definite U-shaped relationship, with the highest fertility levels in the poorest and richest American households. Here we provide these examples only as a proof of concept: indirect fertility estimation identifies intriguing relationships that would not be estimable by other means, and which clearly merit further study. \begin{figure} \centering \includegraphics{manuscript_files/figure-latex/resample-CPS-income-race-1.png} \caption{\textbf{Total Fertility (\(xTFR\)) by race and household income level.} We estimate TFR from age-sex distributions within race-income categories using the U.S. Current Population Survey (CPS) March Social and Economic Supplements, combined over 2010-2018. White corresponds to those reporting their race as White only; Non-White corresponds to all other survey respondents. Shaded regions represent confidence intervals (5\%-95\%) estimated from 1000 bootstrap samples in which households in each race-income category are drawn randomly with replacement from the set of all CPS households. \label{Income}} \end{figure} \hypertarget{conclusion}{% \section{Conclusion}\label{conclusion}} We have examined our framework's errors using high-quality age pyramids and fertility data. With a few minor caveats, we conclude that indirect estimators yield accurate \(TFR\) estimates when age-sex input data is accurate. Researchers should use caution, however, when applying our framework in data environments with potentially deficient inputs. 
The methods outlined above produce good estimates of \(TFR\), but they require accurate age-sex counts. Underenumeration of children aged 0-4 is a well-known problem with census data \citep{ewbank1981age, o2014historical, o2015coverage}, and misreporting of children aged 0-4 will yield inaccurate estimates of total fertility in our framework. Future extensions could adjust for underenumeration, either by adding an undercount multiplier to Equation (\ref{eq:s0TFR}), or by including a prior distribution for undercount in the \(bTFR\) model. Demographers should use expert judgement when choosing which variant to employ. For instance, in populations with known or suspected high child mortality, one should likely estimate \(TFR\) using \(iTFR^+\), \(xTFR^+\), or \(bTFR\) to correct for child mortality (if \(q_5\) estimates are available). In choosing the three example extensions in \autoref{sec:extensions}, we chose the variant to fit both the available data and the known demographic characteristics of the population. We know Africa tends to have high child mortality, but we are also cautious about the accuracy of WorldPop's age-structure data. This led us to \(iTFR^+\), which corrects for child mortality but does not use the detailed age distribution for women of childbearing ages. Similarly, the HMD contains high-quality data, but the further back in time one goes, the more important uncertainty about child mortality becomes. In this situation \(bTFR\) is the appropriate method. For the estimates of U.S. fertility conditional on household income and race, we do not have child mortality data available for these populations. However, U.S. child mortality is quite low, and for small subpopulations the age structure among reproductive-age women is quite variable. These considerations make \(xTFR\) the most appropriate estimator. Scholars should use similar judgements when using our framework to answer their own research questions. Our framework's flexible data requirements allow researchers to select the variant that conforms to the available data and offers high confidence in the estimates. As we show in our example extensions, this approach opens the door to fertility analyses for many populations of interest to sociologists \citep{lesthaeghe14}, economists \citep{hotz1997economics}, anthropologists \citep{greenhalgh1995situating}, epidemiologists \citep{paulson2002pregnancy}, historians \citep{woods2000demography}, and population geographers \citep{sorichetta2015high}. There is now increased demand for high-resolution gridded population datasets for climate change research and the SDGs \citep{jones15, cincotta00, golding2017mapping}. Because our methods work well for small populations, scientists could use them to estimate small-area fertility levels and changes, as inputs to gridded population projections or for gridded fertility level datasets. Because they rely on basic, commonly collected census data, the parameter-free, scale-, time-, and species-robust techniques can estimate total fertility even in areas without detailed demographic data. We anticipate this estimation framework will open new lines of inquiry into human fertility patterns. \hypertarget{replication-files}{% \section{Replication Files}\label{replication-files}} All data and code necessary to reproduce the reported results are licensed under the CC-BY-4.0 license and are publicly available in a replication repository located at \url{https://github.com/mathewhauer/iTFR-replication}. The analyses were performed in \emph{R} \citep{rcite}.
For the \(bTFR\) variant we use the \emph{rstan} package \citep{rstanpackage} to interface with Stan \citep{stanjss2017}. \bibliographystyle{agsm} \bibliography{mybibfile} \end{document}
{ "alphanum_fraction": 0.7432830555, "avg_line_length": 51.9093406593, "ext": "tex", "hexsha": "831a0855a121e184c1325ff426203c139de9a1bc", "lang": "TeX", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2021-03-07T07:45:14.000Z", "max_forks_repo_forks_event_min_datetime": "2021-03-07T07:45:14.000Z", "max_forks_repo_head_hexsha": "2930655e519010576d6497bc0205e113188ba4b7", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "rganly/iTFR-replication", "max_forks_repo_path": "MANUSCRIPT/manuscript.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "2930655e519010576d6497bc0205e113188ba4b7", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "rganly/iTFR-replication", "max_issues_repo_path": "MANUSCRIPT/manuscript.tex", "max_line_length": 809, "max_stars_count": 3, "max_stars_repo_head_hexsha": "2930655e519010576d6497bc0205e113188ba4b7", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "rganly/iTFR-replication", "max_stars_repo_path": "MANUSCRIPT/manuscript.tex", "max_stars_repo_stars_event_max_datetime": "2022-02-06T12:22:52.000Z", "max_stars_repo_stars_event_min_datetime": "2020-01-29T14:37:27.000Z", "num_tokens": 15418, "size": 56685 }
\subsection{Results} \label{subsec:results_logistic_regression} Using scikit-learn's grid search functionality it can be shown that both an RBF and a linear kernel with $C=1$ are good choices for the support vector machine. Therefore, we will use a linear kernel and set $C=1$. \begin{table}[H] \begin{tabular}{@{}lllll@{}} \toprule Classification Method & TP (train) & TN (train) & TP (test) & TN (test) \\ \midrule logistic sgd & 0.94 & 0.99 & 0.90 & 0.97 \\ logistic sgd cv & 0.95 & 0.98 & 0.93 & 0.99 \\ logistic scikit & 0.94 & 0.99 & 0.90 & 1 \\ logistic scikit cv & 0.92 & 0.99 & 0.90 & 1 \\ svm & 0.96 & 1 & 0.94 & 0.97 \\ svm cv & 0.96 & 1 & 0.93 & 1 \\ \bottomrule \end{tabular} \caption{Performance of each classification method in terms of the true positive (TP) and true negative (TN) rate for training and test data. We use the following hyperparameters: $\lambda=0.001$, learning rate=0.001, batch size=1, epoch=1000, test ratio=0.1, k-fold=5.} \label{table:1} \end{table} Table \ref{table:1} shows that the SVM is the best-performing classification method. SVMs are known to be capable of separating overlapping class distributions \cite{bishop2006pattern}. Logistic regression, on the other hand, has so many hyperparameters that finding the best set of them becomes a task of its own, in addition to optimizing the learnable parameters. All methods perform well in classifying the benign class and perform only moderately well in classifying cancer, i.e.~malignant tumors. \begin{figure}[H] \centering \includegraphics[width=1\linewidth]{Images/epoch100.png} \caption{Accuracy variation as a function of the learning rate for the case with the following hyperparameter: $\lambda=0.001$, batch size=32, epoch=100, test ratio=0.2} \label{fig:epoch100} \end{figure} To study the effect of the tuning parameters on the performance of the model, we examine two cases: the effect of the number of epochs and the effect of the mini-batch size. In figure \ref{fig:epoch100} we see that the model starts to overfit as the learning rate increases; this is evident in the testing accuracy. It is a consequence of the small number of iterations. One can improve the model by increasing the number of iterations, as seen in figure \ref{fig:epoch1000}, where the model performs well over the whole range of learning rates. \begin{figure}[H] \centering \includegraphics[width=1\linewidth]{Images/epoch1000.png} \caption{Accuracy variation as a function of the learning rate for the case with the following hyperparameter: $\lambda=0.001$, batch size=32, epoch=1000, test ratio=0.2} \label{fig:epoch1000} \end{figure} We also explore the effect of the mini-batch size. The number of mini-batches determines the speed of the gradient search and thus helps minimize the chances of getting stuck in a local minimum. This can be seen in figures \ref{fig:MS1}--\ref{fig:MS32} for both mini-batch cases. In the standard stochastic gradient descent case (batch size=1) the accuracy increases slowly, as shown in the test accuracy of figure \ref{fig:MS1}; in the second case, where the mini-batch size is 32 (figure \ref{fig:MS32}), the accuracy improves drastically. Basically, the batch size affects how quickly the model optimizes the learnable parameters.
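For reference, the role of these hyperparameters can be made explicit with a generic sketch of the mini-batch stochastic gradient descent update; this is our own summary, and it assumes (purely for illustration) that $\lambda$ multiplies an $\ell_2$ penalty of the form $\frac{\lambda}{2}\|\theta\|^{2}$ added to the cost: \[ \theta \;\leftarrow\; \theta \;-\; \eta \left( \frac{1}{|B|} \sum_{i \in B} \nabla_{\theta} \mathcal{L}_i(\theta) \;+\; \lambda\,\theta \right), \] where $\theta$ are the learnable parameters, $\eta$ is the learning rate, $B$ is the current mini-batch of size $|B|$, and $\mathcal{L}_i$ is the per-sample loss. Setting $|B|=1$ recovers standard stochastic gradient descent, while larger mini-batches average more gradients per step, which is consistent with the smoother and faster accuracy gains observed for batch size 32.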
\begin{figure}[H] \centering \includegraphics[width=1\linewidth]{Images/MS1.png} \caption{Accuracy variation as a function of the learning rate for the case with the following hyperparameter: $\lambda=0.001$, batch size=1, epoch=1000, test ratio=0.2} \label{fig:MS1} \end{figure} \begin{figure}[H] \centering \includegraphics[width=1\linewidth]{Images/MS32.png} \caption{Accuracy variation as a function of the learning rate for the case with the following hyperparameter: $\lambda=0.001$, batch size=32, epoch=1000, test ratio=0.2} \label{fig:MS32} \end{figure}
{ "alphanum_fraction": 0.7076332213, "avg_line_length": 71.8275862069, "ext": "tex", "hexsha": "24e54f01e777fff720b1a897881ff5875a94a161", "lang": "TeX", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2021-11-17T10:51:25.000Z", "max_forks_repo_forks_event_min_datetime": "2021-11-17T10:51:25.000Z", "max_forks_repo_head_hexsha": "098363c47c9409d6ffce1d03a968b6f2265c5fcc", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "am-kaiser/CompSci-Project-1", "max_forks_repo_path": "documentation/report/sections/results_logistic_regression.tex", "max_issues_count": 9, "max_issues_repo_head_hexsha": "098363c47c9409d6ffce1d03a968b6f2265c5fcc", "max_issues_repo_issues_event_max_datetime": "2021-12-16T19:51:18.000Z", "max_issues_repo_issues_event_min_datetime": "2021-11-01T08:32:11.000Z", "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "am-kaiser/CompSci-Project-1", "max_issues_repo_path": "documentation/report/sections/results_logistic_regression.tex", "max_line_length": 674, "max_stars_count": null, "max_stars_repo_head_hexsha": "098363c47c9409d6ffce1d03a968b6f2265c5fcc", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "am-kaiser/CompSci-Project-1", "max_stars_repo_path": "documentation/report/sections/results_logistic_regression.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1104, "size": 4166 }
\chapter{Angle Network Performance} \label{appendix:angle_perf} \begin{figure}[H] \centering \includegraphics[width=6in, height=3.85in, keepaspectratio]{figures/train_figs/angle_transitions/0_1.pdf} \caption{Angle Network Performance -- 1 Episode} \end{figure} \begin{figure}[H] \centering \includegraphics[width=6in, height=3.85in, keepaspectratio]{figures/train_figs/angle_transitions/0_21.pdf} \caption{Angle Network Performance -- 21 Episodes} \end{figure} \begin{figure}[H] \centering \includegraphics[width=6in, height=3.85in, keepaspectratio]{figures/train_figs/angle_transitions/0_41.pdf} \caption{Angle Network Performance -- 41 Episodes} \end{figure} \begin{figure}[H] \centering \includegraphics[width=6in, height=3.85in, keepaspectratio]{figures/train_figs/angle_transitions/0_61.pdf} \caption{Angle Network Performance -- 61 Episodes} \end{figure} \begin{figure}[H] \centering \includegraphics[width=6in, height=3.85in, keepaspectratio]{figures/train_figs/angle_transitions/0_81.pdf} \caption{Angle Network Performance -- 81 Episodes} \end{figure} \begin{figure}[H] \centering \includegraphics[width=6in, height=3.85in, keepaspectratio]{figures/train_figs/angle_transitions/0_101.pdf} \caption{Angle Network Performance -- 101 Episodes} \end{figure} \begin{figure}[H] \centering \includegraphics[width=6in, height=3.85in, keepaspectratio]{figures/train_figs/angle_transitions/0_121.pdf} \caption{Angle Network Performance -- 121 Episodes} \end{figure} \begin{figure}[H] \centering \includegraphics[width=6in, height=3.85in, keepaspectratio]{figures/train_figs/angle_transitions/0_141.pdf} \caption{Angle Network Performance -- 141 Episodes} \end{figure} \begin{figure}[H] \centering \includegraphics[width=6in, height=3.85in, keepaspectratio]{figures/train_figs/angle_transitions/0_161.pdf} \caption{Angle Network Performance -- 161 Episodes} \end{figure} \begin{figure}[H] \centering \includegraphics[width=6in, height=3.85in, keepaspectratio]{figures/train_figs/angle_transitions/0_181.pdf} \caption{Angle Network Performance -- 181 Episodes} \end{figure} \begin{figure}[H] \centering \includegraphics[width=6in, height=3.85in, keepaspectratio]{figures/train_figs/angle_transitions/0_201.pdf} \caption{Angle Network Performance -- 201 Episodes} \end{figure} \begin{figure}[H] \centering \includegraphics[width=6in, height=3.85in, keepaspectratio]{figures/train_figs/angle_transitions/0_221.pdf} \caption{Angle Network Performance -- 221 Episodes} \end{figure} \begin{figure}[H] \centering \includegraphics[width=6in, height=3.85in, keepaspectratio]{figures/train_figs/angle_transitions/0_241.pdf} \caption{Angle Network Performance -- 241 Episodes} \end{figure} \begin{figure}[H] \centering \includegraphics[width=6in, height=3.85in, keepaspectratio]{figures/train_figs/angle_transitions/0_261.pdf} \caption{Angle Network Performance -- 261 Episodes} \end{figure} \begin{figure}[H] \centering \includegraphics[width=6in, height=3.85in, keepaspectratio]{figures/train_figs/angle_transitions/0_281.pdf} \caption{Angle Network Performance -- 281 Episodes} \end{figure} \begin{figure}[H] \centering \includegraphics[width=6in, height=3.85in, keepaspectratio]{figures/train_figs/angle_transitions/0_301.pdf} \caption{Angle Network Performance -- 301 Episodes} \end{figure}
{ "alphanum_fraction": 0.7901531072, "avg_line_length": 40.1325301205, "ext": "tex", "hexsha": "73512dc43a22e78f285afe5573a0a64f7d1dcce8", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "34d8fa51198be3b0f8d10982b69b79d277682638", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "okayjustin/roborodentia2017", "max_forks_repo_path": "report/appendices/angle_perf.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "34d8fa51198be3b0f8d10982b69b79d277682638", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "okayjustin/roborodentia2017", "max_issues_repo_path": "report/appendices/angle_perf.tex", "max_line_length": 108, "max_stars_count": 1, "max_stars_repo_head_hexsha": "34d8fa51198be3b0f8d10982b69b79d277682638", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "okayjustin/roborodentia2017", "max_stars_repo_path": "report/appendices/angle_perf.tex", "max_stars_repo_stars_event_max_datetime": "2019-01-03T06:12:14.000Z", "max_stars_repo_stars_event_min_datetime": "2019-01-03T06:12:14.000Z", "num_tokens": 1040, "size": 3331 }
\documentclass[030-workshop.tex]{subfiles} \begin{document} \subsection{ds4biomed Table of Contents} \label{sse:ds4biomed-toc} The materials for ``Data Science for the Biomedical Sciences'' can be found at the following URL: \url{https://ds4biomed.tech/} \begin{enumerate} \item Welcome \item Preface \item Who is this book for \item Code of Conduct \item Setup \item Workshop logistics \item Introduction \item spreadsheets R + RStudio \item Load Data \item Descriptive Calculations \item Clean Data \item Visualization \item Analysis Intro \item 30-Day Readmittance \item Working with multiple datasets \item Application Programming Interfaces (APIs) \item Functions \item Survival Analysis \item Machine Learning (tidymodels) \item Additional Resources \end{enumerate} \end{document}
{ "alphanum_fraction": 0.7205040092, "avg_line_length": 25.6764705882, "ext": "tex", "hexsha": "41c7373875201641d3746306931708663ac6a176", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "1e9df7198b28bc5f81885a94cc27b114fae94817", "max_forks_repo_licenses": [ "CC0-1.0" ], "max_forks_repo_name": "chendaniely/dissertation-edt", "max_forks_repo_path": "030-workshop/030-050-040-ds4biomed_toc.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "1e9df7198b28bc5f81885a94cc27b114fae94817", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "CC0-1.0" ], "max_issues_repo_name": "chendaniely/dissertation-edt", "max_issues_repo_path": "030-workshop/030-050-040-ds4biomed_toc.tex", "max_line_length": 97, "max_stars_count": null, "max_stars_repo_head_hexsha": "1e9df7198b28bc5f81885a94cc27b114fae94817", "max_stars_repo_licenses": [ "CC0-1.0" ], "max_stars_repo_name": "chendaniely/dissertation-edt", "max_stars_repo_path": "030-workshop/030-050-040-ds4biomed_toc.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 228, "size": 873 }
\unnumberedsection{Declaration of Original and Sole Authorship} \section*{Declaration of Original and Sole Authorship} I, \name, declare that this thesis entitled \emph{\ibtitle} and the data presented in it are original and my own work. I confirm that: \begin{itemize} \item No part of this work has previously been submitted for grading in any course. \item References to the work of others have been clearly acknowledged. Quotations from the work of others have been clearly indicated, and attributed to them. \item In cases where others have contributed to part of this work, such contribution has been clearly acknowledged and distinguished from my own work. \end{itemize} Date: \submissiondate Signature: \textbf{You may include here an image with a scan of your signature.}
{ "alphanum_fraction": 0.7886792453, "avg_line_length": 36.1363636364, "ext": "tex", "hexsha": "5698e14e6bfa51e95c6ea3510e21af1f980d93cd", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "62548e1fdc8b3a3ab76b808108dc4603ab8a8e6a", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "JeffersonUCC/IB-LaTeX-Template", "max_forks_repo_path": "pre/declaration.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "62548e1fdc8b3a3ab76b808108dc4603ab8a8e6a", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "JeffersonUCC/IB-LaTeX-Template", "max_issues_repo_path": "pre/declaration.tex", "max_line_length": 158, "max_stars_count": null, "max_stars_repo_head_hexsha": "62548e1fdc8b3a3ab76b808108dc4603ab8a8e6a", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "JeffersonUCC/IB-LaTeX-Template", "max_stars_repo_path": "pre/declaration.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 177, "size": 795 }
\subsubsection{Variant calling} After deciding on the best algorithm we will carry out variant calling. We will carry out joint variant calling across as many samples as possible to enable a greater sensitivity for low frequency variants and improve the ability to filter out false variants. We have also previously shown that calling across populations and doing genotype refinement yields better concordance and correlation with SNP arrays. \begin{figure}[!htbp] \centering \includegraphics[width=0.8\textwidth]{ADRP/figures/calling} \caption[Homogenised calling across all datasets to generate a single panel]{Homogenised calling across all datasets to generate a single panel. Figure created by Deepti Gurdasani.} \label{fig:calling} \end{figure} %Datasets generated with unique Illumina chemistry, and those with different coverages will need to be called in separate subsets. GATK3.4 \gls{UG} will be used for calling SNPs from the low coverage data. Multiple samples will be called simultaneously with \gls{UG}. During variant calling \gls{UG} by default downsamples each sample randomly to a maximum coverage of 250 (\-\-downsampling\_type BY\_SAMPLE and \-\-downsample\_to\_coverage 250). We will use the default minimum base quality for UG, which is currently 17 (\-\-min\_base\_quality\_score 17). %10 for HC At each site we don't call more than the 6 best alternate alleles (\-\-max\_alternate\_alleles 6). For low coverage data we use calling and emission thresholds (\-stand\_call\_conf and \-stand\_emit\_conf) of 10 as the sample count is greater than 100 as per \href{https://www.broadinstitute.org/gatk/guide/pdfdocs/GATK_GuideBook_2.7-4.pdf}{GATK best practices}. %page 13 %Indels will be called with a plethora of software packages (see the next section). %If pedigree information is available, then this will be used by UnifiedGenotyper %and GenotypeGVCFs %in calculation of the InbreedingCoeff annotation, which is used for subsequent variant filtering. Pedigree information will also be used for refinement and phasing. Incomplete pedigrees will be inferred from IBD matrices for sequenced and non-sequenced samples in each cohort. %\input{sections/curation/indelcalling} Variant calling is carried out jointly across 2,478 individuals; i.e. 1,140 males and 1,338 females. The SNP count after variant calling is summarised in table \ref{tab:SNPcount}. \paragraph{Calling of chromosomes X and Y} The X chromosome will be called jointly for males and females with the same ploidy, because the current version of \gls{UG} is not functional for ploidies different from 2. Likewise the pseudoautosomal regions (PARs) 1 and 2 on the X chromosome will be called like the autosomes; i.e. jointly for males and females with all samples treated as diploid. Variant calling of the Y chromosome will only be carried out for males. The PARs on the Y chromosome are masked in the reference sequence and not subject to calling. An excessive number of heterozygous calls of haploid genotypes can be utilized for a QC step of sites and samples; thresholds to be decided. %The mitochondrial variants will be called with GATK, VarScan2\cite{Koboldt2012} and MitoSeek and the union set recalled and annotated with GATK prior to filtering. VarScan is chosen, because it performs well at extreme read depths.\cite{Stead2013} MitoSeek is a software package dedicated to calling variants from mtDNA reads.
When calling with GATK the ploidy will be set to the mean coverage in the MT contig divided by the mean coverage in the somatic chromosomes. %ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/technical/reference/phase2_reference_assembly_sequence/README_human_reference_20110707 %http://gatkforums.broadinstitute.org/discussion/1214/can-i-use-gatk-on-non-diploid-organisms %GoNL - "Consensus sequences were called by GATK." %\input{tables/ploidies}
{ "alphanum_fraction": 0.8071354705, "avg_line_length": 113.7647058824, "ext": "tex", "hexsha": "2a76bb126631d69b34875cd9e732f39c4dd30e87", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "348a4cf3e796b617360fa28138b425b5818226be", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "tommycarstensen/first_year_report", "max_forks_repo_path": "ADRP/sections/curation/calling.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "348a4cf3e796b617360fa28138b425b5818226be", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "tommycarstensen/first_year_report", "max_issues_repo_path": "ADRP/sections/curation/calling.tex", "max_line_length": 659, "max_stars_count": null, "max_stars_repo_head_hexsha": "348a4cf3e796b617360fa28138b425b5818226be", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "tommycarstensen/first_year_report", "max_stars_repo_path": "ADRP/sections/curation/calling.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 939, "size": 3868 }
\section{Conclusions and future work} We have described a technique that allows us to avoid metastability issues in the implementation of \clos{} in our system \sicl{} by replacing those issues by simpler bootstrapping issues. Furthermore, our technique also simplifies bootstrapping by avoiding special cases due to the non-existence of generic functions when the system is bootstrapped. To avoid these issues, we use the \emph{host} generic function machinery in early stages of bootstrapping. Currently, nothing prevents a specified method on a specified generic function, specializing on specified classes to be modified or removed, and nothing prevents a specified class from being redefined. Should this happen, ``the game would of course be over.'' We imagine a mechanism that protects the user from inadvertently invoking such operations. It should probably be possible to \emph{toggle} the mechanism so that system code can make modifications known to be safe. When this article was written, \sicl{} was not yet finished, nor even in a state to be executed standalone. However, most of the difficult components (such as the compiler, the garbage collector, and of course \clos{}) were in a fairly advanced stage of development. The final verdict on the technique for bootstrapping the system can not be determined until the system is able to run standalone. %% LocalWords: metastability
{ "alphanum_fraction": 0.8039772727, "avg_line_length": 52.1481481481, "ext": "tex", "hexsha": "72fb7e375dcf8aabfbc07268046d4e273530b0d8", "lang": "TeX", "max_forks_count": 80, "max_forks_repo_forks_event_max_datetime": "2022-03-15T05:30:33.000Z", "max_forks_repo_forks_event_min_datetime": "2015-03-06T12:52:05.000Z", "max_forks_repo_head_hexsha": "ec5cc25de783ecce373081ab72d2a04359155ad6", "max_forks_repo_licenses": [ "BSD-2-Clause" ], "max_forks_repo_name": "gwerbin/SICL", "max_forks_repo_path": "Papers/Satiation/sec-conclusions.tex", "max_issues_count": 85, "max_issues_repo_head_hexsha": "ec5cc25de783ecce373081ab72d2a04359155ad6", "max_issues_repo_issues_event_max_datetime": "2022-02-18T11:06:19.000Z", "max_issues_repo_issues_event_min_datetime": "2015-03-25T00:31:09.000Z", "max_issues_repo_licenses": [ "BSD-2-Clause" ], "max_issues_repo_name": "gwerbin/SICL", "max_issues_repo_path": "Papers/Satiation/sec-conclusions.tex", "max_line_length": 72, "max_stars_count": 842, "max_stars_repo_head_hexsha": "ec5cc25de783ecce373081ab72d2a04359155ad6", "max_stars_repo_licenses": [ "BSD-2-Clause" ], "max_stars_repo_name": "gwerbin/SICL", "max_stars_repo_path": "Papers/Satiation/sec-conclusions.tex", "max_stars_repo_stars_event_max_datetime": "2022-03-30T14:03:04.000Z", "max_stars_repo_stars_event_min_datetime": "2015-01-12T15:44:23.000Z", "num_tokens": 311, "size": 1408 }
% !TeX spellcheck = en_US
\documentclass[hidelinks,titlepage,a4paper]{article}
\usepackage[utf8]{inputenc}
\usepackage[T1]{fontenc}
\usepackage{lmodern}
\usepackage{newpxtext,newpxmath}
\linespread{1.05} % Line spacing - Palatino needs more space between lines
\usepackage{microtype}
\usepackage[group-separator={,}]{siunitx}
\usepackage[tikz]{bclogo}
\usepackage{pgfplots}
\usepackage{appendix}
\usepackage{verbatim}
\usepackage[hyphens]{url}
\usepackage{hyperref} % For hyperlinks in the PDF
\usepackage{fontawesome5}
\usepackage[os=win]{menukeys}
\usepackage{xfrac}
\usepackage[euler]{textgreek}
\usepackage[hmarginratio=1:1,top=32mm,columnsep=20pt]{geometry} % Document margins
\geometry{a4paper,textwidth=6.5in,hmarginratio=1:1, textheight=9in,vmarginratio=1:1,heightrounded}
\usepackage{titlesec}
\setcounter{secnumdepth}{4}
\setcounter{tocdepth}{4}
\titleformat{\paragraph} {\normalfont\normalsize\bfseries}{\theparagraph}{1em}{}
\titlespacing*{\paragraph} {0pt}{3.25ex plus 1ex minus .2ex}{1.5ex plus .2ex}
\usepackage{fancyhdr} % Headers and footers
\pagestyle{fancy} % All pages have headers and footers
\fancyhead{} % Blank out the default header
\fancyfoot{} % Blank out the default footer
\fancyhead[L]{Tracy Profiler}
\fancyhead[R]{The user manual}
\fancyfoot[RO]{\thepage} % Custom footer text
\usepackage{listings}
\usepackage{xcolor}
\usepackage{float}
\lstset{language=C++}
\lstset{ basicstyle=\footnotesize\ttfamily, tabsize=4, extendedchars=true, breaklines=true, stringstyle=\ttfamily, showspaces=false, xleftmargin=17pt, framexleftmargin=17pt, framexrightmargin=5pt, framexbottommargin=4pt, showstringspaces=false, escapeinside={@}{@}, aboveskip=\baselineskip, belowskip=\baselineskip }
\usepackage[hang,small,labelfont=bf,up,textfont=it,up]{caption} % Custom captions under/above floats in tables or figures
\usepackage{tikz}
\usetikzlibrary{arrows.meta,positioning,shapes,patterns}
\newcommand{\LMB}{\includegraphics[height=.8\baselineskip]{icons/lmb}}
\newcommand{\RMB}{\includegraphics[height=.8\baselineskip]{icons/rmb}}
\newcommand{\MMB}{\includegraphics[height=.8\baselineskip]{icons/mmb}}
\newcommand{\Scroll}{\includegraphics[height=.8\baselineskip]{icons/scroll}}
\newcommand*\circled[1]{\tikz[baseline=(char.base)]{ \node[shape=circle,draw,inner sep=1.5pt] (char) {#1};}}
\begin{document}
\begin{titlepage} \centering {\fontsize{120}{140}\selectfont Tracy Profiler} \vspace{50pt} {\Huge\fontfamily{lmtt}\selectfont The user manual} \vfill \includegraphics[height=40mm]{../icon/icon} \vfill \large\textbf{Bartosz Taudul} \href{mailto:[email protected]}{<[email protected]>} \vspace{10pt} \today \vfill \url{https://github.com/wolfpld/tracy} \end{titlepage}
\tableofcontents
\newpage
\section{A quick look at Tracy Profiler}
Tracy is a real-time, nanosecond resolution \emph{hybrid frame and sampling profiler} that can be used for remote or embedded telemetry of games and other applications. It can profile CPU (C, C++11, Lua), GPU (OpenGL, Vulkan) and memory. It can also monitor locks held by threads and show where contention happens. While Tracy can perform statistical analysis of sampled call stack data, just like other \emph{statistical profilers} (such as VTune, perf or Very Sleepy), it mainly focuses on manual markup of the source code, which allows frame-by-frame inspection of the program execution. You will be able to see exactly which functions are called, how much time is spent in them, and how they interact with each other in a multi-threaded environment.
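To make ``manual markup of the source code'' concrete, below is a minimal, illustrative sketch of what instrumenting a game loop can look like. The \texttt{ZoneScoped}, \texttt{ZoneScopedN} and \texttt{FrameMark} macros are part of Tracy's markup API; the surrounding functions are placeholders, and the include path will depend on where the Tracy sources live in your project.

\begin{lstlisting}
#include "Tracy.hpp"    // adjust the path to your Tracy checkout

void UpdatePhysics()
{
    ZoneScoped;             // zone named after the enclosing function
    // ... simulation work ...
}

void Render()
{
    ZoneScopedN("Render");  // explicitly named zone
    // ... draw calls ...
}

void GameLoop()
{
    for(;;)
    {
        UpdatePhysics();
        Render();
        FrameMark;          // marks the end of a frame
    }
}
\end{lstlisting}

When the \texttt{TRACY\_ENABLE} macro is not defined (see the setup instructions in the following chapters), this markup compiles to nothing, so it can be left in the code permanently.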
In contrast, the statistical analysis may show you the hot spots in your code, but it is unable to accurately pinpoint the underlying cause for semi-random frame stutter that may occur every couple of seconds. Even though Tracy targets \emph{frame} profiling, with the emphasis on analysis of \emph{frame time} in real-time applications (i.e.~games), it does work with utilities that do not employ the concept of a frame. There's nothing that would prohibit profiling of, for example, a compression tool, or an event-driven UI application. You may think of Tracy as the RAD Telemetry plus Intel VTune, on overdrive. \subsection{Real-time} The concept of Tracy being a real-time profiler may be explained in a couple of different ways: \begin{enumerate} \item The profiled application is not slowed down by profiling\footnote{See section~\ref{perfimpact} for a benchmark.}. The act of recording a profiling event has virtually zero cost -- it only takes a few nanoseconds. Even on low-power mobile devices there's no perceptible impact on execution speed. \item The profiler itself works in real-time, without the need to process collected data in a complex way. Actually, it is quite inefficient in the way it works, as the data it presents is calculated anew each frame. And yet it can run at 60 frames per second. \item The profiler has full functionality when the profiled application is running and the data is still being collected. You may interact with your application and then immediately switch to the profiler, when a performance drop occurs. \end{enumerate} \subsection{Nanosecond resolution} It is hard to imagine how long a nanosecond is. One good analogy is to compare it with a measure of length. Let's say that one second is one meter (the average doorknob is on the height of one meter). One millisecond ($\frac{1}{1000}$ of a second) would be then the length of a millimeter. The average size of a red ant or the width of a pencil is 5 or 6~\si{\milli\metre}. A modern game running at 60 frames per second has only 16~\si{\milli\second} to update the game world and render the entire scene. One microsecond ($\frac{1}{1000}$ of a millisecond) in our comparison equals to one micron. The diameter of a typical bacterium ranges from 1 to 10 microns. The diameter of a red blood cell, or width of strand of spider web silk is about 7~\si{\micro\metre}. And finally, one nanosecond ($\frac{1}{1000}$ of a microsecond) would be one nanometer. The modern microprocessor transistor gate, the width of DNA helix, or the thickness of a cell membrane are in the range of 5~\si{\nano\metre}. In one~\si{\nano\second} the light can travel only 30~\si{\centi\meter}. Tracy can achieve single-digit nanosecond measurement resolution, due to usage of hardware timing mechanisms on the x86 and ARM architectures\footnote{In both 32 and 64~bit variants. On x86 Tracy requires a modern version of the \texttt{rdtsc} instruction (Sandy Bridge and later). On ARM-based systems Tracy will try to use the timer register (\textasciitilde 40 \si{\nano\second} resolution). If it fails (due to kernel configuration), Tracy falls back to system provided timer, which can range in resolution from 250 \si{\nano\second} to 1 \si{\micro\second}.}. Other profilers may rely on the timers provided by operating system, which do have significantly reduced resolution (about 300~\si{\nano\second} -- 1~\si{\micro\second}). This is enough to hide the subtle impact of cache access optimization, etc. 
\subsubsection{Timer accuracy} You may wonder why it is important to have a truly high resolution timer\footnote{Interestingly, the \texttt{std::chrono::high\_resolution\_clock} is not really a high resolution clock.}. After all, you only want to profile functions that have long execution times, and not some short-lived procedures, that have no impact on the application's run time. It is wrong to think so. Optimizing a function to execute in 430~\si{\nano\second}, instead of 535~\si{\nano\second} (note that there is only a 100~\si{\nano\second} difference) results in 14 \si{\milli\second} savings if the function is executed 18000 times\footnote{This is a real optimization case. The values are median function run times and do not reflect the real execution time, which explains the discrepancy in the total reported time.}. It may not seem like a big number, but this is how much time there is to render a complete frame in a 60~FPS game. Imagine that this is your particle processing loop. You also need to understand how timer precision is reflected in measurement errors. Take a look at figure~\ref{timer}. There you can see three discrete timer tick events, which increase the value reported by the timer by 300~\si{\nano\second}. You can also see four readings of time ranges, marked $A_1$, $A_2$; $B_1$, $B_2$; $C_1$, $C_2$ and $D_1$, $D_2$. \begin{figure}[h] \centering\begin{tikzpicture} \draw [-{Stealth}] (-1.5, 0) -- (11.5, 0) node[anchor=south east] {Time}; \draw (0, -0.25) -- (0, 0.25) node[anchor=south] {\faClock}; \draw (5, -0.25) -- (5, 0.25) node[anchor=south] {\faClock}; \draw (10, -0.25) -- (10, 0.25) node[anchor=south] {\faClock}; \draw (0, 0.9) -- (0, 1) -- (5, 1) -- (5, 0.9); \draw (2.5, 1) node[anchor=south] {300~\si{\nano\second}}; \draw (4.9, 0.1) -- (4.9, -0.1); \draw [{Stealth}-] (4.9, -0.2) -- (4.8, -0.7) node[anchor=north] {$A_1$}; \draw (5.1, 0.1) -- (5.1, -0.1); \draw [{Stealth}-] (5.1, -0.2) -- (5.2, -0.7) node[anchor=north] {$A_2$}; \draw (0.1, 0.1) -- (0.1, -0.1); \draw [{Stealth}-] (0.1, -0.2) -- (0.2, -0.7) node[anchor=north] {$B_1$}; \draw (9.9, 0.1) -- (9.9, -0.1); \draw [{Stealth}-] (9.9, -0.2) -- (9.8, -0.7) node[anchor=north] {$B_2$}; \draw (-0.1, 0.1) -- (-0.1, -0.1); \draw [{Stealth}-] (-0.1, -0.2) -- (-0.2, -0.7) node[anchor=north] {$C_1$}; \draw (10.1, 0.1) -- (10.1, -0.1); \draw [{Stealth}-] (10.1, -0.2) -- (10.2, -0.7) node[anchor=north] {$C_2$}; \draw (2.4, 0.1) -- (2.4, -0.1); \draw [{Stealth}-] (2.4, -0.2) -- (2.3, -0.7) node[anchor=north] {$D_1$}; \draw (2.6, 0.1) -- (2.6, -0.1); \draw [{Stealth}-] (2.6, -0.2) -- (2.7, -0.7) node[anchor=north] {$D_2$}; \end{tikzpicture} \caption{Low precision (300~ns) timer. Discrete timer ticks are indicated by the \faClock{} icon.} \label{timer} \end{figure} Now let's take a look at the timer readings. \begin{itemize} \item The $A$ and $D$ ranges both take a very short amount of time (10~\si{\nano\second}), but the $A$ range is reported as 300~\si{\nano\second}, and the $D$ range is reported as 0~\si{\nano\second}. \item The $B$ range takes a considerable amount of time (590~\si{\nano\second}), but according to the timer readings, it took the same time (300~\si{\nano\second}) as the short lived $A$ range. \item The $C$ range (610~\si{\nano\second}) is only 20~\si{\nano\second} longer than the $B$ range, but it is reported as 900~\si{\nano\second}, a 600~\si{\nano\second} difference! \end{itemize} Here you can see why it is important to use a high precision timer. 
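The readings above can be condensed into a simple rule of thumb (our generalization of the figure, not a statement taken from it): with a timer that advances in discrete steps of $\Delta$ (here $\Delta = 300$~\si{\nano\second}), any single measured time range may deviate from the true duration by almost a full tick in either direction, \[ \left| t_\mathrm{measured} - t_\mathrm{true} \right| < \Delta, \] which is exactly the 290~\si{\nano\second} error seen in the $A$, $B$ and $C$ readings.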
While there is no escape from the measurement errors, their impact can be reduced by increasing the timer accuracy. \subsection{Frame profiler} Tracy is aimed at understanding the inner workings of a tight loop of a game (or any other kind of an interactive application). That's why it slices the execution time of a program using the \emph{frame}\footnote{A frame is used to describe a single image displayed on the screen by the game (or any other program), preferably 60 times per second to achieve smooth animation. You can also think about physics update frames, audio processing frames, etc.} as a basic work-unit\footnote{Frame usage is not required. See section~\ref{markingframes} for more information.}. The most interesting frames are the ones that took longer than the allocated time, producing visible hitches in the on-screen animation. Tracy allows inspection of such misbehavior. \subsection{Remote or embedded telemetry} Tracy uses the client-server model to enable a wide range of use-cases (see figure~\ref{clientserver}). For example, a game on a mobile phone may be profiled over the wireless connection, with the profiler running on a desktop computer. Or you can run the client and server on the same machine, using a localhost connection. It is also possible to embed the visualization front-end in the profiled application, making the profiling self-contained\footnote{See section~\ref{embeddingserver} for guidelines.}. \begin{figure}[h] \centering\begin{tikzpicture} [inner sep=1.5mm, bend angle=30, thread/.style={rectangle, draw}, module/.style={rectangle, draw, rounded corners=8pt}, collect/.style={{Stealth}-, shorten <=4pt, shorten >=4pt}, network/.style={cloud, draw, cloud ignores aspect, cloud puffs=11.6}] \node[thread] (t1) {\faRandom{} Thread 1}; \node[thread] (t2) [below=of t1] {\faRandom{} Thread 2}; \node[thread] (t3) [below=of t2] {\faRandom{} Thread 3}; \node[module] (client) [right=of t2] {Tracy client} edge [collect, bend right] (t1) edge [collect] (t2) edge [collect, bend left] (t3); \node[network] (network) [right=of client] {Network} edge [collect] (client); \node[module] (server) [right=of network] {Tracy server} edge [collect] (network); \begin{scope}[node distance=12pt, bend angle=25] \node[thread] (display) [above right=of server] {\faTv{} Display} edge [collect, bend right] (server); \node[thread] (storage) [below right=of server] {\faDatabase{} Storage} edge [collect, bend left] (server); \end{scope} \end{tikzpicture} \caption{Client-server model.} \label{clientserver} \end{figure} In Tracy terminology, the profiled application is a \emph{client} and the profiler itself is a \emph{server}. It was named this way because the client is a thin layer that just collects events and sends them for processing and long-term storage on the server. The fact that the server needs to connect to the client to begin the profiling session may be a bit confusing at first. \subsection{Why Tracy?} You may wonder, why should you use Tracy, when there are so many other profilers available. Here are some arguments: \begin{itemize} \item Tracy is free and open source (BSD license), while RAD Telemetry costs about \$8000 per year. \item Tracy provides out-of-the-box Lua bindings. It has been successfully integrated with other native and interpreted languages (Rust, Arma scripting language) using the C API (see chapter~\ref{capi} for reference). \item Tracy has a wide variety of profiling options. You can profile CPU, GPU, locks, memory allocations, context switches and more. 
\item Tracy is feature rich. Statistical information about zones, trace comparisons, or inclusion of inline function frames in call stacks (even in statistics of sampled stacks) are features unique to Tracy. \item Tracy focuses on performance. Many tricks are used to reduce memory requirements and network bandwidth. The impact on the client execution speed is minimal, while other profilers perform heavy data processing within the profiled application (and then claim to be lightweight). \item Tracy uses low-level kernel APIs, or even raw assembly, where other profilers rely on layers of abstraction. \item Tracy is multi-platform right from the very beginning, both on the client and the server side. Other profilers tend to have Windows-specific graphical interfaces. \item Tracy can handle millions of frames, zones, memory events, and so on, while other profilers tend to target very short captures. \item Tracy doesn't require manual markup of interesting areas in your code to start profiling. You may rely on automated call stack sampling and add instrumentation later, when you know where it's needed. \item Tracy provides mapping of source code to the assembly, with detailed information about the cost of executing each instruction on the CPU. \end{itemize} With all that being said, Tracy may not be the right choice for you if you need to profile games targeting PS4, Xbox, or other consoles behind an NDA wall. \subsection{Performance impact} \label{perfimpact} To check how much slowdown is introduced by using Tracy, let's profile an example application. For this purpose we have used etcpak\footnote{\url{https://github.com/wolfpld/etcpak}}. The input data was a $16384 \times 16384$ pixel test image, and the $4 \times 4$ pixel block compression function was selected to be instrumented. The image was compressed on 12 parallel threads, and the timing data represents a mean compression time of a single image. The results are presented in table~\ref{PerformanceImpact}. Dividing the average of run time differences (37.7 \si{\milli\second}) by the number of captured zones per single image (\num{16777216}) shows us that the impact of profiling is only 2.25 \si{\nano\second} per zone (this includes two events: start and end of a zone). \begin{table}[h] \centering \begin{tabular}[h]{c|c|c|c|c|c} \textbf{Mode} & \textbf{Zones (total)} & \textbf{Zones (single image)} & \textbf{Clean run} & \textbf{Profiling run} & \textbf{Difference} \\ \hline ETC1 & \num{201326592} & \num{16777216} & 110.9 \si{\milli\second} & 148.2 \si{\milli\second} & +37.3 \si{\milli\second} \\ ETC2 & \num{201326592} & \num{16777216} & 212.4 \si{\milli\second} & 250.5 \si{\milli\second} & +38.1 \si{\milli\second} \end{tabular} \caption{Zone capture time cost.} \label{PerformanceImpact} \end{table} \subsubsection{Assembly analysis} To see how such a small overhead (only 2.25 \si{\nano\second}) is achieved, let's take a look at the assembly. The following x64 code is responsible for logging the start of a zone. Do note that it is generated by compiling fully portable C++.
\begin{lstlisting}[language={[x86masm]Assembler}] mov byte ptr [rsp+0C0h],1 ; store zone activity information mov r15d,28h mov rax,qword ptr gs:[58h] ; TLS mov r14,qword ptr [rax] ; queue address mov rdi,qword ptr [r15+r14] ; data address mov rbp,qword ptr [rdi+28h] ; buffer counter mov rbx,rbp and ebx,7Fh ; 128 item buffer jne function+54h -----------+ ; check if current buffer is usable mov rdx,rbp | mov rcx,rdi | call enqueue_begin_alloc | ; reclaim/alloc next buffer shl rbx,5 <-----------------+ ; buffer items are 32 bytes add rbx,qword ptr [rdi+48h] ; calculate queue item address mov byte ptr [rbx],10h ; queue item type rdtsc ; retrieve time shl rdx,20h or rax,rdx ; construct 64 bit timestamp mov qword ptr [rbx+1],rax ; write timestamp lea rax,[__tracy_source_location] ; static struct address mov qword ptr [rbx+9],rax ; write source location data lea rax,[rbp+1] ; increment buffer counter mov qword ptr [rdi+28h],rax ; write buffer counter \end{lstlisting} The second code block, responsible for ending a zone, is similar, but smaller, as it can reuse some variables retrieved in the above code. \subsection{Examples} To see how Tracy can be integrated into an application, you may look at example programs in the \texttt{examples} directory. Looking at the commit history might be the best way to do that. \subsection{On the web} Tracy can be found at the following web addresses: \begin{itemize} \item Homepage -- \url{https://github.com/wolfpld/tracy} \item Bug tracker -- \url{https://github.com/wolfpld/tracy/issues} \item Discord chat -- \url{https://discord.gg/pk78auc} \item Sponsoring development -- \url{https://github.com/sponsors/wolfpld/} \end{itemize} You may be also interested in a Bitbucket mirror: \url{https://bitbucket.org/wolfpld/tracy}. \section{First steps} Tracy Profiler supports MSVC, gcc and clang. A reasonably recent version of the compiler is needed, due to C++11 requirement. The following platforms are confirmed to be working (this is not a complete list): \begin{itemize} \item Windows (x86, x64) \item Linux (x86, x64, ARM, ARM64) \item Android (ARM, ARM64, x86) \item FreeBSD (x64) \item Cygwin (x64) \item MinGW (x64) \item WSL (x64) \item OSX (x64) \item iOS (ARM, ARM64) \end{itemize} \subsection{Initial client setup} The recommended way to integrate Tracy into an application is to create a git submodule in the repository (assuming that git is used for version control). This way it is very easy to update Tracy to newly released versions. If that's not an option, copy all files from the Tracy checkout directory to your project. \begin{bclogo}[ noborder=true, couleur=black!5, logo=\bclampe ]{What revision should I use?} When deciding on the Tracy Profiler version you want to use, you have basically two options. Take into consideration the following pros and cons: \begin{itemize} \item Using the last-version-tagged revision will give you a stable platform to work with. You won't experience any breakages, major UI overhauls or network protocol changes. Unfortunately, you also won't be getting any bug fixes. \item Working with the bleeding edge \texttt{master} development branch will give you access to all the new improvements and features added to the profiler. While it is generally expected that \texttt{master} should always be usable, \textbf{there are no guarantees that it will be so.} \end{itemize} Do note that all bug fixes and pull requests are made against the \texttt{master} branch. 
\end{bclogo} With the source code included in your project, add the \texttt{tracy/TracyClient.cpp} source file to the IDE project and/or makefile. You're done. Tracy is now integrated into the application. In the default configuration Tracy is disabled. This way you don't have to worry that the production builds will perform collection of profiling data. You will probably want to create a separate build configuration, with the \texttt{TRACY\_ENABLE} define, which enables profiling. Be careful to enter the define name as specified, don't make a mistake of adding an additional \texttt{D} at the end. Also make sure that this macro is defined for all files across your project. The application you want to profile should be compiled with all the usual optimization options enabled (i.e.~make a release build). It makes no sense to profile debugging builds, as the unoptimized code and additional checks (asserts, etc.) completely change how the program behaves. Finally, on Unix make sure that the application is linked with libraries \texttt{libpthread} and \texttt{libdl}. BSD systems will also need to be linked with \texttt{libexecinfo}. \subsubsection{Short-lived applications} In case you want to profile a short-lived program (for example, a compression utility that finishes its work in one second), set the \texttt{TRACY\_NO\_EXIT} environment variable to $1$. With this option enabled, Tracy will not exit until an incoming connection is made, even if the application has already finished executing. If your platform doesn't support easy setup of environment variables, you may also add the \texttt{TRACY\_NO\_EXIT} define to your build configuration, which has the same effect. \subsubsection{On-demand profiling} \label{ondemand} By default Tracy will begin profiling even before the program enters the \texttt{main} function. If you don't want to perform a full capture of application life-time, you may define the \texttt{TRACY\_ON\_DEMAND} macro, which will enable profiling only when there's an established connection with the server. It should be noted, that if on-demand profiling is \emph{disabled} (which is the default), then the recorded events will be stored in the system memory until a server connection is made and the data can be uploaded\footnote{This memory is never released, but it is reused for collection of further events.}. Depending on the amount of the things profiled, the requirements for event storage can easily grow up to a couple of gigabytes. Since this data is cleared after the initial connection is made, you won't be able to perform a second connection to a client, unless the on-demand mode is used. \begin{bclogo}[ noborder=true, couleur=black!5, logo=\bcattention ]{Caveats} The client with on-demand profiling enabled needs to perform additional bookkeeping, in order to present a coherent application state to the profiler. This incurs additional time cost for each profiling event. \end{bclogo} \subsubsection{Client discovery} By default Tracy client will announce its presence to the local network\footnote{Additional configuration may be required to achieve full functionality, depending on your network layout. Read about UDP broadcasts for more information.}. If you want to disable this feature, define the \texttt{TRACY\_NO\_BROADCAST} macro. \subsubsection{Setup for multi-DLL projects} In projects that consist of multiple DLLs/shared objects things are a bit different. 
Compiling \texttt{TracyClient.cpp} into every DLL is not an option because this would result in several instances of Tracy objects lying around in the process. We rather need to pass the instances of them to the different DLLs to be reused there. For that you need a \emph{profiler DLL} to which your executable and the other DLLs link. If that doesn't exist you have to create one explicitly for Tracy\footnote{You may also look at the \texttt{library} directory in the profiler source tree.}. This library should contain the \texttt{tracy/TracyClient.cpp} source file. Link the executable and all DLLs which you want to profile to this DLL. If you are targeting Windows with Microsoft Visual Studio or MinGW, add the \texttt{TRACY\_IMPORTS} define to your application. \subsubsection{Problematic platforms} Some OS vendors think that \emph{they} own and control the devices \emph{you} have paid for. This results in restricting usage of APIs that might 'confuse' you, or denying you access to information about what your computer is doing. This is a very sad state of things. \paragraph{Apple woes} Because Apple \emph{has} to be \emph{think different}, there are some problems with using Tracy on OSX and iOS. First, the performance hit due to profiling is higher than on other platforms. Second, some critical features are missing and won't be possible to achieve: \begin{itemize} \item There's no support for the \texttt{TRACY\_NO\_EXIT} mode. \item Profiling is interrupted when the application exits. This will result in missing zones, memory allocations, or even source location names. \item OpenGL can't be profiled. \end{itemize} \paragraph{Android lunacy} \label{androidlunacy} Starting with Android 8.0 you are no longer allowed to use the \texttt{/proc} file system. One of the consequences of this change is inability to check system CPU usage. This is apparently a security enhancement. In its infinite wisdom Google has decided to not give you any option to bypass this restriction. To workaround this limitation, you will need to have a rooted device. Execute the following commands using \texttt{root} shell: \begin{lstlisting}[language=sh] setenforce 0 mount -o remount,hidepid=0 /proc \end{lstlisting} The first command will allow access to system CPU statistics. The second one will allow inspection of foreign processes (which is required for context switch capture). \emph{Be sure that you are fully aware of the consequences of making these changes.} \paragraph{Cloud service providers} In some cases you actually don't own the hardware, but lend it from someone else. In such circumstances you might be running inside a virtual machine, which may be configured to prohibit you from using the bare metal facilities needed by Tracy\footnote{Or you might just be using a quite old CPU, which doesn't have support for required features.}. One example of such limitation would be lack of access to a reliable time stamp register readings, which will prevent the application from starting with either 'CPU doesn't support RDTSCP instruction' or 'CPU doesn't support invariant TSC' error message. If you are using Windows, you may workaround this issue by rebuilding the profiled application with the \texttt{TRACY\_TIMER\_QPC} macro, but be aware that it will severely lower the resolution of timer readings. \subsubsection{Changing network port} Network communication between the client and the server by default is performed using network port 8086. 
The profiling session utilizes the TCP protocol and client broadcasts are done over UDP. If for some reason you want to use another port\footnote{For example, other programs may already be using it, or you may have overzealous firewall rules, or you may want to run two clients on the same IP address.}, you can change it using the \texttt{TRACY\_DATA\_PORT} macro for the data connection, and the \texttt{TRACY\_BROADCAST\_PORT} macro for client broadcasts. Alternatively, both ports may be changed at the same time by declaring the \texttt{TRACY\_PORT} macro (the specific macros listed before take priority). The data connection port may also be changed without recompiling the client application, by setting the \texttt{TRACY\_PORT} environment variable. If a custom port is not specified and the default listening port is already occupied, the profiler will automatically try to listen on a number of other ports.
\begin{bclogo}[ noborder=true, couleur=black!5, logo=\bcbombe ]{Important}
To enable network communication, Tracy needs to open a listening port. Make sure it is not blocked by an overzealous firewall or anti-virus program.
\end{bclogo}
\subsubsection{Limitations}
When using Tracy Profiler, keep in mind the following requirements:
\begin{itemize}
\item Each lock may be used in no more than 64 unique threads.
\item There can be no more than 65534 unique source locations\footnote{A source location is a place in the code, which is identified by source file name and line number, for example when you mark up a zone.}. This number is further split in half between native code source locations and dynamic source locations (for example, when Lua instrumentation is used).
\item A profiling session cannot be longer than 1.6 days ($2^{47}$ \si{\nano\second}). This also includes on-demand sessions.
\item No more than 4 billion ($2^{32}$) memory free events may be recorded.
\item No more than 16 million ($2^{24}$) unique call stacks can be captured.
\end{itemize}
The following conditions also apply, but don't trouble yourself with them too much. You would probably already know if you were breaking any of them.
\begin{itemize}
\item Only little-endian CPUs are supported.
\item Virtual address space must be limited to 48 bits.
\item The Tracy server requires a CPU which is able to handle misaligned memory accesses.
\end{itemize}
\subsection{Check your environment}
It is not an easy task to reliably measure the performance of an application on modern machines. There are many factors affecting program execution characteristics, some of which you will be able to minimize, and others you will have to live with. It is critically important that you understand how these variables impact profiling results, as it is key to understanding the data you get.
\subsubsection{Operating system}
\label{checkenvironmentos}
In a multitasking operating system applications compete for system resources with each other. This has a visible effect on the measurements performed by the profiler, which you may or may not accept. In order to get the most accurate profiling results you should minimize interference caused by other programs running on the same machine. Before starting a profiling session close all web browsers, music players, instant messengers, and all other non-essential applications like Steam, Uplay, etc. Make sure you don't have the debugger hooked into the profiled program, as it also has an impact on the timing results.
Interference caused by other programs can be seen in the profiler, if context switch capture (section~\ref{contextswitches}) is enabled.
\begin{bclogo}[ noborder=true, couleur=black!5, logo=\bclampe ]{Debugger in Visual Studio}
In MSVC you would typically run your program using the \emph{Start Debugging} menu option, which is conveniently available as the \keys{F5} shortcut. You should instead use the \emph{Start Without Debugging} option, available as the \keys{\ctrl + F5} shortcut.
\end{bclogo}
\subsubsection{CPU design}
\label{checkenvironmentcpu}
Where to even begin here? Modern processors are such complex beasts that it's almost impossible to say anything with certainty about how they will behave. Cache configuration, prefetcher logic, memory timings, branch prediction, and execution unit counts are all drivers of instructions-per-cycle uplift nowadays, after the megahertz race hit the wall. Not only is this incredibly difficult to reason about, but you also need to take into account how the CPU topology affects things, which is described in more detail in section~\ref{cputopology}. Nevertheless, let's take a look at the ways we can try to stabilize the profiling data.
\paragraph{Superscalar out-of-order speculative execution}
Also known as: the \emph{spectre} thing we have to deal with now. You must be aware that most processors available on the market\footnote{With the exception of low-cost ARM CPUs.} \emph{do not} execute machine code in a linear way, as laid out in the source code. This can lead to counterintuitive timing results reported by Tracy. Trying to get more 'reliable' readings\footnote{By 'reliable' you really mean: behaving in the way you expect it to.} would require a change in the behavior of the code, and that is not something a profiler should do. Instead, Tracy shows you what the hardware is \emph{really} doing. This is a complex subject and the details vary from one CPU to another. You can read a brief rundown of the topic at the following address: \url{https://travisdowns.github.io/blog/2019/06/11/speed-limits.html}.
\paragraph{Simultaneous multithreading}
Also known as: Hyper-threading. Typically present on Intel and AMD processors. To get the most reliable results you should have the whole CPU core's resources dedicated to a single thread of your program. Otherwise you're no longer measuring the behavior of your code, but rather how it keeps up when its computing resources are randomly taken away by some other thing running on another pipeline within the same physical core. Note that you might \emph{want} to observe this behavior, if you plan to deploy your application on a machine with simultaneous multithreading enabled. This would require careful examination of what else is running on the machine, or even how the threads of your own program are scheduled by the operating system, as various combinations of competing workloads (e.g.\ integer/floating point operations) will be impacted differently.
\paragraph{Turbo mode frequency scaling}
Also known as: Turbo Boost (Intel), Precision Boost (AMD). While the CPU is more-or-less designed to always be able to work at the advertised \emph{base} frequency, there is usually some headroom left, which allows usage of the built-in automatic overclocking. There are no guarantees that the turbo frequencies can be attained, or how long they will be held, as there are many things to take into consideration:
\begin{itemize}
\item How many cores are being used? Just one, or all 8? All 16?
\item What type of work is being performed? Integer? Floating point? 128-wide SIMD? 256-wide SIMD? 512-wide SIMD? \item Were you lucky in the silicon lottery? Some dies are simply better made and are able to achieve higher frequencies. \item Are you running on the best-rated core, or at the worst-rated core? Some cores may be unable to match the performance of other cores in the same processor. \item What kind of cooling solution are you using? The cheap one bundled with the CPU, or a beefy chunk of metal that has no problem with heat dissipation? \item Do you have complete control over the power profile? Spoiler alert: no. The operating system may run anything at any time on any of the other cores, which will impact the turbo frequency you're able to achieve. \end{itemize} As you can see, this feature basically screams 'unreliable results!' Best keep it disabled and run at the base frequency. Otherwise your timings won't make much sense. A true example: branchless compression function executing multiple times with the same input data was measured executing at \emph{four} different speeds. Keep in mind that even at the base frequency you may hit thermal limits of the silicon and be downthrottled. \paragraph{Power saving} This is basically the same as turbo mode, but in reverse. While unused, processor cores are kept at lower frequencies (or even completely disabled) to reduce power usage. When your code starts running\footnote{Not necessarily when the application is started, but also when, for example, a blocking mutex becomes released by other thread and is acquired.} the core frequency needs to ramp up, which may be visible in the measurements. What's even worse, if your code doesn't do a lot of work (for example, because it is waiting for the GPU to finish rendering the frame), the core frequency might not be ramped up to 100\%, which will skew the results. Again, to get the best results, keep this feature disabled. \paragraph{AVX offset and power licenses} Intel CPUs are unable to run at their advertised frequencies when wide SIMD operations are performed due to increased power requirements\footnote{AMD processors are not affected by this issue.}. Depending on the width \emph{and} type of operations performed, the core operating frequency will be reduced, in some cases quite drastically\footnote{\url{https://en.wikichip.org/wiki/intel/xeon_gold/5120\#Frequencies}}. To make things even better, \emph{some} part of the workload will be executed within the available power license, at twice reduced processing rate, then the CPU may be stopped for some time, so that the wide parts of executions units may be powered up, then the work will continue at full processing rate, but at reduced frequency. Be very careful when using AVX2 or AVX512. More information can be found at \url{https://travisdowns.github.io/blog/2020/01/17/avxfreq1.html}, \url{https://en.wikichip.org/wiki/intel/frequency_behavior}. \paragraph{Summing it up} \label{ryzen} Power management schemes employed in various CPUs make it hard to reason about true performance of the code. For example, figure~\ref{ryzenimage} contains a histogram of function execution times (as described in chapter~\ref{findzone}), as measured on an AMD Ryzen CPU. The results ranged from 13.05~\si{\micro\second} to 61.25~\si{\micro\second} (extreme outliers were not included on the graph, limiting the longest displayed time to 36.04~\si{\micro\second}). 
\begin{figure}[h] \centering \includegraphics[width=0.5\textwidth]{images/ryzen.png} \caption{Example function execution times on a Ryzen CPU} \label{ryzenimage} \end{figure} We can immediately see that there are two distinct peaks, at 13.4~\si{\micro\second} and 15.3~\si{\micro\second}. A reasonable assumption would be that there are two paths in the code, one that can omit some work, and the second one which must do some additional job. But here's a catch -- the measured code is actually branchless and is always executed the same way. The two peaks represent two turbo frequencies between which the CPU was aggressively switching. We can also see that the graph gradually falls off to the right (representing longer times), with a small bump near the end. This can be attributed to running in power saving mode, with differing reaction times to the required operating frequency boost to full power. \subsection{Building the server} The easiest way to get going is to build the data analyzer, available in the \texttt{profiler} directory. With it you can connect to localhost or remote clients and view the collected data right away. If you prefer to inspect the data only after a trace has been performed, you may use the command line utility in the \texttt{capture} directory. It will save a data dump that may be later opened in the graphical viewer application. Note that ideally you should be using the same version of the Tracy profiler on both client and server. The network protocol may change in between versions, in which case you won't be able to make a connection. See section~\ref{capturing} for more information about performing captures. \begin{bclogo}[ noborder=true, couleur=black!5, logo=\bcbombe ]{Important} Due to the memory requirements for data storage, Tracy server is only supposed to run on 64-bit platforms. While there is nothing preventing the program from building and executing in a 32-bit environment, doing so is not supported. \end{bclogo} \subsubsection{Required libraries} To build the application contained in the \texttt{profiler} directory, you will need to install external libraries, which are not bundled with Tracy. \paragraph{Windows} On Windows you will need to use the \texttt{vcpkg} utility. If you are not familiar with this tool, please read the description at the following address: \url{https://docs.microsoft.com/en-us/cpp/build/vcpkg}. There are two ways you can run \texttt{vcpkg} to install the dependencies for Tracy: \begin{itemize} \item Local installation within the project directory -- run this script to download and build both \texttt{vcpkg} and the required dependencies: \begin{lstlisting}[language=sh] vcpkg\install_vcpkg_dependencies.bat \end{lstlisting} This writes files only to the \texttt{vcpkg\textbackslash{}vcpkg} directory and makes no other changes on your machine. \item System-wide installation -- install \texttt{vcpkg} by following the instructions on its website, and then execute the following commands: \begin{lstlisting}[language=sh] vcpkg integrate install vcpkg install --triplet x64-windows-static freetype glfw3 capstone[arm,arm64,x86] \end{lstlisting} \end{itemize} \paragraph{Unix} On Unix systems you will need to install the \texttt{pkg-config} utility and the following libraries: \texttt{glfw}, \texttt{freetype}, \texttt{capstone}. Some Linux distributions will require you to add a \texttt{lib} prefix and a \texttt{-dev}, or \texttt{-devel} postfix to library names. 
You may also need to add a seemingly random number to the library name (for example: \texttt{freetype2}, or \texttt{freetype6}). How fun! Installation of the libraries on OSX can be facilitated using the \texttt{brew} package manager. \subsubsection{Embedding the server in profiled application} \label{embeddingserver} While not officially supported, it is possible to embed the server in your application, the same one which is running the client part of Tracy. This is left up for you to figure out. Note that most libraries bundled with Tracy are modified in some way and contained in the \texttt{tracy} namespace. The one exception is Dear ImGui, which can be freely replaced. Be aware that while the Tracy client uses its own separate memory allocator, the server part of Tracy will use global memory allocation facilities, shared with the rest of your application. This will affect both the memory usage statistics and Tracy memory profiling. The following defines may be of interest: \begin{itemize} \item \texttt{TRACY\_NO\_FILESELECTOR} -- controls whether a system load/save dialog is compiled in. If it's enabled, the saved traces will be named \texttt{trace.tracy}. \item \texttt{TRACY\_NO\_STATISTICS} -- Tracy will perform statistical data collection on the fly, if this macro is \emph{not} defined. This allows extended analysis of the trace (for example, you can perform a live search for matching zones) at a small CPU processing cost and a considerable memory usage increase (at least 8 bytes per zone). \item \texttt{TRACY\_NO\_ROOT\_WINDOW} -- the main profiler view won't occupy whole window if this macro is defined. Additional setup is required for this to work. If you are embedding the server into your application you probably want to enable this option. \end{itemize} \subsection{Naming threads} Remember to set thread names for proper identification of threads. You must use the functions exposed in the \texttt{tracy/common/TracySystem.hpp} header to do so, as the system facilities typically have limited functionality. If context switch capture is active, Tracy will try to capture thread names through operating system data. This is only a fallback mechanism and it shouldn't be relied upon. \subsection{Crash handling} \label{crashhandling} On selected platforms (see section~\ref{featurematrix}) Tracy will intercept application crashes\footnote{For example, invalid memory accesses ('segmentation faults', 'null pointer exceptions'), divisions by zero, etc.}. This serves two purposes. First, the client application will be able to send the remaining profiling data to the server. Second, the server will receive a crash report with information about the crash reason, call stack at the time of crash, etc. This is an automatic process and it doesn't require user interaction. \begin{bclogo}[ noborder=true, couleur=black!5, logo=\bcattention ]{Caveats} On MSVC the debugger has priority over the application in handling exceptions. If you want to finish the profiler data collection with the debugger hooked-up, select the \emph{continue} option in the debugger pop-up dialog. \end{bclogo} \subsection{Feature support matrix} \label{featurematrix} Some features of the profiler are only available on selected platforms. Please refer to table~\ref{featuretable} for details. 
\begin{table}[h]
\centering
\begin{tabular}[h]{c|c|c|c|c|c|c}
\textbf{Feature} & \textbf{Windows} & \textbf{Linux} & \textbf{Android} & \textbf{OSX} & \textbf{iOS} & \textbf{BSD} \\ \hline
Profiling initialization & \faCheck & \faCheck & \faCheck & \faPoo & \faPoo & \faCheck \\
CPU zones & \faCheck & \faCheck & \faCheck & \faCheck & \faCheck & \faCheck \\
Locks & \faCheck & \faCheck & \faCheck & \faCheck & \faCheck & \faCheck \\
Plots & \faCheck & \faCheck & \faCheck & \faCheck & \faCheck & \faCheck \\
Messages & \faCheck & \faCheck & \faCheck & \faCheck & \faCheck & \faCheck \\
Memory & \faCheck & \faCheck & \faCheck & \faCheck & \faCheck & \faCheck \\
GPU zones (OpenGL) & \faCheck & \faCheck & \faCheck & \faPoo & \faPoo & \\
GPU zones (Vulkan) & \faCheck & \faCheck & \faCheck & \faCheck & \faCheck & \\
Call stacks & \faCheck & \faCheck & \faCheck & \faCheck & \faPoo & \faCheck \\
Symbol resolution & \faCheck & \faCheck & \faCheck & \faCheck & \faPoo & \faCheck \\
Crash handling & \faCheck & \faCheck & \faCheck & \faTimes & \faTimes & \faTimes \\
CPU usage probing & \faCheck & \faCheck & \faCheck & \faCheck & \faCheck & \faCheck \\
Context switches & \faCheck & \faCheck & \faCheck & \faTimes & \faPoo & \faTimes \\
CPU topology information & \faCheck & \faCheck & \faCheck & \faTimes & \faTimes & \faTimes \\
Call stack sampling & \faCheck & \faTimes & \faTimes & \faTimes & \faPoo & \faTimes \\
\end{tabular}
\vspace{1em}
\faPoo{} -- Not possible to support due to platform limitations.
\caption{Feature support matrix}
\label{featuretable}
\end{table}
\section{Client markup}
\label{client}
With the aforementioned steps you will be able to connect to the profiled program, but there probably won't be any data collection performed\footnote{With some small exceptions, see section~\ref{automated}.}. Unless you're able to perform automatic call stack sampling (see chapter~\ref{sampling}), you will have to manually instrument the application. The entire user-facing interface is contained in the \texttt{tracy/Tracy.hpp} header file.
Manual instrumentation is best started by adding markup to the main loop of the application, along with a few functions that are called there. This will give you a rough outline of each function's time cost, which you may then further refine by instrumenting functions deeper in the call stack. Alternatively, automated sampling might guide you more quickly to places of interest.
\subsection{Handling text strings}
When dealing with Tracy macros, you will encounter two ways of providing string data to the profiler. In both cases you should pass \texttt{const char*} pointers, but there are differences in the expected life-time of the pointed-to data.
\begin{enumerate}
\item When a macro only accepts a pointer (for example: \texttt{TracyMessageL(text)}), the provided string data must be accessible at any time in program execution (\emph{this also includes the time after exiting the \texttt{main} function}). The string also cannot be changed. This basically means that the only option is to use a string literal (e.g.: \texttt{TracyMessageL("Hello")}).
\item If there's a string pointer with a size parameter (for example: \texttt{TracyMessage(text, size)}), the profiler will allocate an internal temporary buffer to store the data. The pointed-to data is not used afterwards. You should be aware that the memory allocation and copying involved in this operation have a small time cost.
\end{enumerate} \subsection{Specifying colors} In some cases you will want to provide your own colors to be displayed by the profiler. In all such places you should use a hexadecimal \texttt{0xRRGGBB} notation. Alternatively you may use named colors predefined in \texttt{common/TracyColor.hpp} (included by \texttt{Tracy.hpp}). Visual reference: \url{https://en.wikipedia.org/wiki/X11_color_names}. Do not use \texttt{0x000000} if you want to specify black color, as zero is a special value indicating that no color was set. Instead, use a value close to zero, e.g. \texttt{0x000001}. \subsection{Marking frames} \label{markingframes} To slice the program's execution recording into frame-sized chunks\footnote{Each frame starts immediately after the previous has ended.}, put the \texttt{FrameMark} macro after you have completed rendering the frame. Ideally that would be right after the swap buffers command. \begin{bclogo}[ noborder=true, couleur=black!5, logo=\bclampe ]{Do I need this?} This step is optional, as some applications do not use the concept of a frame. \end{bclogo} \subsubsection{Secondary frame sets} \label{secondaryframeset} In some cases you may want to track more than one set of frames in your program. To do so, you may use the \texttt{FrameMarkNamed(name)} macro, which will create a new set of frames for each unique name you provide. \subsubsection{Discontinuous frames} Some types of frames are discontinuous by nature. For example, a physics processing step in a game loop, or an audio callback running on a separate thread. These kinds of workloads are executed periodically, with a pause between each run. Tracy can also track these kind of frames. To mark the beginning of a discontinuous frame use the \texttt{FrameMarkStart(name)} macro. After the work is finished, use the \texttt{FrameMarkEnd(name)} macro. \begin{bclogo}[ noborder=true, couleur=black!5, logo=\bcbombe ]{Important} \begin{itemize} \item Frame types \emph{must not} be mixed. For each frame set, identified by an unique name, use either continuous or discontinuous frames only! \item You \emph{must} issue the \texttt{FrameMarkStart} and \texttt{FrameMarkEnd} macros in proper order. Be extra careful, especially if multi-threading is involved. \item Discontinuous frames may not work correctly if the profiled program doesn't have string pooling enabled. This is an implementation issue which will be fixed in the future. \end{itemize} \end{bclogo} \subsubsection{Frame images} \label{frameimages} It is possible to attach a screen capture of your application to any frame in the main frame set. This can help you see the context of what's happening in various places in the trace. You need to implement retrieval of the image data from GPU by yourself. Images are sent using the \texttt{FrameImage(image, width, height, offset, flip)} macro, where \texttt{image} is a pointer to RGBA\footnote{Alpha value is ignored, but leaving it out wouldn't map well to the way graphics hardware works.} pixel data, \texttt{width} and \texttt{height} are the image dimensions, which \emph{must be divisible by 4}, \texttt{offset} specifies how much frame lag was there for the current image (see chapter~\ref{screenshotcode}), and \texttt{flip} should be set, if the graphics API stores images upside-down\footnote{For example, OpenGL flips images, but Vulkan does not.}. The image data is copied by the profiler and doesn't need to be retained. Handling image data requires a lot of memory and bandwidth\footnote{One uncompressed 1080p image takes 8 MB.}. 
To achieve sane memory usage you should scale down taken screen shots to a sensible size, e.g. $320\times180$. To further reduce image data size, frame images are internally compressed using the DXT1 Texture Compression technique\footnote{\url{https://en.wikipedia.org/wiki/S3_Texture_Compression}}, which significantly reduces data size\footnote{One pixel is stored in a nibble (4 bits) instead of 32 bits.}, at a small quality decrease. The compression algorithm is very fast and can be made even faster by enabling SIMD processing, as indicated in table~\ref{EtcSimd}. \begin{table}[h] \centering \begin{tabular}[h]{c|c|c} \textbf{Implementation} & \textbf{Required define} & \textbf{Time} \\ \hline x86 Reference & --- & 198.2 \si{\micro\second} \\ x86 SSE4.1\textsuperscript{a} & \texttt{\_\_SSE4\_1\_\_} & 25.4 \si{\micro\second} \\ x86 AVX2 & \texttt{\_\_AVX2\_\_} & 17.4 \si{\micro\second} \\ ARM Reference & --- & 1.04 \si{\milli\second} \\ ARM32 NEON\textsuperscript{b} & \texttt{\_\_ARM\_NEON} & 529 \si{\micro\second} \\ ARM64 NEON & \texttt{\_\_ARM\_NEON} & 438 \si{\micro\second} \end{tabular} \vspace{1em} \textsuperscript{a)} VEX encoding; \hspace{0.5em} \textsuperscript{b)} ARM32 NEON code compiled for ARM64 \caption{Client compression time of $320\times180$ image. x86: Ryzen 9 3900X (MSVC); ARM: ODROID-C2 (gcc).} \label{EtcSimd} \end{table} \begin{bclogo}[ noborder=true, couleur=black!5, logo=\bcattention ]{Caveats} \begin{itemize} \item Frame images are compressed on a second client profiler thread\footnote{Small part of compression task is performed on the server.}, to reduce memory usage of queued images. This might have impact on the performance of the profiled application. \item Due to implementation details of the network buffer, single frame image cannot be greater than 256 KB after compression. Note that a $960\times540$ image fits in this limit. \end{itemize} \end{bclogo} \paragraph{OpenGL screen capture code example} \label{screenshotcode} There are many pitfalls associated with retrieving screen contents in an efficient way. For example, using \texttt{glReadPixels} and then resizing the image using some library is terrible for performance, as it forces synchronization of the GPU to CPU and performs the downscaling in software. To do things properly we need to scale the image using the graphics hardware and transfer data asynchronously, which allows the GPU to run independently of CPU. The following example shows how this can be achieved using OpenGL 3.2. More recent OpenGL versions allow doing things even better (for example by using persistent buffer mapping), but it won't be covered here. Let's begin by defining the required objects. We need a \emph{texture} to store the resized image, a \emph{framebuffer object} to be able to write to the texture, a \emph{pixel buffer object} to store the image data for access by the CPU and a \emph{fence} to know when the data is ready for retrieval. We need everything in \emph{at least} three copies (we'll use four), because the rendering, as seen in program, may be ahead of the GPU by a couple frames. We need an index to access the appropriate data set in a ring-buffer manner. And finally, we need a queue to store indices to data sets that we are still waiting for. \begin{lstlisting} GLuint m_fiTexture[4]; GLuint m_fiFramebuffer[4]; GLuint m_fiPbo[4]; GLsync m_fiFence[4]; int m_fiIdx = 0; std::vector<int> m_fiQueue; \end{lstlisting} Everything needs to be properly initialized (the cleanup is left for the reader to figure out). 
\begin{lstlisting} glGenTextures(4, m_fiTexture); glGenFramebuffers(4, m_fiFramebuffer); glGenBuffers(4, m_fiPbo); for(int i=0; i<4; i++) { glBindTexture(GL_TEXTURE_2D, m_fiTexture[i]); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST); glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 320, 180, 0, GL_RGBA, GL_UNSIGNED_BYTE, nullptr); glBindFramebuffer(GL_FRAMEBUFFER, m_fiFramebuffer[i]); glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, m_fiTexture[i], 0); glBindBuffer(GL_PIXEL_PACK_BUFFER, m_fiPbo[i]); glBufferData(GL_PIXEL_PACK_BUFFER, 320*180*4, nullptr, GL_STREAM_READ); } \end{lstlisting} We will now setup a screen capture, which will downscale the screen contents to $320\times180$ pixels and copy the resulting image to a buffer which will be accessible by the CPU when the operation is done. This should be placed right before \emph{swap buffers} or \emph{present} call. \begin{lstlisting} assert(m_fiQueue.empty() || m_fiQueue.front() != m_fiIdx); // check for buffer overrun glBindFramebuffer(GL_DRAW_FRAMEBUFFER, m_fiFramebuffer[m_fiIdx]); glBlitFramebuffer(0, 0, res.x, res.y, 0, 0, 320, 180, GL_COLOR_BUFFER_BIT, GL_LINEAR); glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0); glBindFramebuffer(GL_READ_FRAMEBUFFER, m_fiFramebuffer[m_fiIdx]); glBindBuffer(GL_PIXEL_PACK_BUFFER, m_fiPbo[m_fiIdx]); glReadPixels(0, 0, 320, 180, GL_RGBA, GL_UNSIGNED_BYTE, nullptr); glBindFramebuffer(GL_READ_FRAMEBUFFER, 0); m_fiFence[m_fiIdx] = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0); m_fiQueue.emplace_back(m_fiIdx); m_fiIdx = (m_fiIdx + 1) % 4; \end{lstlisting} And lastly, just before the capture setup code that was just added\footnote{Yes, before. We are handling past screen captures here.} we need to have the image retrieval code. We are checking if the capture operation has finished and if it has, we map the \emph{pixel buffer object} to memory, inform the profiler that there's image data to be handled, unmap the buffer and go to check the next queue item. If a capture is still pending, we break out of the loop and wait until the next frame to check if the GPU has finished the capture. \begin{lstlisting} while(!m_fiQueue.empty()) { const auto fiIdx = m_fiQueue.front(); if(glClientWaitSync(m_fiFence[fiIdx], 0, 0) == GL_TIMEOUT_EXPIRED) break; glDeleteSync(m_fiFence[fiIdx]); glBindBuffer(GL_PIXEL_PACK_BUFFER, m_fiPbo[fiIdx]); auto ptr = glMapBufferRange(GL_PIXEL_PACK_BUFFER, 0, 320*180*4, GL_MAP_READ_BIT); FrameImage(ptr, 320, 180, m_fiQueue.size()); glUnmapBuffer(GL_PIXEL_PACK_BUFFER); m_fiQueue.erase(m_fiQueue.begin()); } \end{lstlisting} Notice that in the call to \texttt{FrameImage} we are passing the remaining queue size as the \texttt{offset} parameter. Queue size represents how many frames ahead our program is relative to the GPU. Since we are sending past frame images we need to specify how many frames behind the images are. Of course if this would be a synchronous capture (without use of fences and with retrieval code after the capture setup), we would set \texttt{offset} to zero, as there would be no frame lag. \subparagraph{High quality capture} The code above uses \texttt{glBlitFramebuffer} function, which can only use nearest neighbor filtering. This can result in low-quality screen shots, as shown on figure~\ref{lowqualityss}. With a bit more work it is possible to obtain much nicer looking screen shots, as presented on figure~\ref{highqualityss}. 
Unfortunately, you will need to setup a complete rendering pipeline for this to work. First, you need to allocate additional set of intermediate frame buffers and textures, sized the same as the screen. These new textures should have minification filter set to \texttt{GL\_LINEAR\_MIPMAP\_LINEAR}. You will also need to setup everything needed to render a full-screen quad: a simple texturing shader and vertex buffer with appropriate data. Since this vertex buffer will be used to render to the scaled-down framebuffer, you may prepare its contents beforehand and update it only when the aspect ratio would change. With all this done, the screen capture can be performed as follows: \begin{itemize} \item Setup vertex buffer configuration for the full-screen quad buffer (you only need position and uv~coordinates). \item Blit the screen contents to the full-sized framebuffer. \item Bind the texture backing the full-sized framebuffer. \item Generate mip-maps using \texttt{glGenerateMipmap}. \item Set viewport to represent the scaled-down image size. \item Bind vertex buffer data, shader, setup the required uniforms. \item Draw full-screen quad to the scaled-down framebuffer. \item Retrieve framebuffer contents, as in the code above. \item Restore viewport, vertex buffer configuration, bound textures, etc. \end{itemize} While this approach is much more complex than the previously discussed one, the resulting image quality increase makes it worth it. \begin{figure}[h] \centering \begin{minipage}{0.45\textwidth} \centering \includegraphics[width=0.9\textwidth]{images/screenshot-lo.png} \caption{Low-quality screen shot} \label{lowqualityss} \end{minipage}\hfill \begin{minipage}{0.45\textwidth} \centering \includegraphics[width=0.9\textwidth]{images/screenshot-hi.png} \caption{High-quality screen shot} \label{highqualityss} \end{minipage} \end{figure} You can see the performance results you may expect in a simple application in table~\ref{asynccapture}. The na\"ive capture performs synchronous retrieval of full screen image and resizes it using \emph{stb\_image\_resize}. The proper and high quality captures do things as described in this chapter. \begin{table}[h] \centering \begin{tabular}[h]{c|c|c|c} \textbf{Resolution} & \textbf{Na\"ive capture} & \textbf{Proper capture} & \textbf{High quality} \\ \hline $1280\times720$ & 80~FPS & 4200~FPS & 2800~FPS \\ $2560\times1440$ & 23~FPS & 3300~FPS & 1600~FPS \end{tabular} \caption{Frame capture efficiency} \label{asynccapture} \end{table} \subsection{Marking zones} \label{markingzones} To record a zone's\footnote{A \texttt{zone} represents the life-time of a special on-stack profiler variable. Typically it would exist for the duration of a whole scope of the profiled function, but you also can measure time spent in scopes of a for-loop, or an if-branch.} execution time add the \texttt{ZoneScoped} macro at the beginning of the scope you want to measure. This will automatically record function name, source file name and location. Optionally you may use the \texttt{ZoneScopedC(color)} macro to set a custom color for the zone. Note that the color value will be constant in the recording (don't try to parametrize it). You may also set a custom name for the zone, using the \texttt{ZoneScopedN(name)} macro. Color and name may be combined by using the \texttt{ZoneScopedNC(name, color)} macro. Use the \texttt{ZoneText(text, size)} macro to add a custom text string that will be displayed along the zone information (for example, name of the file you are opening). 
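To illustrate, the zone markup macros described above might be combined as in the following sketch (the function, file path handling, and color value are purely illustrative):
\begin{lstlisting}
#include <cstring>
#include "tracy/Tracy.hpp"

void LoadAsset(const char* path)
{
    // Measure the whole function, with a custom zone name and color.
    ZoneScopedNC("LoadAsset", 0x2E8B57);

    // Attach the file name to this particular zone occurrence.
    ZoneText(path, std::strlen(path));

    // ... actual loading work ...
}
\end{lstlisting}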
Multiple text strings can be attached to any single zone. If you want to set zone name on a per-call basis, you may do so using the \texttt{ZoneName(text, size)} macro. This name won't be used in the process of grouping the zones for statistical purposes (sections~\ref{statistics} and~\ref{findzone}). \subsubsection{Multiple zones in one scope} \label{multizone} Using the \texttt{ZoneScoped} family of macros creates a stack variable named \texttt{\_\_\_tracy\_scoped\_zone}. If you want to measure more than one zone in the same scope, you will need to use the \texttt{ZoneNamed} macros, which require that you provide a name for the created variable. For example, instead of \texttt{ZoneScopedN("Zone name")}, you would use \texttt{ZoneNamedN(variableName, "Zone name", true)}\footnote{The last parameter is explained in section~\ref{filteringzones}.}. The \texttt{ZoneText} and \texttt{ZoneName} macros work only for the zones created using the \texttt{ZoneScoped} macros. For the \texttt{ZoneNamed} macros, you will need to invoke the methods \texttt{Text} or \texttt{Name} of the variable you have created. \begin{bclogo}[ noborder=true, couleur=black!5, logo=\bcattention ]{Zone stack} The \texttt{ZoneScoped} macros are imposing creation and usage of an implicit zone stack. You must follow the rules of this stack also when you are using the named macros, which give you some more leeway in doing things. For example, you can only set the text for the zone which is on top of the stack, as you only could do with the \texttt{ZoneText} macro. It doesn't matter that you can call the \texttt{Text} method of a non-top zone which is accessible through a variable. Take a look at the following code: \begin{lstlisting} { ZoneNamed(Zone1, true); @\circled{a}@ { ZoneNamed(Zone2, true); @\circled{b}@ } @\circled{c}@ } \end{lstlisting} It is valid to set the \texttt{Zone1} text or name \emph{only} in places \circled{a} or \circled{c}. After \texttt{Zone2} is created at \circled{b} you can no longer perform operations on \texttt{Zone1}, until \texttt{Zone2} is destroyed. \end{bclogo} \subsubsection{Variable shadowing} The following code is fully compliant with the C++ standard: \begin{lstlisting} void Function() { ZoneScoped; ... for(int i=0; i<10; i++) { ZoneScoped; ... } } \end{lstlisting} This doesn't stop some compilers from dispensing \emph{fashion advice} about variable shadowing (as both \texttt{ZoneScoped} calls create a variable with the same name, with the inner scope one shadowing the one in the outer scope). If you want to avoid these warnings, you will also need to use the \texttt{ZoneNamed} macros. \subsubsection{Filtering zones} \label{filteringzones} Zone logging can be disabled on a per zone basis, by making use of the \texttt{ZoneNamed} macros. Each of the macros takes an \texttt{active} argument ('\texttt{true}' in the example in section~\ref{multizone}), which will determine whether the zone should be logged. Note that this parameter may be a run-time variable, for example an user controlled switch to enable profiling of a specific part of code only when required. If the condition is constant at compile-time, the resulting code will not contain a branch (the profiling code will either be always enabled, or won't be there at all). The following listing presents how profiling of specific application subsystems might be implemented: \begin{lstlisting} enum SubSystems { Sys_Physics = 1 << 0, Sys_Rendering = 1 << 1, Sys_NasalDemons = 1 << 2 } ... 
// Preferably a define in the build system
#define SUBSYSTEMS (Sys_Physics | Sys_NasalDemons)
...
void Physics::Process()
{
    ZoneNamed( __tracy, SUBSYSTEMS & Sys_Physics );   // always true, no runtime cost
    ...
}
void Graphics::Render()
{
    ZoneNamed( __tracy, SUBSYSTEMS & Sys_Rendering ); // always false, no runtime cost
    ...
}
\end{lstlisting}
\subsubsection{Manual management of zone scope}
The zone markup macros automatically report when they end, through the RAII mechanism\footnote{\url{https://en.cppreference.com/w/cpp/language/raii}}. This is very helpful, but sometimes you may want to mark the zone start and end points yourself, for example if you want to have a zone that crosses the function's boundary. This can be achieved by using the C API, which is described in section~\ref{capi}.
\subsubsection{Exiting program from within a zone}
At the present time exiting the profiled application from inside a zone is not supported. When the client calls \texttt{exit()}, the profiler will wait for all zones to end before the program can be truly terminated. If program execution has stopped inside a zone, this will never happen, and the profiled application will seemingly hang. At this point you will need to manually terminate the program (or simply disconnect the profiler server). As a workaround, you may add a \texttt{try}/\texttt{catch} pair at the bottom of the function stack (for example in the \texttt{main()} function) and replace \texttt{exit()} calls with throwing a custom exception. When this exception is caught, you may call \texttt{exit()}, knowing that the application's data structures (including profiling zones) were properly cleaned up.
\subsection{Marking locks}
Modern programs must use multi-threading to achieve the full performance capability of the CPU. Correct execution requires claiming exclusive access to data shared between threads. When many threads want to enter the same critical section at once, the application's multi-threaded performance advantage is nullified. To help solve this problem, Tracy can collect and display lock interactions in threads.
To mark a lock (mutex) for event reporting, use the \texttt{TracyLockable(type, varname)} macro. Note that the lock must implement the Mutex requirement\footnote{\url{https://en.cppreference.com/w/cpp/named_req/Mutex}} (i.e.\ there's no support for timed mutices). For a concrete example, you would replace the line
\begin{lstlisting}
std::mutex m_lock;
\end{lstlisting}
with
\begin{lstlisting}
TracyLockable(std::mutex, m_lock);
\end{lstlisting}
Alternatively, you may use \texttt{TracyLockableN(type, varname, description)} to provide a custom lock name at a global level, which will replace the automatically generated '\texttt{std::mutex m\_lock}'-like name. You may also set a custom name for a specific instance of a lock, through the \texttt{LockableName(varname, name, size)} macro.
The standard \texttt{std::lock\_guard} and \texttt{std::unique\_lock} wrappers should use the \texttt{LockableBase(type)} macro for their template parameter (unless you're using C++17, with improved template argument deduction). For example:
\begin{lstlisting}
std::lock_guard<LockableBase(std::mutex)> lock(m_lock);
\end{lstlisting}
To mark the location of a lock being held, use the \texttt{LockMark(varname)} macro, after you have obtained the lock. Note that the \texttt{varname} must be a lock variable (a reference is also valid). This step is optional.
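Putting the locking macros together, a minimal sketch (the class and the protected data are only illustrative) could look like this:
\begin{lstlisting}
#include <mutex>
#include <vector>
#include "tracy/Tracy.hpp"

class Registry
{
public:
    void Add(int value)
    {
        // The wrapper still behaves like a regular std::mutex.
        std::lock_guard<LockableBase(std::mutex)> guard(m_lock);
        LockMark(m_lock);   // optional: record where the lock is held
        m_data.push_back(value);
    }

private:
    std::vector<int> m_data;
    // Instrumented mutex; Tracy will report contention on it.
    TracyLockable(std::mutex, m_lock);
};
\end{lstlisting}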
Similarly, you can use \texttt{TracySharedLockable}, \texttt{TracySharedLockableN} and \texttt{SharedLockableBase} to mark locks implementing the SharedMutex requirement\footnote{\url{https://en.cppreference.com/w/cpp/named_req/SharedMutex}}. Note that while there's no support for timed mutices in Tracy, both \texttt{std::shared\_mutex} and \texttt{std::shared\_timed\_mutex} may be used\footnote{Since \texttt{std::shared\_mutex} was added in C++17, using \texttt{std::shared\_timed\_mutex} is the only way to have shared mutex functionality in C++14.}. \begin{bclogo}[ noborder=true, couleur=black!5, logo=\bclampe ]{Condition variables} The standard \texttt{std::condition\_variable} is only able to accept \texttt{std::mutex} locks. To be able to use Tracy lock wrapper, use \texttt{std::condition\_variable\_any} instead. \end{bclogo} \begin{bclogo}[ noborder=true, couleur=black!5, logo=\bcattention ]{Caveats} Due to limits of internal bookkeeping in the profiler, each lock may be used in no more than 64 unique threads. If you have many short lived temporary threads, consider using a thread pool to limit the numbers of created threads. \end{bclogo} \subsubsection{Custom locks} If using the \texttt{TracyLockable} or \texttt{TracySharedLockable} wrappers does not fit your needs, you may want to add a more fine-grained instrumentation to your code. Classes \texttt{LockableCtx} and \texttt{SharedLockableCtx} contained in the \texttt{TracyLock.hpp} header contain all the required functionality. Lock implementations in classes \texttt{Lockable} and \texttt{SharedLockable} show how to properly perform context handling. \subsection{Plotting data} \label{plottingdata} Tracy is able to capture and draw numeric value changes over time. You may use it to analyze draw call counts, number of performed queries, etc. To report data, use the \texttt{TracyPlot(name, value)} macro. To configure how plot values are presented by the profiler, you may use the \texttt{TracyPlotConfig(name, format)} macro, where \texttt{format} is one of the following options: \begin{itemize} \item \texttt{tracy::PlotFormatType::Number} -- values will be displayed as plain numbers. \item \texttt{tracy::PlotFormatType::Memory} -- treats the values as memory sizes. Will display kilobytes, megabytes, etc. \item \texttt{tracy::PlotFormatType::Percentage} -- values will be displayed as percentage (with value $100$ being equal to $100\%$). \end{itemize} \subsection{Message log} \label{messagelog} Fast navigation in large data sets and correlating zones with what was happening in application may be difficult. To ease these issues Tracy provides a message log functionality. You can send messages (for example, your typical debug output) using the \texttt{TracyMessage(text, size)} macro. Alternatively, use \texttt{TracyMessageL(text)} for string literal messages. If you want to include color coding of the messages (for example to make critical messages easily visible), you can use \texttt{TracyMessageC(text, size, color)} or \texttt{TracyMessageLC(text, color)} macros. \subsubsection{Application information} \label{appinfo} Tracy can collect additional information about the profiled application, which will be available in the trace description. This can include data such as the source repository revision, the environment in which application is running (dev/prod), etc. Use the \texttt{TracyAppInfo(text, size)} macro to report the data. \subsection{Memory profiling} \label{memoryprofiling} Tracy can monitor memory usage of your application. 
Knowledge about each performed memory allocation enables the following:
\begin{itemize}
\item Memory usage graph (like in massif, but fully interactive).
\item List of active allocations at program exit (memory leaks).
\item Visualization of the memory map.
\item Ability to rewind the view of active allocations and the memory map to any point of program execution.
\item Information about memory statistics of each zone.
\item Memory allocation hot-spot tree.
\end{itemize}
To mark memory events, use the \texttt{TracyAlloc(ptr, size)} and \texttt{TracyFree(ptr)} macros. Typically you would do that in overloads of \texttt{operator new} and \texttt{operator delete}, for example:
\begin{lstlisting}
void* operator new(std::size_t count)
{
    auto ptr = malloc(count);
    TracyAlloc(ptr, count);
    return ptr;
}
void operator delete(void* ptr) noexcept
{
    TracyFree(ptr);
    free(ptr);
}
\end{lstlisting}
\begin{bclogo}[ noborder=true, couleur=black!5, logo=\bcbombe ]{Important}
Each tracked memory free event must also have a corresponding memory allocation event. Tracy will terminate the profiling session if this assumption is broken (see section~\ref{instrumentationfailures}). If you encounter this issue, you may want to check for:
\begin{itemize}
\item Mismatched \texttt{malloc}/\texttt{new} or \texttt{free}/\texttt{delete}.
\item Double freeing the memory.
\item Untracked allocations made in external libraries that are freed in the application.
\item Places where the memory is allocated, but no profiling markup is added.
\end{itemize}
This requirement is relaxed in the on-demand mode (section~\ref{ondemand}), because the memory allocation event might have happened before the connection was made.
\end{bclogo}
\subsection{GPU profiling}
\label{gpuprofiling}
Tracy provides bindings for profiling OpenGL and Vulkan execution time on the GPU.
Note that the CPU and GPU timers may not be synchronized. You can correct the resulting desynchronization in the profiler's options (section~\ref{options}).
\subsubsection{OpenGL}
You will need to include the \texttt{tracy/TracyOpenGL.hpp} header file and declare each of your rendering contexts using the \texttt{TracyGpuContext} macro (typically you will only have one context). Tracy expects no more than one context per thread and no context migration.
To mark a GPU zone use the \texttt{TracyGpuZone(name)} macro, where \texttt{name} is a string literal name of the zone. Alternatively you may use \texttt{TracyGpuZoneC(name, color)} to specify the zone color.
You also need to periodically collect the GPU events using the \texttt{TracyGpuCollect} macro. A good place to do it is after the swap buffers function call.
\begin{bclogo}[ noborder=true, couleur=black!5, logo=\bcattention ]{Caveats}
\begin{itemize}
\item GPU profiling is not supported on OSX or iOS\footnote{Because Apple is unable to implement standards properly.}.
\item Android devices do work, if GPU drivers are not broken. Disjoint events are not currently handled, so some readings may be a bit spotty.
\item Nvidia drivers are unable to provide consistent timing results when two OpenGL contexts are used simultaneously.
\item Calling the \texttt{TracyGpuCollect} macro is a fairly slow operation (a couple of \si{\micro\second}).
\end{itemize}
\end{bclogo}
\subsubsection{Vulkan}
Similarly, for Vulkan support you should include the \texttt{tracy/TracyVulkan.hpp} header file.
Tracing Vulkan devices and queues is a bit more involved, and the Vulkan initialization macro \texttt{TracyVkContext(physdev, device, queue, cmdbuf)} returns an instance of \texttt{TracyVkCtx} object, which tracks an associated Vulkan queue. Cleanup is performed using the \texttt{TracyVkDestroy(ctx)} macro. You may create multiple Vulkan contexts. The physical device, logical device, queue and command buffer must relate with each other. The queue must support graphics or compute operations. The command buffer must be in the initial state and be able to be reset. It will be rerecorded and submitted to the queue multiple times and it will be in the executable state on exit from the initialization function. To mark a GPU zone use the \texttt{TracyVkZone(ctx, cmdbuf, name)} macro, where \texttt{name} is a string literal name of the zone. Alternatively you may use \texttt{TracyVkZoneC(ctx, cmdbuf, name, color)} to specify zone color. The provided command buffer must be in the recording state and it must be created within the queue that is associated with \texttt{ctx} context. You also need to periodically collect the GPU events using the \texttt{TracyVkCollect(ctx, cmdbuf)} macro\footnote{It is considerably faster than the OpenGL's \texttt{TracyGpuCollect}.}. The provided command buffer must be in the recording state and outside of a render pass instance. \subsubsection{Multiple zones in one scope} Putting more than one GPU zone macro in a single scope features the same issue as with the \texttt{ZoneScoped} macros, described in section~\ref{multizone} (but this time the variable name is \texttt{\_\_\_tracy\_gpu\_zone}). To solve this problem, in case of OpenGL use the \texttt{TracyGpuNamedZone} macro in place of \texttt{TracyGpuZone} (or the color variant). The same applies to Vulkan -- replace \texttt{TracyVkZone} with \texttt{TracyVkNamedZone}. Remember that you need to provide your own name for the created stack variable as the first parameter to the macros. \subsection{Collecting call stacks} \label{collectingcallstacks} Capture of true calls stacks can be performed by using macros with the \texttt{S} postfix, which require an additional parameter, specifying the depth of call stack to be captured. The greater the depth, the longer it will take to perform capture. Currently you can use the following macros: \texttt{ZoneScopedS}, \texttt{ZoneScopedNS}, \texttt{ZoneScopedCS}, \texttt{ZoneScopedNCS}, \texttt{TracyAllocS}, \texttt{TracyFreeS}, \texttt{TracyMessageS}, \texttt{TracyMessageLS}, \texttt{TracyMessageCS}, \texttt{TracyMessageLCS}, \texttt{TracyGpuZoneS}, \texttt{TracyGpuZoneCS}, \texttt{TracyVkZoneS}, \texttt{TracyVkZoneCS}, and the named variants. Be aware that call stack collection is a relatively slow operation. Table~\ref{CallstackTimes} and figure~\ref{CallstackPlot} show how long it took to perform a single capture of varying depth on multiple CPU architectures. 
\begin{table}[h] \centering \begin{tabular}[h]{c|c|c|c|c} \textbf{Depth} & \textbf{x86} & \textbf{x64} & \textbf{ARM} & \textbf{ARM64} \\ \hline 1 & 34 \si{\nano\second} & 98 \si{\nano\second} & 6.62 \si{\micro\second} & 6.63 \si{\micro\second} \\ 2 & 35 \si{\nano\second} & 150 \si{\nano\second} & 8.08 \si{\micro\second} & 8.25 \si{\micro\second} \\ 3 & 36 \si{\nano\second} & 168 \si{\nano\second} & 9.75 \si{\micro\second} & 10 \si{\micro\second} \\ 4 & 39 \si{\nano\second} & 190 \si{\nano\second} & 10.92 \si{\micro\second} & 11.58 \si{\micro\second} \\ 5 & 42 \si{\nano\second} & 206 \si{\nano\second} & 12.5 \si{\micro\second} & 13.33 \si{\micro\second} \\ 10 & 52 \si{\nano\second} & 306 \si{\nano\second} & 19.62 \si{\micro\second} & 21.71 \si{\micro\second} \\ 15 & 63 \si{\nano\second} & 415 \si{\nano\second} & 26.83 \si{\micro\second} & 30.13 \si{\micro\second} \\ 20 & 77 \si{\nano\second} & 531 \si{\nano\second} & 34.25 \si{\micro\second} & 38.71 \si{\micro\second} \\ 25 & 89 \si{\nano\second} & 630 \si{\nano\second} & 41.17 \si{\micro\second} & 47.17 \si{\micro\second} \\ 30 & 109 \si{\nano\second} & 735 \si{\nano\second} & 48.33 \si{\micro\second} & 55.63 \si{\micro\second} \\ 35 & 123 \si{\nano\second} & 843 \si{\nano\second} & 55.87 \si{\micro\second} & 64.09 \si{\micro\second} \\ 40 & 142 \si{\nano\second} & 950 \si{\nano\second} & 63.12 \si{\micro\second} & 72.59 \si{\micro\second} \\ 45 & 154 \si{\nano\second} & 1.05 \si{\micro\second} & 70.54 \si{\micro\second} & 81 \si{\micro\second} \\ 50 & 167 \si{\nano\second} & 1.16 \si{\micro\second} & 78 \si{\micro\second} & 89.5 \si{\micro\second} \\ 55 & 179 \si{\nano\second} & 1.26 \si{\micro\second} & 85.04 \si{\micro\second} & 98 \si{\micro\second} \\ 60 & 193 \si{\nano\second} & 1.37 \si{\micro\second} & 92.75 \si{\micro\second} & 106.59 \si{\micro\second} \end{tabular} \caption{Median times of zone capture with call stack. x86, x64: i7 8700K; ARM: Banana Pi; ARM64: ODROID-C2. Selected architectures are plotted on figure~\ref{CallstackPlot}} \label{CallstackTimes} \end{table} \begin{figure}[h] \centering\begin{tikzpicture} \begin{axis}[xlabel=Call stack depth,ylabel=Time (\si{\nano\second}), legend pos=north west] \addplot[smooth, mark=o, red] plot coordinates { (1, 98) (2, 150) (3, 168) (4, 190) (5, 206) (10, 306) (15, 415) (20, 531) (25, 630) (30, 735) (35, 843) (40, 950) (45, 1050) (50, 1160) (55, 1260) (60, 1370) }; \addlegendentry{x64} \addplot[smooth, mark=x, blue] plot coordinates { (1, 34) (2, 35) (3, 36) (4, 39) (5, 42) (10, 52) (15, 63) (20, 77) (25, 89) (30, 109) (35, 123) (40, 142) (45, 154) (50, 167) (55, 179) (60, 193) }; \addlegendentry{x86} \end{axis} \end{tikzpicture} \caption{Plot of call stack capture times (see table~\ref{CallstackTimes}). Notice that the capture time grows linearly with requested capture depth} \label{CallstackPlot} \end{figure} You can force call stack capture in the non-\texttt{S} postfixed macros by adding the \texttt{TRACY\_CALLSTACK} define, set to the desired call stack capture depth. This setting doesn't affect the explicit call stack macros. The maximum call stack depth that can be retrieved is 62 frames. This is a restriction at the level of operating system. \begin{bclogo}[ noborder=true, couleur=black!5, logo=\bcattention ]{Debugging symbols} To have proper call stack information, the profiled application must be compiled with debugging symbols enabled. 
You can achieve that in the following way: \begin{itemize} \item On MSVC open the project properties and go to \emph{Linker\textrightarrow Debugging\textrightarrow Generate Debug Info}, where the \emph{Generate Debug Information} option should be selected. \item On gcc or clang remember to specify the debugging information \texttt{-g} parameter during compilation and omit the strip symbols \texttt{-s} parameter. Link the executable with an additional option \texttt{-rdynamic} (or \texttt{-{}-export-dynamic}, if you are passing parameters directly to the linker). \end{itemize} You may also be interested in symbols from external libraries, especially if you have sampling profiling enabled (section~\ref{sampling}). In MSVC you can retrieve such symbols by going to \emph{Tools\textrightarrow Options\textrightarrow Debugging\textrightarrow Symbols} and selecting appropriate \emph{Symbol file (.pdb) location} servers. Note that additional symbols may significantly increase application startup times. \end{bclogo} \subsection{Lua support} To profile Lua code using Tracy, include the \texttt{tracy/TracyLua.hpp} header file in your Lua wrapper and execute \texttt{tracy::LuaRegister(lua\_State*)} function to add instrumentation support. In the Lua code, add \texttt{tracy.ZoneBegin()} and \texttt{tracy.ZoneEnd()} calls to mark execution zones. You need to call the \texttt{ZoneEnd} method, because there is no automatic destruction of variables in Lua and we don't know when the garbage collection will be performed. \emph{Double check if you have included all return paths!} Use \texttt{tracy.ZoneBeginN(name)} if you want to set a custom zone name\footnote{While technically this name doesn't need to be constant, like in the \texttt{ZoneScopedN} macro, it should be, as it is used to group the zones together. This grouping is then used to display various statistics in the profiler. You may still set the per-call name using the \texttt{tracy.ZoneName} method.}. Use \texttt{tracy.ZoneText(text)} to set zone text. Use \texttt{tracy.Message(text)} to send messages. Use \texttt{tracy.ZoneName(text)} to set zone name on a per-call basis. Lua instrumentation needs to perform additional work (including memory allocation) to store source location. This approximately doubles the data collection cost. \subsubsection{Call stacks} To collect Lua call stacks (see section~\ref{collectingcallstacks}), replace \texttt{tracy.ZoneBegin()} calls with \texttt{tracy.ZoneBeginS(depth)}, and \texttt{tracy.ZoneBeginN(name)} calls with \texttt{tracy.ZoneBeginNS(name, depth)}. Using the \texttt{TRACY\_CALLSTACK} macro automatically enables call stack collection in all zones. Be aware that for Lua call stack retrieval to work, you need to be on a platform which supports collection of native call stacks. Cost of performing Lua call stack capture is presented in table~\ref{CallstackTimesLua} and figure~\ref{CallstackPlotLua}. Lua call stacks include native call stacks, which have a capture cost of their own (table~\ref{CallstackTimes}) and the \texttt{depth} parameter is applied for both captures. The presented data was captured with full Lua stack depth, but only 13 frames were available on the native call stack. 
Hence, to explain the non-linearity of the graph you need to consider what was really measured: \begin{displaymath} \text{Cost}_{\text{total}}(\text{depth}) = \begin{cases} \text{Cost}_{\text{Lua}}(\text{depth}) + \text{Cost}_{\text{native}}(\text{depth}) & \text{when depth} \leq 13 \\ \text{Cost}_{\text{Lua}}(\text{depth}) + \text{Cost}_{\text{native}}(13) & \text{when depth} > 13 \end{cases} \end{displaymath} \begin{table}[h] \centering \begin{tabular}[h]{c|c} \textbf{Depth} & \textbf{Time} \\ \hline 1 & 707 \si{\nano\second} \\ 2 & 699 \si{\nano\second} \\ 3 & 624 \si{\nano\second} \\ 4 & 727 \si{\nano\second} \\ 5 & 836 \si{\nano\second} \\ 10 & 1.77 \si{\micro\second} \\ 15 & 2.44 \si{\micro\second} \\ 20 & 2.51 \si{\micro\second} \\ 25 & 2.98 \si{\micro\second} \\ 30 & 3.6 \si{\micro\second} \\ 35 & 4.33 \si{\micro\second} \\ 40 & 5.17 \si{\micro\second} \\ 45 & 6.01 \si{\micro\second} \\ 50 & 6.99 \si{\micro\second} \\ 55 & 8.11 \si{\micro\second} \\ 60 & 9.17 \si{\micro\second} \end{tabular} \caption{Median times of Lua zone capture with call stack (x64, 13 native frames)} \label{CallstackTimesLua} \end{table} \begin{figure}[h] \centering\begin{tikzpicture} \begin{axis}[xlabel=Call stack depth,ylabel=Time (\si{\micro\second}), legend pos=north west] \addplot[smooth, mark=o, red] plot coordinates { (1, 0.707) (2, 0.699) (3, 0.624) (4, 0.727) (5, 0.836) (10, 1.770) (15, 2.440) (20, 2.510) (25, 2.980) (30, 3.600) (35, 4.330) (40, 5.170) (45, 6.010) (50, 6.990) (55, 8.110) (60, 9.170) }; \end{axis} \end{tikzpicture} \caption{Plot of call Lua stack capture times (see table~\ref{CallstackTimesLua})} \label{CallstackPlotLua} \end{figure} \subsubsection{Instrumentation cleanup} Even if Tracy is disabled, you still have to pay the no-op function call cost. To prevent that you may want to use the \texttt{tracy::LuaRemove(char* script)} function, which will replace instrumentation calls with white-space. This function does nothing if profiler is enabled. \subsection{C API} \label{capi} In order to profile code written in C programming language, you will need to include the \texttt{tracy/TracyC.h} header file, which exposes the C API. At the moment there's no support for C API based markup of locks, OpenGL, Vulkan or Lua. \begin{bclogo}[ noborder=true, couleur=black!5, logo=\bcbombe ]{Important} Tracy is written in C++, so you will need to have a C++ compiler and link with C++ standard library, even if your program is strictly pure C. \end{bclogo} \begin{bclogo}[ noborder=true, couleur=black!5, logo=\bcattention ]{Caveats} If you are using MSVC, you will need to disable the \emph{Edit And Continue} feature, for the C API to work\footnote{There's no such requirement for C++ API.}. To do so, open the project properties and go to \emph{C/C++\textrightarrow General\textrightarrow Debug Information Format} and make sure \emph{Program Database for Edit And Continue (/ZI)} is \emph{not} selected. 
\end{bclogo} \subsubsection{Frame markup} To mark frames, as described in section~\ref{markingframes}, use the following macros: \begin{itemize} \item \texttt{TracyCFrameMark} \item \texttt{TracyCFrameMarkNamed(name)} \item \texttt{TracyCFrameMarkStart(name)} \item \texttt{TracyCFrameMarkEnd(name)} \item \texttt{TracyCFrameImage(image, width, height, offset, flip)} \end{itemize} \subsubsection{Zone markup} \label{czonemarkup} The following macros mark the beginning of a zone: \begin{itemize} \item \texttt{TracyCZone(ctx, active)} \item \texttt{TracyCZoneN(ctx, name, active)} \item \texttt{TracyCZoneC(ctx, color, active)} \item \texttt{TracyCZoneNC(ctx, name, color, active)} \end{itemize} Refer to sections~\ref{markingzones} and~\ref{multizone} for description of macro variants and parameters. The \texttt{ctx} parameter specifies the name of a data structure, which will be created on stack to hold the internal zone data. Unlike C++, there's no automatic destruction mechanism in C, so you will need to manually mark where the zone ends. To do so use the \texttt{TracyCZoneEnd(ctx)} macro. Zone text and name may be set by using the \texttt{TracyCZoneText(ctx, txt, size)} and \texttt{TracyCZoneName(ctx, txt, size)} macros. Make sure you are following the zone stack rules, as described in section~\ref{multizone}! \paragraph{Zone context data structure} \label{zonectx} In typical use cases the zone context data structure is hidden from your view, requiring only to specify its name for the \texttt{TracyCZone} and \texttt{TracyCZoneEnd} macros. However, it is possible to use it in advanced scenarios, for example if you want to start a zone in one function, then end it in another one. To do so you will need to forward the data structure either through a function parameter, or as a return value. To accomplish this you need to keep in mind the following rules: \begin{itemize} \item The created variable name is exactly what you pass as the \texttt{ctx} parameter. \item The data structure is of an opaque, immutable type \texttt{TracyCZoneCtx}. \item Contents of the data structure can be copied by assignment. Do not retrieve or use the structure's address -- this is asking for trouble. \item You \emph{must} use the data structure (or any of its copies) exactly \emph{once} to end a zone. \end{itemize} \paragraph{Zone validation} Since all instrumentation using the C API has to be done by hand, it is possible to miss some code paths where a zone should be started or ended. Tracy will perform additional validation of instrumentation correctness to prevent bad profiling runs. Read section~\ref{instrumentationfailures} for more information. The validation comes with a performance cost though, which you may not want to pay. If you are \emph{completely sure} that the instrumentation is not broken in any way, you may use the \texttt{TRACY\_NO\_VERIFY} macro, which will disable the validation code. \paragraph{Allocated source locations} Sometimes you might want to provide your own source location data to a zone. For example, you may be integrating Tracy with another programming language, one where there are no guarantees about object lifetime, or how data structures will be laid out in the memory. 
To do so, you need to create an \emph{allocated source location}, by calling one of the following functions:
\begin{itemize}
\item \texttt{\_\_\_tracy\_alloc\_srcloc(uint32\_t line, const char* source, const char* function)}
\item \texttt{\_\_\_tracy\_alloc\_srcloc\_name(uint32\_t line, const char* source, const char* function, const char* name, size\_t nameSz)}
\end{itemize}
Here \texttt{line} is the line number in the \texttt{source} source file and \texttt{function} is the name of the function in which the zone is created. Both \texttt{source} and \texttt{function} must be null-terminated strings. You may additionally specify an optional zone name, by providing it in the \texttt{name} variable and specifying its size in \texttt{nameSz}. None of the provided text strings needs to be kept in memory after the source location is allocated. Both functions return a \texttt{uint64\_t} source location value, which must then be passed to one of the zone begin functions:
\begin{itemize}
\item \texttt{\_\_\_tracy\_emit\_zone\_begin\_alloc(srcloc, active)}
\item \texttt{\_\_\_tracy\_emit\_zone\_begin\_alloc\_callstack(srcloc, depth, active)}
\end{itemize}
These functions return a \texttt{TracyCZoneCtx} context value, which must be handled as described in sections~\ref{czonemarkup} and~\ref{zonectx}.

The variable representing an allocated source location is of an opaque type. After it is passed to one of the zone begin functions, its value \emph{cannot be reused}. You must allocate a new source location for each zone begin event.

\begin{bclogo}[ noborder=true, couleur=black!5, logo=\bcbombe ]{Important}
Since you are directly calling the profiler functions here, you will need to take care of manually disabling the code if the \texttt{TRACY\_ENABLE} macro is not defined.
\end{bclogo}

\subsubsection{Memory profiling}

Use the following macros in your implementations of \texttt{malloc} and \texttt{free}:
\begin{itemize}
\item \texttt{TracyCAlloc(ptr, size)}
\item \texttt{TracyCFree(ptr)}
\end{itemize}

Using this functionality properly can be quite tricky, as you will also need to handle all the memory allocations made by external libraries (which typically allow usage of custom memory allocation functions), as well as the allocations made by system functions. If such an allocation can't be tracked, you will need to make sure that freeing it is not reported\footnote{It's not uncommon to see a pattern where a system function returns some allocated memory, which you then need to free.}.

There is no explicit support for the \texttt{realloc} function. You will need to handle it by marking memory allocations and frees, according to the system manual describing the behavior of this routine.

For more information refer to section~\ref{memoryprofiling}.

\subsubsection{Plots and messages}

To send additional markup in the form of plot data points or messages, use the following macros:
\begin{itemize}
\item \texttt{TracyCPlot(name, val)}
\item \texttt{TracyCMessage(txt, size)}
\item \texttt{TracyCMessageL(txt)}
\item \texttt{TracyCMessageC(txt, size, color)}
\item \texttt{TracyCMessageLC(txt, color)}
\item \texttt{TracyCAppInfo(txt, size)}
\end{itemize}
Consult sections~\ref{plottingdata} and~\ref{messagelog} for more information.
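For illustration, a minimal sketch combining the zone, memory and plot/message markup described in this section is shown below. The \texttt{ProcessItems} function, the \texttt{item\_count} parameter and the temporary buffer are hypothetical placeholders; only the \texttt{TracyC*} macros come from \texttt{tracy/TracyC.h}.

\begin{lstlisting}
#include <stdlib.h>
#include "tracy/TracyC.h"

/* Hypothetical work function instrumented with the C API. */
void ProcessItems(int item_count)
{
    TracyCZoneN(ctx, "ProcessItems", 1);     /* begin a named, active zone */

    void* buffer = malloc(item_count * 64);  /* some allocation to track */
    TracyCAlloc(buffer, item_count * 64);

    /* ... do the actual work here ... */

    TracyCPlot("Items", item_count);         /* report a plot data point */
    TracyCMessageL("ProcessItems finished"); /* send a literal message */

    TracyCFree(buffer);                      /* report the free... */
    free(buffer);                            /* ...then release the memory */

    TracyCZoneEnd(ctx);                      /* end the zone manually */
}
\end{lstlisting}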
\subsubsection{Call stacks} You can collect call stacks of zones and memory allocation events, as described in section~\ref{collectingcallstacks}, by using the following \texttt{S} postfixed macros: \texttt{TracyCZoneS}, \texttt{TracyCZoneNS}, \texttt{TracyCZoneCS}, \texttt{TracyCZoneNCS}, \texttt{TracyCAllocS}, \texttt{TracyCFreeS}, \texttt{TracyCMessageS}, \texttt{TracyCMessageLS}, \texttt{TracyCMessageCS}, \texttt{TracyCMessageLCS}. \subsection{Automated data collection} \label{automated} Tracy will perform automatic collection of system data without user intervention. This behavior is platform specific and may not be available everywhere. Refer to section~\ref{featurematrix} for more information. \subsubsection{CPU usage} System-wide CPU load is gathered with relatively high granularity (one reading every 100 \si{\milli\second}). The readings are available as a plot (see section~\ref{plots}). Note that this parameter takes into account all applications running on the system, not only the profiled program. \subsubsection{Context switches} \label{contextswitches} Since the profiled program is executing simultaneously with other applications, you can't have exclusive access to the CPU. The multitasking operating system's scheduler is giving threads waiting to execute short time slices, where part of the work can be done. Afterwards threads are preempted to give other threads a chance to run. This ensures that each program running in the system has a fair environment and no program can hog the system resources for itself. As a corollary, it is often not enough to know how long it took to execute a zone. The thread in which a zone was running might have been suspended by the system, which artificially increases the time readings. To solve this problem, Tracy collects context switch\footnote{A context switch happens when any given CPU core stops executing one thread and starts running another one.} information. This data can be then used to see when a zone was in the executing state and where it was waiting to be resumed. Context switch data capture may be disabled by adding the \texttt{TRACY\_NO\_SYSTEM\_TRACING} define to the client. \begin{bclogo}[ noborder=true, couleur=black!5, logo=\bcattention ]{Caveats} \begin{itemize} \item Context switch data is retrieved using the kernel profiling facilities, which are not available to users with normal privilege level. To collect context switches you will need to elevate your rights to admin level, either by running the profiled program from the \texttt{root} account on Unix, or through the \emph{Run as administrator} option on Windows. On Android context switches will be collected if you have a rooted device (see section~\ref{androidlunacy} for additional information). \item Android context switch capture requires spawning an elevated process to read kernel data. While the standard \texttt{cat} utility can be used for this task, the CPU usage is not acceptable due to how the kernel handles blocking reads. As a workaround, Tracy will inject a specialized kernel data reader program at \texttt{/data/tracy\_systrace}, which has more acceptable resource requirements. \end{itemize} \end{bclogo} \subsubsection{CPU topology} \label{cputopology} Tracy may perform discovery of CPU topology data in order to provide further information about program performance characteristics. It is very useful when combined with context switches (section~\ref{contextswitches}). 
In essence, the topology information gives you context about what any given \emph{logical CPU} really is and how it relates to other logical CPUs. The topology hierarchy consists of packages, cores and threads.

Packages contain cores and shared resources, such as the memory controller, L3 cache, etc. A store-bought CPU is an example of a package. While you may think that multi-package configurations would be the domain of servers, they are actually quite common in the mobile device world, with many platforms using the \emph{big.LITTLE} arrangement of two packages.

Cores contain at least one thread and shared resources: execution units, L1 and L2 cache, etc.

Threads (or \emph{logical CPUs}; not to be confused with program threads) are basically the processor instruction pipelines. A pipeline might become stalled, for example due to a pending memory access, leaving core resources unused. To reduce this bottleneck, some CPUs may use simultaneous multithreading\footnote{Commonly known as Hyper-threading.}, in which more than one pipeline uses the resources of a single physical core.

Knowing which package and core any logical CPU belongs to enables many insights. For example, two threads scheduled to run on the same core will compete for shared execution units and cache, resulting in reduced performance. Likewise, a migration of a program thread from one core to another will invalidate the L1 and L2 cache, which is less costly than a migration from one package to another, which also invalidates the L3 cache.

\begin{bclogo}[ noborder=true, couleur=black!5, logo=\bcbombe ]{Important}
In this manual, the word \emph{core} is typically used as a short term for \emph{logical CPU}. Do not confuse it with physical processor cores.
\end{bclogo}

\subsubsection{Call stack sampling}
\label{sampling}

Manual markup of zones doesn't cover every function existing in a program and cannot be performed in system libraries, or in the kernel. This can leave blank spaces on the trace, leaving you with no clue what the application was doing. Tracy is able to periodically inspect the state of running threads, providing you with a snapshot of the call stack at the time when sampling was performed. While this information doesn't have the fidelity of manually inserted zones, it can sometimes give you an insight into where to go next.

This feature requires privilege elevation, as described in chapter~\ref{contextswitches}. Proper setup of the required program debugging data is described in chapter~\ref{collectingcallstacks}.

\subsubsection{Executable code retrieval}
\label{executableretrieval}

To enable deep insight into program execution, Tracy will capture small chunks of the executable image during profiling. The retrieved code can subsequently be disassembled for detailed inspection. This is done only for functions that are no larger than 64 KB and only if symbol information is present.

You should be extra careful when working with non-public code, as parts of your program will be embedded in the captured trace.

\subsection{Trace parameters}
\label{traceparameters}

Sometimes it is desirable to change how the profiled application behaves during the profiling run; for example, you may want to enable or disable capture of frame images without recompiling and restarting your program.
To be able to do so, you must register a callback function using the \texttt{TracyParameterRegister(callback)} macro, where \texttt{callback} is a function conforming to the following signature:

\begin{lstlisting}
void Callback(uint32_t idx, int32_t val)
\end{lstlisting}

The \texttt{idx} argument is a user-defined parameter index and \texttt{val} is the value set in the profiler user interface.

To specify individual parameters, use the \texttt{TracyParameterSetup(idx, name, isBool, val)} macro. The \texttt{idx} value will be passed to the callback function for identification purposes (Tracy doesn't care what it's set to). \texttt{Name} is the parameter label, displayed on the list of parameters. Finally, \texttt{isBool} determines whether \texttt{val} should be interpreted as a boolean value or as an integer number.

\begin{bclogo}[ noborder=true, couleur=black!5, logo=\bcbombe ]{Important}
Usage of trace parameters makes profiling runs dependent on user interaction with the profiler, so it is not recommended if a consistent profiling environment is desired. Furthermore, interaction with the parameters is only possible in the graphical profiling application, not in the command line capture utility.
\end{bclogo}

\section{Capturing the data}
\label{capturing}

After the client application has been instrumented, you will want to connect to it using a server, which is available either as a headless capture-only utility or as a full-fledged graphical profiling interface.

\subsection{Command line}

You can capture a trace using the command line utility contained in the \texttt{capture} directory. To use it you will need to provide the following parameters:
\begin{itemize}
\item \texttt{-o output.tracy} -- the file name of the resulting trace.
\item \texttt{-a address} -- specifies the IP address (or a domain name) of the client application (uses \texttt{localhost} if not provided).
\item \texttt{-p port} -- the network port which should be used (optional).
\end{itemize}
If there is no client running at the given address, the server will wait until a connection can be made. During the capture the following information will be displayed:

\begin{verbatim}
% ./capture -a 127.0.0.1 -o trace
Connecting to 127.0.0.1:8086...
Queue delay: 5 ns
Timer resolution: 3 ns
1.33 Mbps / 40.4% = 3.29 Mbps | Net: 64.42 MB | Mem: 283.03 MB | Time: 10.6 s
\end{verbatim}

The \emph{queue delay} and \emph{timer resolution} parameters are calibration results of the timers used by the client. The next line is a status bar, which displays: the network connection speed, the connection compression ratio and the resulting uncompressed data rate; the total amount of data transferred over the network; the memory usage of the capture utility; and the time extent of the captured data.

You can disconnect from the client and save the captured trace by pressing \keys{\ctrl + C}.

\subsection{Interactive profiling}
\label{interactiveprofiling}

If you want to look at the profile data in real time (or load a saved trace file), you can use the data analysis utility contained in the \texttt{profiler} directory.

After starting the application, you will be greeted with a welcome dialog (figure~\ref{welcomedialog}), presenting a bunch of useful links (\faBook{}~\emph{User manual}, \faGlobeAmericas{}~\emph{Web}, \faComment~\emph{Join chat} and \faHeart{}~\emph{Sponsor}). The \faGlobeAmericas{}~\emph{Web} button opens a drop-down list with links to the profiler's \emph{\faHome{}~Home page} and a bunch of \emph{\faVideo{}~Feature videos}.
The client \emph{address entry} field and the \faWifi{}~\emph{Connect} button are used to connect to a running client\footnote{Note that a custom port may be provided here, for example by entering '127.0.0.1:1234'.}. You can use the connection history button~\faCaretDown{} to display a list of commonly used targets, from which you can quickly select an address. You can remove entries from this list by hovering the \faMousePointer{}~mouse cursor over an entry and pressing the \keys{\del} button on the keyboard. If you want to open a trace that you have stored on the disk, you can do so by pressing the \faFolderOpen{}~\emph{Open saved trace} button. The \emph{discovered clients} list is only displayed if there are clients broadcasting their presence on the local network\footnote{Only on IPv4 networks and only within the broadcast domain.}. Each entry shows the address\footnote{Either as an IP address, or as a host name, if able to resolve.} of the client (and optionally port, if different from the default one), how long the client has been running, and the name of the application that is profiled. Clicking on an entry will connect to the client. Incompatible clients are grayed-out and can't be connected to. Clicking on the \emph{\faFilter{}~Filter} toggle button will display client filtering input fields, allowing removal of the displayed entries, according to their address, port number, or program name. If filters are active, a yellow \faExclamationTriangle{}~warning icon will be displayed. \begin{figure}[h] \centering\begin{tikzpicture} \draw (0, 0) rectangle (4.95, -3.2); \draw[pattern=crosshatch dots] (0, 0) rectangle+(4.95, 0.3); \draw[rounded corners=5pt] (0.1, -0.1) rectangle+(1.1, -0.5) node [midway] {\faBook}; \draw[rounded corners=5pt] (1.3, -0.1) rectangle+(1.1, -0.5) node [midway] {\faGlobeAmericas}; \draw[rounded corners=5pt] (2.5, -0.1) rectangle+(1.1, -0.5) node [midway] {\faComment}; \draw[rounded corners=5pt] (3.7, -0.1) rectangle+(1.1, -0.5) node [midway] {\faHeart}; \draw (0.1, -0.9) rectangle+(4.1, -0.5) node [midway] {Address entry}; \draw[rounded corners=5pt] (4.33, -0.9) rectangle+(0.5, -0.5) node [midway] {\faCaretDown}; \draw[rounded corners=5pt] (0.1, -1.55) rectangle+(2, -0.5) node [midway] {\faWifi{}~Connect}; \draw[rounded corners=5pt] (2.3, -1.55) rectangle+(2.5, -0.5) node [midway] {\faFolderOpen{}~Open trace}; \draw (0.1, -2.1) node[anchor=north west] {Discovered clients: \faFilter}; \draw (0.1, -2.6) node[anchor=north west] {127.0.0.1 | 21 s | Application}; \end{tikzpicture} \caption{Welcome dialog.} \label{welcomedialog} \end{figure} Both connecting to a client and opening a saved trace will present you with the main profiler view, which you can use to analyze the data (see section~\ref{analyzingdata}). \subsubsection{Connection information pop-up} \label{connectionpopup} If this is a real-time capture, you will also have access to the connection information pop-up (figure~\ref{connectioninfo}) through the \emph{\faWifi{}~Connection} button, with the capture status similar to the one displayed by the command line utility. This dialog also displays the connection speed graphed over time and the profiled application's current frames per second and frame time measurements. The \emph{Query backlog} consists of two numbers. The first one represents the number of queries that were held back due to the bandwidth volume overwhelming the available network send buffer. 
The second one shows how many queries are in flight, meaning requests that were sent to the client but haven't yet been answered. While these numbers drain down to zero, the performance of real-time profiling may be temporarily compromised.

The circle displayed next to the bandwidth graph signals the connection status. If it's red, the connection is active. If it's gray, the client has disconnected.

You can use the \faSave{}~\emph{Save trace} button to save the current profile data to a file\footnote{This should be taken literally. If a live capture is in progress and a save is performed, some data may be missing from the capture and won't be saved.}. Use the \faPlug{}~\emph{Stop} button to disconnect from the client\footnote{While requesting disconnect stops retrieval of any new events, the profiler will wait for any data that is still pending for the current set of events.}. The \faExclamationTriangle{}~\emph{Discard} button is used to discard the current trace.

\begin{figure}[h]
\centering\begin{tikzpicture}
\draw (0, 0.5) rectangle (6.1, -3.2);
\draw (0, 0.5) node[anchor=north west] {Connected to: 127.0.0.1};
\draw[pattern=north east lines] (0.35, -0.35) circle(0.2);
\draw (0.7, -0.1) rectangle+(3.5, -0.5) node[midway] {Bandwidth graph};
\draw (4.3, -0.1) node[anchor=north west] {1.33 Mbps};
\draw (0.1, -0.6) node[anchor=north west] {Ratio 40.4\% \hspace{5pt} Real: 3.29 Mbps};
\draw (0.1, -1.1) node[anchor=north west] {Data transferred: 23.11 MB};
\draw (0.1, -1.6) node[anchor=north west] {Query backlog: 0 + 12};
\draw (0.1, -2.1) node[anchor=north west] {FPS: 60 \hspace{5pt} Frame time: 16.7 ms};
\draw[rounded corners=5pt] (0.1, -2.6) rectangle+(2.2, -0.5) node [midway] {\faSave{}~Save trace};
\draw[rounded corners=5pt] (2.6, -2.6) rectangle+(1.3, -0.5) node [midway] {\faPlug{}~Stop};
\draw[rounded corners=5pt] (4.0, -2.6) rectangle+(2, -0.5) node [midway] {\faExclamationTriangle{}~Discard};
\end{tikzpicture}
\caption{Connection information pop-up.}
\label{connectioninfo}
\end{figure}

If the profiled application opted to provide trace parameters (see section~\ref{traceparameters}) and the connection is still active, this pop-up will also contain a \emph{trace parameters} section, listing all the provided options. When you change any value here, a callback function will be executed on the client.

\subsubsection{Automatic loading or connecting}

You can pass a trace file name as an argument to the profiler application to open the capture, skipping the welcome dialog. You can also use the \texttt{-a address} argument to automatically connect to the given address. To specify the network port, pass the \texttt{-p port} parameter. It will be used for connections to the client (overridable in the UI) and for listening to client discovery broadcasts.

\subsection{Connection speed}

Tracy's network bandwidth requirements depend on the amount of data collection the profiled application is performing. In typical use case scenarios, you may expect anything between 1~Mbps and 100~Mbps of data transfer. The maximum attainable connection speed is determined by the ability of the client to provide data and the ability of the server to process the received data. In an extreme-conditions test performed on an i7~8700K, the maximum transfer rate peaked at 950~Mbps. Each second the profiler was able to process 27~million zones and consume 1~GB of RAM.

\subsection{Memory usage}

The captured data is stored in RAM and only written to disk when the capture finishes.
This can result in memory exhaustion when you are capturing massive amounts of profile data, or even in normal usage situations, when the capture is performed over a long stretch of time. The recommended usage pattern is to perform moderate instrumentation of the client code and to limit the capture time to the strictly necessary minimum.

In some cases it may be useful to perform an \emph{on-demand} capture, as described in section~\ref{ondemand}. In such a case you will be able to profile only the interesting case (e.g.\ behavior during loading of a level in a game), ignoring all the unneeded data.

If you truly need to capture large traces, you have two options. Either buy more RAM, or use a large swap file on a fast disk drive\footnote{The operating system is able to manage memory paging much better than Tracy would ever be able to.}.

\subsection{Trace versioning}

Each new release of Tracy changes the internal format of trace files. While there is a backwards compatibility layer, allowing loading of traces created by previous versions of Tracy in new releases, it won't be there forever. You are thus advised to upgrade your traces using the utility contained in the \texttt{update} directory.

To use it, you will need to provide the input file and the output file. The program will print a short summary when it finishes, with information about the trace file versions, their respective sizes and the output trace file compression ratio:

\begin{verbatim}
% ./update old.tracy new.tracy
old.tracy (0.3.0) {916.4 MB} -> new.tracy (0.4.0) {349.4 MB, 31.53%}  9.7 s, 38.13% change
\end{verbatim}

The new file contains the same data as the old one, but in the updated internal representation. Note that to perform an upgrade, the whole trace needs to be loaded into memory.

\subsubsection{Archival mode}

The update utility supports optional higher levels of data compression, which reduce the disk size of traces at the cost of increased compression times. With the default settings, the output files have a reasonable size and are quick to save and load. A list of the available compression modes, the parameters that enable them, and the compression results is available in table~\ref{compressiontimes} and figures~\ref{savesize}, \ref{savetime} and~\ref{loadtime}.
\begin{table}[h] \centering \begin{tabular}[h]{c|c|c|c|c} \textbf{Mode} & \textbf{Size} & \textbf{Ratio} & \textbf{Save time} & \textbf{Load time} \\ \hline \emph{default} & 162.48 MB & 17.19\% & 1.91 \si{\second} & 470 \si{\milli\second} \\ \texttt{-{}-hc} & 77.33 MB & 8.18\% & 39.24 \si{\second} & 401 \si{\milli\second} \\ \texttt{-{}-extreme} & 72.67 MB & 7.68\% & 4:30 & 406 \si{\milli\second} \\ \hline \texttt{-{}-zstd 1} & 63.17 MB & 6.68\% & 2.27 \si{\second} & 868 \si{\milli\second} \\ \texttt{-{}-zstd 2} & 63.29 MB & 6.69\% & 2.31 \si{\second} & 884 \si{\milli\second} \\ \texttt{-{}-zstd 3} & 62.94 MB & 6.65\% & 2.43 \si{\second} & 867 \si{\milli\second} \\ \texttt{-{}-zstd 4} & 62.81 MB & 6.64\% & 2.44 \si{\second} & 855 \si{\milli\second} \\ \texttt{-{}-zstd 5} & 61.04 MB & 6.45\% & 3.98 \si{\second} & 855 \si{\milli\second} \\ \texttt{-{}-zstd 6} & 60.27 MB & 6.37\% & 4.19 \si{\second} & 827 \si{\milli\second} \\ \texttt{-{}-zstd 7} & 61.53 MB & 6.5\% & 6.6 \si{\second} & 761 \si{\milli\second} \\ \texttt{-{}-zstd 8} & 60.44 MB & 6.39\% & 7.84 \si{\second} & 746 \si{\milli\second} \\ \texttt{-{}-zstd 9} & 59.58 MB & 6.3\% & 9.6 \si{\second} & 724 \si{\milli\second} \\ \texttt{-{}-zstd 10} & 59.36 MB & 6.28\% & 10.29 \si{\second} & 706 \si{\milli\second} \\ \texttt{-{}-zstd 11} & 59.2 MB & 6.26\% & 11.23 \si{\second} & 717 \si{\milli\second} \\ \texttt{-{}-zstd 12} & 58.51 MB & 6.19\% & 15.43 \si{\second} & 695 \si{\milli\second} \\ \texttt{-{}-zstd 13} & 56.16 MB & 5.94\% & 35.55 \si{\second} & 642 \si{\milli\second} \\ \texttt{-{}-zstd 14} & 55.76 MB & 5.89\% & 37.74 \si{\second} & 627 \si{\milli\second} \\ \texttt{-{}-zstd 15} & 54.65 MB & 5.78\% & 1:01 & 600 \si{\milli\second} \\ \texttt{-{}-zstd 16} & 50.94 MB & 5.38\% & 1:34 & 537 \si{\milli\second} \\ \texttt{-{}-zstd 17} & 50.18 MB & 5.30\% & 1:44 & 542 \si{\milli\second} \\ \texttt{-{}-zstd 18} & 49.91 MB & 5.28\% & 2:17 & 554 \si{\milli\second} \\ \texttt{-{}-zstd 19} & 46.99 MB & 4.97\% & 7:09 & 605 \si{\milli\second} \\ \texttt{-{}-zstd 20} & 46.81 MB & 4.95\% & 7:08 & 608 \si{\milli\second} \\ \texttt{-{}-zstd 21} & 45.77 MB & 4.84\% & 13:01 & 614 \si{\milli\second} \\ \texttt{-{}-zstd 22} & 45.52 MB & 4.81\% & 15:11 & 621 \si{\milli\second} \end{tabular} \caption{Compression results for an example trace. 
\\ Tests performed on Ryzen 9 3900X.} \label{compressiontimes} \end{table} \begin{figure}[h] \begin{minipage}[c]{.475\textwidth} \begin{figure}[H] \centering\begin{tikzpicture} \begin{axis}[xlabel=Mode,ylabel=Size (MB), legend pos=north west, width=\textwidth] \addplot[mark=x, red] plot coordinates { (1, 63.17) (2, 63.29) (3, 62.94) (4, 62.81) (5, 61.04) (6, 60.27) (7, 61.53) (8, 60.44) (9, 59.58) (10, 59.36) (11, 59.2) (12, 58.51) (13, 56.16) (14, 55.76) (15, 54.65) (16, 50.94) (17, 50.18) (18, 49.91) (19, 46.99) (20, 46.81) (21, 45.77) (22, 45.52) }; \addlegendentry{zstd} \addplot[mark=o, blue] plot coordinates { (23, 162.48) }; \addlegendentry{default} \addplot[mark=*, blue] plot coordinates { (24, 77.33) }; \addlegendentry{hc} \addplot[mark=triangle*, blue] plot coordinates { (25, 72.67) }; \addlegendentry{extreme} \end{axis} \end{tikzpicture} \caption{Plot of trace sizes for different compression modes (see table~\ref{compressiontimes}).} \label{savesize} \end{figure} \end{minipage}% \hspace{0.04\textwidth}% \begin{minipage}[c]{.475\textwidth} \begin{figure}[H] \centering\begin{tikzpicture} \begin{semilogyaxis}[xlabel=Mode,ylabel=Time (s), legend pos=north west, width=\textwidth] \addplot[mark=x, red] plot coordinates { (1, 2.27) (2, 2.31) (3, 2.43) (4, 2.44) (5, 3.98) (6, 4.19) (7, 6.6) (8, 7.84) (9, 9.6) (10, 10.29) (11, 11.23) (12, 15.43) (13, 35.55) (14, 37.74) (15, 61) (16, 94) (17, 104) (18, 137) (19, 429) (20, 428) (21, 781) (22, 911) }; \addlegendentry{zstd} \addplot[mark=o, blue] plot coordinates { (23, 1.91) }; \addlegendentry{default} \addplot[mark=*, blue] plot coordinates { (24, 39.24) }; \addlegendentry{hc} \addplot[mark=triangle*, blue] plot coordinates { (25, 270) }; \addlegendentry{extreme} \end{semilogyaxis} \end{tikzpicture} \caption{Logarithmic plot of trace compression times for different compression modes (see table~\ref{compressiontimes}).} \label{savetime} \end{figure} \end{minipage} \end{figure} \begin{figure}[H] \centering\begin{tikzpicture} \begin{axis}[xlabel=Mode,ylabel=Time (ms), legend pos=south west, width=0.475\textwidth] \addplot[mark=x, red] plot coordinates { (1, 868) (2, 884) (3, 867) (4, 855) (5, 855) (6, 827) (7, 761) (8, 746) (9, 724) (10, 706) (11, 717) (12, 695) (13, 642) (14, 627) (15, 600) (16, 537) (17, 542) (18, 554) (19, 605) (20, 608) (21, 614) (22, 621) }; \addlegendentry{zstd} \addplot[mark=o, blue] plot coordinates { (23, 470) }; \addlegendentry{default} \addplot[mark=*, blue] plot coordinates { (24, 401) }; \addlegendentry{hc} \addplot[mark=triangle*, blue] plot coordinates { (25, 406) }; \addlegendentry{extreme} \end{axis} \end{tikzpicture} \caption{Plot of trace load times for different compression modes (see table~\ref{compressiontimes}).} \label{loadtime} \end{figure} Trace files created using the \emph{default}, \emph{hc} and \emph{extreme} modes are optimized for fast decompression and can be further compressed using file compression utilities. For example, using 7-zip results in archives of the following sizes: 77.2 MB, 54.3 MB, 52.4 MB. For archival purposes it is however much better to use the \emph{zstd} compression modes, which are faster, compress trace files more tightly, and are directly loadable by the profiler, without the intermediate decompression step. 
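For example, a trace could be re-saved for long-term storage with one of the stronger zstd levels using an invocation along the following lines (assuming, as with the plain upgrade shown earlier, that the compression parameter from table~\ref{compressiontimes} is given before the input and output file names):

\begin{verbatim}
% ./update --zstd 16 old.tracy archived.tracy
\end{verbatim}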
\subsection{Instrumentation failures} \label{instrumentationfailures} In some cases your program may be incorrectly instrumented, for example you could have unbalanced zone begin and end events, or you could report a memory free event without first reporting a memory allocation event. When Tracy detects such misbehavior it immediately terminates connection with the client and displays an error message. \section{Analyzing captured data} \label{analyzingdata} You have instrumented your application and you have captured a profiling trace. Now you want to look at the collected data. You can do this in the application contained in the \texttt{profiler} directory. The workflow is identical, whether you are viewing a previously saved trace, or if you're performing a live capture, as described in section~\ref{interactiveprofiling}. \subsection{Time display} In most cases Tracy will display an approximation of time value, depending on how big it is. For example, a short time range will be displayed as 123~\si{\nano\second}, and some longer ones will be shortened to 123.45~\si{\micro\second}, 123.45~\si{\milli\second}, 12.34~\si{\second}, 1:23.4, 12:34:56, or even 1d12:34:56 to indicate more than a day has passed. While such presentation makes time values easy to read, it is not always appropriate. For example, you may have multiple events happen at a time approximated to 1:23.4, giving you a precision of only $\sfrac{1}{10}$ of a second. There's certainly a lot that can happen in 100~\si{\milli\second}. To solve this problem, an alternative time display is used in appropriate places. It combines a day--hour--minute--second value with full nanosecond resolution, resulting in values such as 1:23~456,789,012~\si{\nano\second}. \subsection{Main profiler window} The main profiler window is split into three sections, as seen on figure~\ref{mainwindow}: the control menu, the frame time graph and the timeline display. 
\begin{figure}[h] \centering\begin{tikzpicture} \draw (0, 0) rectangle (15.5, -5.5); \draw[pattern=crosshatch dots] (0, 0) rectangle+(15.5, 0.3); \draw[rounded corners=5pt] (0.1, -0.1) rectangle+(0.5, -0.5) node [midway] {\faPowerOff}; \draw[rounded corners=5pt] (0.7, -0.1) rectangle+(1.8, -0.5) node [midway] {\faCog{} Options}; \draw[rounded corners=5pt] (2.6, -0.1) rectangle+(2.2, -0.5) node [midway] {\faTags{} Messages}; \draw[rounded corners=5pt] (4.9, -0.1) rectangle+(2.1, -0.5) node [midway] {\faSearch{} Find zone}; \draw[rounded corners=5pt] (7.1, -0.1) rectangle+(2, -0.5) node [midway] {\faSortAmountUp{} Statistics}; \draw[rounded corners=5pt] (9.2, -0.1) rectangle+(2, -0.5) node [midway] {\faMemory{} Memory}; \draw[rounded corners=5pt] (11.3, -0.1) rectangle+(2.1, -0.5) node [midway] {\faBalanceScale{} Compare}; \draw[rounded corners=5pt] (13.5, -0.1) rectangle+(1.3, -0.5) node [midway] {\faFingerprint{} Info}; \draw[rounded corners=5pt] (14.9, -0.1) rectangle+(0.5, -0.5) node [midway] {\faTools{}}; \draw[rounded corners=5pt] (0.1, -0.7) rectangle+(0.4, -0.5) node [midway] {\faCaretLeft}; \draw (0.6, -0.7) node[anchor=north west] {Frames: 364}; \draw[rounded corners=5pt] (2.8, -0.7) rectangle+(0.4, -0.5) node [midway] {\faCaretRight}; \draw[rounded corners=5pt] (3.3, -0.7) rectangle+(0.5, -0.5) node [midway] {\faCaretDown}; \draw[rounded corners=5pt] (3.9, -0.7) rectangle+(0.5, -0.5) node [midway] {\faCrosshairs}; \draw (4.5, -0.65) node[anchor=north west] {\faEye~52.7 ms \hspace{5pt} \faDatabase~6.06 s \hspace{5pt} \faMemory~195.2 MB}; \draw[dashed] (10.6, -0.75) rectangle+(3.2, -0.4) node[midway] {Notification area}; \draw (0.1, -1.3) rectangle+(15.3, -1) node [midway] {Frame time graph}; \draw (0.1, -2.4) rectangle+(15.3, -3) node [midway] {Timeline view}; \end{tikzpicture} \caption{Main profiler window. Note that the top line of buttons has been split into two rows in this manual.} \label{mainwindow} \end{figure} \subsubsection{Control menu} \label{controlmenu} The control menu (top row of buttons) provides access to various features of the profiler. The buttons perform the following actions: \begin{itemize} \item \emph{\faWifi{}~Connection} -- Opens the connection information popup (see section~\ref{connectionpopup}). Only available when live capture is in progress. \item \emph{\faPowerOff{} Close} -- This button unloads the current profiling trace and returns to the welcome menu, where another trace can be loaded. In live captures it is replaced by \emph{\faPause{}~Pause}, \emph{\faPlay{}~Resume} and \emph{\faSquare{}~Stopped} buttons. \item \emph{\faPause{} Pause} -- While a live capture is in progress, the profiler will display the last three fully captured frames, so that you can see the current behavior of the program. Use this button\footnote{Or perform any action on the timeline view.} to stop the automatic updates of the timeline view (the capture will be still progressing). \item \emph{\faPlay{} Resume} -- Use this button to resume following the most recent three frames in a live capture. \item \emph{\faSquare{} Stopped} -- Inactive button used to indicate that the client application was terminated. \item \emph{\faCog{} Options} -- Toggles the settings menu (section~\ref{options}). \item \emph{\faTags{} Messages} -- Toggles the message log window (section~\ref{messages}), which displays custom messages sent by the client, as described in section~\ref{messagelog}. 
\item \emph{\faSearch{} Find zone} -- This button toggles the find zone window, which allows inspection of zone behavior statistics (section~\ref{findzone}).
\item \emph{\faSortAmountUp{} Statistics} -- Toggles the statistics window, which displays zones sorted by their total time cost (section~\ref{statistics}).
\item \emph{\faMemory{} Memory} -- Various memory profiling options may be accessed here (section~\ref{memorywindow}).
\item \emph{\faBalanceScale{} Compare} -- Toggles the trace compare window, which allows you to see the performance difference between two profiling runs (section~\ref{compare}).
\item \emph{\faFingerprint{} Info} -- Shows general information about the trace (section~\ref{traceinfo}).
\item \emph{\faTools{} Tools} -- Allows access to optional data collected during the capture. Some choices might be unavailable.
\begin{itemize}
\item \emph{\faPlay{}~Playback} -- If frame images were captured (section~\ref{frameimages}), you will have the option to open the frame image playback window, described in chapter~\ref{playback}.
\item \emph{\faSlidersH{}~CPU~data} -- If context switch data was captured (section~\ref{contextswitches}), this button allows you to inspect the processor load during the capture, as described in section~\ref{cpudata}.
\item \emph{\faStickyNote{}~Annotations} -- If annotations have been made (section~\ref{annotatingtrace}), you can open a list of all annotations, described in chapter~\ref{annotationlist}.
\end{itemize}
\end{itemize}

The frame information block consists of four elements: the current frame set name along with the number of captured frames, the two navigational buttons \faCaretLeft{} and \faCaretRight{}, which allow you to focus the timeline view on the previous or next frame, and the frame set selection button \faCaretDown{}, which is used to switch to another frame set\footnote{See section~\ref{framesets} for another way to change the active frame set.}. The \emph{\faCrosshairs{}~Go to frame} button allows zooming the timeline view in on the specified frame. For more information about marking frames, see section~\ref{markingframes}.

The next three items show the \emph{\faEye{}~view time range}, the \emph{\faDatabase{}~time span} of the whole capture, and the \emph{\faMemory{}~memory usage} of the profiler.

\paragraph{Notification area}

The notification area is used to display informational notices, for example how long it took to load a trace from disk. A pulsating dot next to the \faTasks~icon indicates that some background tasks are being performed, which may need to be completed before the full capabilities of the profiler are available. If a crash was captured during profiling (section~\ref{crashhandling}), a \emph{\faSkull{}~crash} icon will be displayed. The red \faSatelliteDish{}~icon indicates that queries are currently being backlogged, while the same icon in yellow indicates that some queries are currently in flight (see chapter~\ref{connectionpopup} for more information).

If drawing of timeline elements was disabled in the options menu (section~\ref{options}), the following orange icons will be used to remind the user about that fact. Click on the icons to enable drawing of the selected elements. Note that collapsed labels (section~\ref{zoneslocksplots}) are not taken into account here.

\begin{itemize}
\item \faExpand{} -- Display of empty labels is enabled.
\item \faHiking{} -- Context switches are hidden.
\item \faSlidersH{} -- CPU data is hidden.
\item \faEye{} -- GPU zones are hidden.
\item \faMicrochip{} -- CPU zones are hidden. \item \faLock{} -- Locks are hidden. \item \faSignature{} -- Plots are hidden. \item \faGhost{} -- Ghost zones are not displayed. \item \faLowVision{} -- At least one timeline item (e.g. a single thread, a single plot, a single lock, etc.) is hidden. \end{itemize} \subsubsection{Frame time graph} \label{frametimegraph} The graph of currently selected frame set (figure~\ref{frametime}) provides an outlook on the time spent in each frame, allowing you to see where the problematic frames are and to quickly navigate to them. \begin{figure}[h] \centering\begin{tikzpicture} \fill[black!20] (2.15, 0) rectangle+(1.2, 1); \draw (0, 0) rectangle (10, 1); \draw[pattern=north east lines] (0.1, 0) rectangle+(0.2, 0.2); \draw[pattern=north east lines] (0.4, 0) rectangle+(0.2, 0.21); \draw[pattern=north east lines] (0.7, 0) rectangle+(0.2, 0.18); \draw[pattern=north east lines] (1, 0) rectangle+(0.2, 0.22); \draw[pattern=north east lines] (1.3, 0) rectangle+(0.2, 0.7); \draw[pattern=north east lines] (1.6, 0) rectangle+(0.2, 0.2); \draw[pattern=north east lines] (1.9, 0) rectangle+(0.2, 0.31); \draw[pattern=north east lines] (2.2, 0) rectangle+(0.2, 0.12); \draw[pattern=north east lines] (2.5, 0) rectangle+(0.2, 0.2); \draw[pattern=north east lines] (2.8, 0) rectangle+(0.2, 0.2); \draw[pattern=north east lines] (3.1, 0) rectangle+(0.2, 0.25); \draw[pattern=north east lines] (3.4, 0) rectangle+(0.2, 0.19); \draw[pattern=north east lines] (3.7, 0) rectangle+(0.2, 0.23); \draw[pattern=north east lines] (4, 0) rectangle+(0.2, 0.19); \draw[pattern=north east lines] (4.3, 0) rectangle+(0.2, 0.2); \draw[pattern=north east lines] (4.6, 0) rectangle+(0.2, 0.16); \draw[pattern=north east lines] (4.9, 0) rectangle+(0.2, 0.21); \draw[pattern=north east lines] (5.2, 0) rectangle+(0.2, 0.2); \draw[pattern=north east lines] (5.5, 0) rectangle+(0.2, 0.8); \draw[pattern=north east lines] (5.8, 0) rectangle+(0.2, 0.1); \draw[pattern=north east lines] (6.1, 0) rectangle+(0.2, 0.21); \draw[pattern=north east lines] (6.4, 0) rectangle+(0.2, 0.2); \draw[pattern=north east lines] (6.7, 0) rectangle+(0.2, 0.2); \draw[pattern=north east lines] (7, 0) rectangle+(0.2, 0.28); \draw[pattern=north east lines] (7.3, 0) rectangle+(0.2, 0.22); \draw[pattern=north east lines] (7.6, 0) rectangle+(0.2, 0.16); \draw[pattern=north east lines] (7.9, 0) rectangle+(0.2, 0.2); \draw[pattern=north east lines] (8.2, 0) rectangle+(0.2, 0.21); \draw[pattern=north east lines] (8.5, 0) rectangle+(0.2, 0.18); \draw[pattern=north east lines] (8.8, 0) rectangle+(0.2, 0.2); \draw[dotted] (0, 0.325) -- +(10, 0); \draw[dotted] (0, 0.6) -- +(10, 0); \draw[dotted] (0, 0.8) -- +(10, 0); \end{tikzpicture} \caption{Frame time graph.} \label{frametime} \end{figure} Each bar displayed on the graph represents an unique frame in the current frame set\footnote{Unless the view is zoomed out and multiple frames are merged into one column.}. The progress of time is in the right direction. The height of the bar indicates the time spent in frame, complemented with the color information: \begin{itemize} \item If the bar is \emph{blue}, then the frame met the \emph{best} time of 143 FPS, or 6.99 \si{\milli\second}\footnote{The actual target is 144 FPS, but one frame leeway is allowed to account for timing inaccuracies.} (represented by blue target line). \item If the bar is \emph{green}, then the frame met the \emph{good} time of 59 FPS, or 16.94 \si{\milli\second} (represented by green target line). 
\item If the bar is \emph{yellow}, then the frame met the \emph{bad} time of 29 FPS, or 34.48 \si{\milli\second} (represented by yellow target line).
\item If the bar is \emph{red}, then the frame didn't meet any of the time limits.
\end{itemize}

The frames visible on the timeline are marked with a violet box drawn over them.

When a zone is displayed in the find zone window (section~\ref{findzone}), the coloring of frames may be changed, as described in section~\ref{frametimefindzone}.

Moving the \faMousePointer{} mouse cursor over the frames displayed on the graph will display a tooltip with information about the frame number, frame time, frame image (if available, see chapter~\ref{frameimages}), etc. Such tooltips are common for many UI elements in the profiler and won't be mentioned later in the manual.

The timeline view may be focused on the frames by clicking or dragging the \LMB{}~left mouse button on the graph. The graph may be scrolled left and right by dragging the \RMB{}~right mouse button over the graph. The view may be zoomed in and out by using the \Scroll{}~mouse scroll. If the view is zoomed out, so that multiple frames are merged into one column, the highest frame time will be used to represent the given column.

Clicking the \LMB{}~left mouse button on the graph while the \keys{\ctrl}~key is pressed will open the frame image playback window (section~\ref{playback}) and set the playback to the selected frame. See section~\ref{frameimages} for more information about frame images.

\subsubsection{Timeline view}

The timeline is the most important element of the profiler UI. All the captured data is displayed there, laid out on the horizontal axis according to the flow of time. Where no profiling was performed, the timeline is dimmed out. The view is split into three parts: the time scale, the frame sets and the combined zones, locks and plots display.

\subparagraph{Collapsed items}
\label{collapseditems}

Due to extreme differences in time scales, you will almost constantly see events that are too small to be displayed on the screen. Such events have a preset minimum size (so they can be seen) and are marked with a zig-zag pattern, to indicate that you need to zoom in to see more detail. The zig-zag pattern can be seen applied to frame sets on figure~\ref{framesetsfig}, and to zones on figure~\ref{zoneslocks}.

\paragraph{Time scale}

The time scale is a quick aid in determining the relation between screen space and the time it represents (figure~\ref{timescale}).

\begin{figure}[h]
\centering\begin{tikzpicture}
\foreach \x in {0,1,2,...,10} {
\draw (\x+0, 0) -- +(0, -0.4);
\draw (\x+0.1, 0) -- +(0, -0.2);
\draw (\x+0.2, 0) -- +(0, -0.2);
\draw (\x+0.3, 0) -- +(0, -0.2);
\draw (\x+0.4, 0) -- +(0, -0.2);
\draw (\x+0.5, 0) -- +(0, -0.3);
\draw (\x+0.6, 0) -- +(0, -0.2);
\draw (\x+0.7, 0) -- +(0, -0.2);
\draw (\x+0.8, 0) -- +(0, -0.2);
\draw (\x+0.9, 0) -- +(0, -0.2);
}
\draw (11, 0) -- +(0, -0.4);
\draw (-0.2, -0.4) node[anchor=north west] {+13.76 s};
\draw (1.85, -0.4) node[anchor=north west] {20 \si{\micro\second}};
\draw (3.85, -0.4) node[anchor=north west] {40 \si{\micro\second}};
\draw (5.85, -0.4) node[anchor=north west] {60 \si{\micro\second}};
\draw (7.85, -0.4) node[anchor=north west] {80 \si{\micro\second}};
\draw (9.85, -0.4) node[anchor=north west] {100 \si{\micro\second}};
\end{tikzpicture}
\caption{Time scale.}
\label{timescale}
\end{figure}

The leftmost value on the scale represents the time at which the timeline starts.
The rest of numbers label the notches on the scale, with some numbers omitted, if there's no space to display them. \paragraph{Frame sets} \label{framesets} Frames from each frame set are displayed directly underneath the time scale. Each frame set occupies a separate row. The currently selected frame set is highlighted with bright colors, with the rest dimmed out. \begin{figure}[h] \centering\begin{tikzpicture} \draw[densely dotted] (-0.05, 0) -- +(0, -1.5); \draw (0, 0) -- +(0, -0.5); \draw (0, -0.25) -- +(1, 0) node[anchor=west] {Frame 312 (6.99 \si{\milli\second})}; \draw (4.3, -0.25) -- +(1, 0); \draw (5.3, 0) -- +(0, -0.5); \draw[densely dotted] (5.35, 0) -- +(0, -1.5); \draw[decorate,decoration=zigzag] (5.4, -0.25) -- +(1.25, 0); \draw[densely dotted] (6.7, 0) -- +(0, -1.5); \draw (6.75, 0) -- +(0, -0.5); \draw (6.75, -0.25) -- +(0.5, 0) node[anchor=west] {Frame 347 (5.24 \si{\milli\second})}; \draw (10.5, -0.25) -- +(0.5, 0); \draw (11, 0) -- +(0, -0.5); \draw[densely dotted] (11.05, 0) -- +(0, -1.5); \draw (11.1, 0) -- +(0, -0.5); \draw (11.1, -0.25) -- +(0.5, 0) node[anchor=west] {1.63 \si{\milli\second}}; \draw (13, -0.25) -- +(0.5, 0); \draw (13.5, 0) -- +(0, -0.5); \draw[densely dotted] (13.55, 0) -- +(0, -1.5); \draw (13.6, 0) -- +(0, -0.5); \draw (13.6, -0.25) -- +(0.5, 0); \draw (14.1, 0) -- +(0, -0.5); \draw[densely dotted] (14.15, 0) -- +(0, -1.5); \end{tikzpicture} \caption{Frames on the timeline.} \label{framesetsfig} \end{figure} On figure~\ref{framesetsfig} we can see the fully described frames~312 and 347. The description consists of the frame name, which is \emph{Frame} for the default frame set (section~\ref{markingframes}) or the name you used for the secondary name set (section~\ref{secondaryframeset}), the frame number and the frame time. The frame~348 is too small to be fully displayed, so only the frame time is shown. The frame~349 is even smaller, with no space for any text. Moreover, frames~313~to~346 are too small to be displayed individually, so they are replaced with a zig-zag pattern, as described in section~\ref{collapseditems}. You can also see that there are frame separators, projected down to the rest of the timeline view. Note that only the separators for the currently selected frame set are displayed. You can make a frame set active by clicking the \LMB{}~left mouse button on a frame set row you want to select (also see section~\ref{controlmenu}). Clicking the \MMB{} middle mouse button on a frame will zoom the view to the extent of the frame. If a frame has an associated frame image (see chapter~\ref{frameimages}), you can hold the \keys{\ctrl} key and click the \LMB{}~left mouse button on the frame, to open the frame image playback window (see chapter~\ref{playback}) and set the playback to the selected frame. \paragraph{Zones, locks and plots display} \label{zoneslocksplots} On this combined view you will find the zones with locks and their associated threads. The plots are graphed right below. 
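Everything shown in this combined view originates from client-side instrumentation. The following is a minimal, illustrative sketch of the kind of code that could produce a display similar to figure~\ref{zoneslocks}; it assumes the Tracy client header is available and \texttt{TRACY\_ENABLE} is defined, and all function and variable names are invented for this example.
\begin{verbatim}
#include "Tracy.hpp"  // Tracy client header (the path may differ between versions)
#include <mutex>

TracyLockable(std::mutex, g_physicsLock); // instrumented mutex, gets its own lock row

void Physics()
{
    ZoneScopedN("Physics");               // a named zone in this thread's timeline
    std::lock_guard<LockableBase(std::mutex)> lock(g_physicsLock);
    // ... simulation work ...
}

void GameLoop()                           // hypothetical main loop
{
    for(;;)
    {
        {
            ZoneScopedN("Update");        // parent zone; zones nest by C++ scope
            Physics();                    // appears as a child of "Update"
        }
        {
            ZoneScopedN("Render");
            // ... draw calls ...
        }
        TracyPlot("Queue size", 512.0);   // a value graphed in the plots area
        FrameMark;                        // marks the end of the current frame
    }
}
\end{verbatim}
Because zones follow C++ scopes, the nesting visible on the timeline (for example \emph{Physics} inside \emph{Update}) falls out of the structure of the code rather than from any manual bookkeeping.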
\begin{figure}[h] \centering\begin{tikzpicture} \draw(0, 0.55) -- (0.2, 0.55) -- (0.1, 0.35) -- (0, 0.55); \draw(0.25, 0.7) node[anchor=north west] {Main thread}; \draw[densely dotted] (0, 0.2) -- +(15, 0); \draw(1.2, -0.025) node[circle,draw,inner sep=0pt,minimum size=4] {}; \draw(2.2, -0.025) node[circle,draw,inner sep=0pt,minimum size=4] {}; \draw(3.3, -0.025) node[circle,draw,inner sep=0pt,minimum size=4] {}; \draw(5, -0.025) node[circle,draw,inner sep=0pt,minimum size=4] {}; \draw(6.2, -0.025) node[circle,draw,inner sep=0pt,minimum size=4] {}; \draw(7, -0.025) node[circle,draw,inner sep=0pt,minimum size=4] {}; \draw(8.2, -0.025) node[circle,draw,inner sep=0pt,minimum size=4] {}; \draw(9.8, -0.025) node[circle,draw,inner sep=0pt,minimum size=4] {}; \draw(11.7, -0.025) node[circle,draw,inner sep=0pt,minimum size=4] {}; \draw(12.7, -0.025) node[circle,draw,inner sep=0pt,minimum size=4] {}; \draw[dotted, thick] (0, -0.25) -- (1, -0.25); \draw[thick] (1, -0.25) -- (3.8, -0.25); \draw[dotted, thick] (3.8, -0.25) -- (4.8, -0.25 ); \draw[thick] (4.8, -0.25) -- (10.5, -0.25); \draw[dotted, thick] (10.5, -0.25) -- (11, -0.25); \draw[thick] (11, -0.25) -- (14.2, -0.25); \draw[dotted, thick] (14.2, -0.25) -- (15, -0.25); \draw(1.5, -0.5) rectangle+(5, -0.5) node[midway] {Update}; \draw(2, -1) rectangle+(0.75, -0.5) node[midway] {6}; \draw[densely dotted, decorate,decoration=zigzag] (2, -1.25) -- +(0.75, 0 ); \draw(3, -1) rectangle+(3, -0.5) node[midway] {Physics}; \draw(3.2, -1.5) rectangle+(0.5, -0.5); \draw(4.8, -1.5) rectangle+(0.3, -0.5); \draw(5.1, -1.5) rectangle+(0.4, -0.5); \draw(7.5, -0.5) rectangle+(6.5, -0.5) node[midway] {Render}; \draw(0, -2.5) node[anchor=north west] {Physics lock}; \draw[pattern=crosshatch dots] (3.1, -2.5) rectangle+(2.5, -0.5); \draw(0, -3.65) -- (0.2, -3.65) -- (0.1, -3.85) -- (0, -3.65); \draw(0.25, -3.5) node[anchor=north west] {Streaming thread \faGhost}; \draw[densely dotted] (0, -4) -- +(15, 0); \draw[thick] (0, -4.25) -- (6.1, -4.25); \draw[dotted, thick] (6.1, -4.25) -- (9.7, -4.25); \draw[thick] (9.7, -4.25) -- (15, -4.25); \draw(5.5, -4) -- (5.4, -3.85) -- (5.6, -3.85) -- (5.5, -4); \draw(0, -4.5) -- (6, -4.5) -- (6, -5) -- (0, -5); \draw(0, -4.5) node[anchor=north west] {Streaming job}; \draw(15, -4.5) -- (10, -4.5) -- (10, -5) -- (15, -5); \draw(15, -4.5) node[anchor=north east] {Streaming job}; \draw(10.4, -5) rectangle+(3, -0.5) node[midway] {Load image}; \end{tikzpicture} \caption{Zones and locks display.} \label{zoneslocks} \end{figure} The left hand side \emph{index area} of the timeline view displays various labels (threads, locks), which can be categorized in the following way: \begin{itemize} \item \emph{Light blue label} -- OpenGL/Vulkan context. Multi-threaded Vulkan contexts are additionally split into separate threads. \item \emph{Pink label} -- CPU data graph. \item \emph{White label} -- A CPU thread. Will be replaced by a bright red label in a thread that has crashed (section~\ref{crashhandling}). If automated sampling was performed, clicking the~\LMB{}~left mouse button on the \emph{\faGhost{}~ghost zones} button will switch zone display mode between 'instrumented' and 'ghost'. \item \emph{Light red label} -- Indicates a lock. \item \emph{Yellow label} -- Plot. \end{itemize} Labels accompanied by the \faCaretDown{}~symbol can be collapsed out of the view, to reduce visual clutter. Hover the~\faMousePointer{}~mouse pointer over the label to display additional information. 
Click the \MMB{}~middle mouse button on a label to zoom the view to the extent of the label contents. \subparagraph{Zones} In an example on figure~\ref{zoneslocks} you can see that there are two threads: \emph{Main thread} and \emph{Streaming thread}\footnote{By clicking on a thread name you can temporarily disable display of the zones in this thread.}. We can see that the \emph{Main thread} has two root level zones visible: \emph{Update} and \emph{Render}. The \emph{Update} zone is split into further sub-zones, some of which are too small to be displayed at the current zoom level. This is indicated by drawing a zig-zag pattern over the merged zones box (section~\ref{collapseditems}), with the number of collapsed zones printed in place of zone name. We can also see that the \emph{Physics} zone acquires the \emph{Physics lock} mutex for the most of its run time. Meanwhile the \emph{Streaming thread} is performing some \emph{Streaming jobs}. The first \emph{Streaming job} sent a message (section~\ref{messagelog}), which in addition to being listed in the message log is being indicated by a triangle over the thread separator. When there are multiple messages in one place, the triangle outline shape changes to a filled triangle. At high zoom levels, the zones will be displayed with additional markers, as presented on figure~\ref{inaccuracy}. The red regions at the start and end of a zone indicate the cost associated with recording an event (\emph{Queue delay}). The error bars show the timer inaccuracy (\emph{Timer resolution}). Note that these markers are only \emph{approximations}, as there are many factors that can impact the true cost of capturing a zone, for example cache effects, or CPU frequency scaling, which is unaccounted for (see section~\ref{checkenvironmentcpu}). \begin{figure}[h] \centering\begin{tikzpicture} \draw(0, 0) rectangle+(5, 0.5) node [midway] {Zone}; \draw[pattern=crosshatch dots] (0, 0) rectangle+(1, 0.5); \draw[pattern=crosshatch dots] (5, 0) rectangle+(1, 0.5); \draw(-0.5, 0.35) -- (-0.5, 0.15); \draw(0.5, 0.35) -- (0.5, 0.15); \draw(-0.5, 0.25) -- (0.5, 0.25); \draw(4.5, 0.35) -- (4.5, 0.15); \draw(5.5, 0.35) -- (5.5, 0.15); \draw(4.5, 0.25) -- (5.5, 0.25); \draw(0, -0.2) -- (0, -0.3) -- (5, -0.3) -- (5, -0.2); \draw(2.5, -0.3) node[anchor=north] {Zone extent}; \end{tikzpicture} \caption{Approximation of timer inaccuracies and zone collection cost.} \label{inaccuracy} \end{figure} The GPU zones are displayed just like CPU zones, with an OpenGL/Vulkan context in place of a thread name. Hovering the \faMousePointer{} mouse pointer over a zone will highlight all other zones that have the same source location with a white outline. Clicking the \LMB{}~left mouse button on a zone will open zone information window (section~\ref{zoneinfo}). Holding the \keys{\ctrl} key and clicking the \LMB{}~left mouse button on a zone will open zone statistics window (section~\ref{findzone}). Clicking the \MMB{}~middle mouse button on a zone will zoom the view to the extent of the zone. \subparagraph{Ghost zones} Display of ghost zones (not pictured on figure~\ref{zoneslocks}, but similar to normal zones view) can be enabled by clicking on the \emph{\faGhost{}~ghost zones} icon next to thread label, available if automated sampling (see chapter~\ref{sampling}) was performed. Ghost zones will also be displayed by default, if no instrumented zones are available for a given thread, to help with pinpointing functions that should be instrumented. 
Ghost zones represent true function calls in the program, periodically reported by the operating system. Due to the limited resolution of sampling, you need to take great care when looking at the reported timing data. While it may be apparent that some small function requires a relatively long time to execute, for example 125~\si{\micro\second} (8~kHz~sampling rate), in reality this time represents the period between taking two distinct samples, not the actual function run time. Similarly, two (or more) distinct function calls may be represented as a single ghost zone, because the profiler doesn't have the information needed to determine the true lifetime of a sampled function.
Another common pitfall to watch for is the order of presented functions. \emph{It is not what you expect it to be!} Read chapter~\ref{readingcallstacks} for a critical insight on how call stacks might seem nonsensical at first, and why they aren't.
The available information about ghost zones is quite limited, but it's enough to give you a rough outlook on the execution of your application. The timeline view alone presents more than any other statistical profiler is able to. In addition to that, Tracy properly handles inlined function calls, which are indicated by darker colored ghost zones.
Clicking the \LMB{}~left mouse button on a ghost zone will open the corresponding source file location, if possible (see chapter~\ref{sourceview} for conditions). There are three ways in which source locations can be assigned to a ghost zone:
\begin{enumerate}
\item If the selected ghost zone is \emph{not} an inline frame and its symbol data has been retrieved, the source location points to the function entry location (first line of the function).
\item If the selected ghost zone is \emph{not} an inline frame, but its symbol data is not available, the source location will point to a semi-random location within the function body (i.e. to one of the sampled addresses in the program, but not necessarily the one representing the selected time stamp, as multiple samples with different addresses may be merged into one ghost zone).
\item If the selected ghost zone \emph{is} an inline frame, the source location will point to a semi-random location within the inlined function body (see details in the above point). It is not possible to go to the entry location of such a function, as it doesn't exist in the program binary; inlined functions begin within their parent function.
\end{enumerate}
\subparagraph{Call stack samples}
The row of dots right below the \emph{Main thread} label shows call stack sample points, which may have been automatically captured (see chapter~\ref{sampling} for more detail). Hovering the \faMousePointer{}~mouse pointer over each dot will display a short call stack summary, while clicking on a dot with the \LMB{}~left mouse button will open a more detailed call stack information window (see section~\ref{callstackwindow}).
\subparagraph{Context switches}
The thick line right below the samples represents context switch data (see section~\ref{contextswitches}). We can see that the main thread, as displayed, starts in a suspended state, represented by the dotted region. Then it is woken up and starts execution of the \texttt{Update} zone. In the midst of the physics processing it is preempted, which explains why there is an empty space between child zones. Then it is resumed again and continues execution into the \texttt{Render} zone, where it is preempted again, but for a shorter time.
After rendering is done, the thread sleeps again, presumably waiting for vertical blanking to indicate the next frame. Similar information is also available for the streaming thread.
Context switch regions use the following color key:
\begin{itemize}
\item \emph{Green} -- Thread is running.
\item \emph{Red} -- Thread is waiting to be resumed by the scheduler. There are many reasons why a thread may be in the waiting state. Hovering the \faMousePointer{}~mouse pointer over the region will display more information.
\item \emph{Blue} -- Thread is waiting to be resumed and is migrating to another CPU core. This might have visible performance effects, because low-level CPU caches are not shared between cores, which may result in additional cache misses. To avoid this problem, you may pin a thread to a specific core by setting its affinity.
\item \emph{Bronze} -- Thread has been placed in the scheduler's run queue and is about to be resumed.
\end{itemize}
\subparagraph{CPU data}
This label is only available if context switch data was collected. It is split into two parts: a graph of CPU load by various threads running in the system, and a per-core thread execution display.
The CPU load graph shows how much of the CPU resources was used at any given time during program execution. The green part of the graph represents threads belonging to the profiled application and the gray part of the graph shows all other programs running in the system. Hovering the \faMousePointer{}~mouse pointer over the graph will display a list of threads running on the CPU at the given time.
Each line in the thread execution display represents a separate logical CPU thread. If CPU topology data is available (see section~\ref{cputopology}), package and core assignment will be displayed in brackets, in addition to the numerical processor identifier (i.e. \texttt{[\emph{package}:\emph{core}] CPU \emph{thread}}). When a core is busy executing a thread, a zone will be drawn at the appropriate time. Zones are colored according to the following key:
\begin{itemize}
\item \emph{Bright color} -- or \emph{orange} if dynamic thread colors are disabled -- Thread tracked by the profiler.
\item \emph{Dark blue} -- Thread existing in the profiled application, but not known to the profiler. This may include internal profiler threads, helper threads created by external libraries, etc.
\item \emph{Gray} -- Threads assigned to other programs running in the system.
\end{itemize}
When the \faMousePointer{}~mouse pointer is hovered over either the CPU data zone, or the thread timeline label, Tracy will display a line connecting all zones associated with the selected thread. This can be used to easily see how the thread was migrating across the CPU cores.
Careful examination of the data presented on this graph may allow you to determine areas where the profiled application was fighting for system resources with other programs (see section~\ref{checkenvironmentos}), or give you a hint to add more instrumentation macros.
\subparagraph{Locks}
Mutual exclusion zones are displayed in each thread that tries to acquire them. There are three color-coded kinds of lock event regions that may be displayed. Note that when the timeline view is zoomed out, the contention regions are always displayed over the uncontended ones.
\begin{itemize} \item \emph{Green region\footnote{This region type is disabled by default and needs to be enabled in options (section~\ref{options}).}} -- The lock is being held solely by one thread and no other thread tries to access it. In case of shared locks it is possible that multiple threads hold the read lock, but no thread requires a write lock. \item \emph{Yellow region} -- The lock is being owned by this thread and some other thread also wants to acquire the lock. \item \emph{Red region} -- The thread wants to acquire the lock, but is blocked by other thread, or threads in case of shared lock. \end{itemize} Hovering the \faMousePointer{}~mouse pointer over a lock timeline will highlight the lock in all threads to help reading the lock behavior. Hovering the \faMousePointer{}~mouse pointer over a lock event will display important information, for example a list of threads that are currently blocking, or which are blocked by the lock. Clicking the \LMB{}~left mouse button on a lock event or a lock label will open the lock information window, as described in section~\ref{lockwindow}. Clicking the \MMB{}~middle mouse button on a lock event will zoom the view to the extent of the event. \subparagraph{Plots} \label{plots} The numerical data values (figure~\ref{plot}) are plotted right below the zones and locks. Note that the minimum and maximum values currently displayed on the plot are visible on the screen, along with the y range of the plot and number of drawn data points. The discrete data points are indicated with little rectangles. Multiple data points are indicated by a filled rectangle. \begin{figure}[h] \centering\begin{tikzpicture} [dot/.style={rectangle,draw,scale=0.5}]; \draw(0, -0.15) -- (0.2, -0.15) -- (0.1, -0.35) -- (0, -0.15); \draw(0.25, 0) node[anchor=north west] {Queue size (y-range: 463, visible data points: 7)}; \draw[densely dotted] (0, -0.5) -- +(15, 0); \draw(0, -0.5) node[anchor=north west] {731}; \draw(0, -3) node[anchor=south west] {268}; \draw[densely dotted] (0, -3) -- +(15, 0); \draw(0, -2) -- (1, -2.3) node[dot]{} -- (1.2, -3) node[dot]{} -- (6, -1.2) node[dot]{} -- (7, -0.5) node[dot]{} -- (9, -2) node[dot]{} -- (15, -2.5); \end{tikzpicture} \caption{Plot display.} \label{plot} \end{figure} When memory profiling (section~\ref{memoryprofiling}) is enabled, Tracy will automatically generate a \emph{\faMemory{}~Memory usage} plot, which has extended capabilities. Hovering over a data point (memory allocation event) will visually display duration of the allocation. Clicking the \LMB{} left mouse button on the data point will open the memory allocation information window, which will display the duration of the allocation as long as the window is open. Another plot that is automatically provided by Tracy is the \emph{\faTachometer*{}~CPU usage} plot, which represents the total system CPU usage percentage (it is not limited to the profiled application). \subsubsection{Navigating the view} Hovering the \faMousePointer{} mouse pointer over the timeline view will display a vertical line that can be used to visually line-up events in multiple threads. Dragging the \LMB{} left mouse button will display time measurement of the selected region. The timeline view may be scrolled both vertically and horizontally by dragging the \RMB{} right mouse button. Note that only the zones, locks and plots scroll vertically, while the time scale and frame sets always stay in place. You can zoom in and out the timeline view by using the \Scroll{} mouse scroll. 
You can select a range to which you want to zoom-in by dragging the \MMB{} middle mouse button. Dragging the \MMB{} middle mouse button while the \keys{\ctrl} key is pressed will zoom-out. \subsubsection{Annotating the trace} \label{annotatingtrace} Sometimes you may want to add notes to a trace. For example, you may want to mark a region to ignore, because the application was out-of-focus, or a region where a new user was connecting to the game, which resulted in a frame drop that needs to be investigated. To add an annotation, drag the \LMB{}~left mouse button over the timeline view, while holding the \keys{\ctrl} key. When the mouse key is released, a new annotation region will be added and a settings window will open (section~\ref{annotationsettings}), allowing you to enter a description. Annotations are displayed on the timeline, as presented on figure~\ref{annotation}. Clicking on the circle next to the text description will open the annotation settings window, in which you can modify or remove the region. \begin{figure}[h] \centering\begin{tikzpicture} \draw (0, 0.25) -- (0, 1) -- (5, 1) -- (5, 0.25); \draw[dotted] (0, -0.2) -- (0, 0.25); \draw[dotted] (5, -0.2) -- (5, 0.25); \draw (0.25, 0.75) circle(0.15); \draw (0.4, 0.7) node[anchor=west] {Description}; \end{tikzpicture} \caption{Annotation region.} \label{annotation} \end{figure} Please note that while the annotations persist between profiling sessions, they are not saved in the trace, but in the user data files, as described in section~\ref{tracespecific}. \subsection{Options menu} \label{options} In this window you can set various trace-related options. The timeline view might sometimes become overcrowded, in which case disabling display of some profiling events can increase readability. \begin{itemize} \item \emph{\faExpand{} Draw empty labels} -- By default threads that don't have anything to display at the current zoom level are hidden. Enabling this option will show them anyway. \item \emph{\faHiking{} Draw context switches} -- Allows disabling context switch display in threads. \begin{itemize} \item \emph{\faMoon{} Darken inactive thread} -- If enabled, inactive regions in threads will be dimmed out. \end{itemize} \item \emph{\faSlidersH{} Draw CPU data} -- Per-CPU behavior graph can be disabled here. \begin{itemize} \item \emph{\faSignature{} Draw CPU usage graph} -- You can disable drawing of the CPU usage graph here. \end{itemize} \item \emph{\faEye{} Draw GPU zones} -- Allows disabling display of OpenGL/Vulkan zones. The \emph{GPU zones} drop-down allows disabling individual GPU contexts and setting CPU/GPU drift offsets (see section~\ref{gpuprofiling} for more information). The \emph{\faRobot~Auto} button automatically measures the GPU drift value\footnote{There is an assumption that drift is linear. Automated measurement calculates and removes change over time in delay-to-execution of GPU zones. Resulting value may still be incorrect.}. \item \emph{\faMicrochip{} Draw CPU zones} -- Determines whether CPU zones are displayed. \begin{itemize} \item \emph{\faGhost{} Draw ghost zones} -- Controls if ghost zones should be displayed in threads which don't have any instrumented zones available. \item \emph{\faPalette{} Zone colors} -- Zones with no user-set color may be colored according to the following schemes: \begin{itemize} \item \emph{Disabled} -- A constant color (blue) will be used. \item \emph{Thread dynamic} -- Zones are colored according to a thread (identifier number) they belong to and depth level. 
\item \emph{Source location dynamic} -- Zone color is determined by source location (function name) and depth level. \end{itemize} \item \emph{\faBoxOpen{} Namespaces} -- controls display behavior of long zone names, which don't fit inside a zone box: \begin{itemize} \item \emph{Full} -- Zone names are always fully displayed (e.g.\ \texttt{std::sort}). \item \emph{Shortened} -- Namespaces are shortened to one letter (e.g.\ \texttt{s::sort}). \item \emph{None} -- Namespaces are completely omitted (e.g.\ \texttt{sort}). \end{itemize} \end{itemize} \item \emph{\faLock{} Draw locks} -- Controls the display of locks. If the \emph{Only contended} option is selected, the non-blocking regions of locks won't be displayed (see section~\ref{zoneslocksplots}). The \emph{Locks} drop-down allows disabling display of locks on a per-lock basis. As a convenience, the list of locks is split into the single-threaded and multi-threaded (contended and uncontended) categories. Clicking the \RMB{}~right mouse button on a lock label opens the lock information window (section~\ref{lockwindow}). \item \emph{\faSignature{} Draw plots} -- Allows disabling display of plots. Individual plots can be disabled in the \emph{Plots} drop-down. \item \emph{\faRandom{} Visible threads} -- Here you can select which threads are visible on the timeline. Display order of threads can be changed by dragging thread labels. \item \emph{\faImages{} Visible frame sets} -- Frame set display can be enabled or disabled here. Note that disabled frame sets are still available for selection in the frame set selection drop-down (section~\ref{controlmenu}), but are marked with a dimmed font. \end{itemize} Disabling display of some events is especially recommended when the profiler performance drops below acceptable levels for interactive usage. \subsection{Messages window} \label{messages} In this window you can see all the messages that were sent by the client application, as described in section~\ref{messagelog}. The window is split into four columns: \emph{time}, \emph{thread}, \emph{message} and \emph{call stack}. Hovering the \faMousePointer{}~mouse cursor over a message will highlight it on the timeline view. Clicking the \LMB{} left mouse button on a message will center the timeline view on the selected message. The \emph{call stack} column is filled only if a call stack capture was requested, as described in section~\ref{collectingcallstacks}. A single entry consists of the \emph{\faAlignJustify{}~Show} button, which opens the call stack information window (chapter~\ref{callstackwindow}) and of an abbreviated information about call path. If the \emph{\faImage{}~Show frame images} option is selected, hovering the \faMousePointer{}~mouse cursor over a message will show a tooltip containing frame image (see section~\ref{frameimages}) associated with frame in which the message was issued, if available. In a live capture, the message list will automatically scroll down to display the most recent message. This behavior can be disabled by manually scrolling the message list up. When the view is scrolled down to display the last message, the auto-scrolling feature will be enabled again. The message list can be filtered in the following ways: \begin{itemize} \item By the originating thread in the \emph{\faRandom{} Visible threads} drop-down. \item By matching the message text to the expression in the \emph{\faFilter{}~Filter messages} entry field. Multiple filter expressions can be comma-separated (e.g. 
'warn, info' will match messages containing the strings 'warn' \emph{or} 'info'). Matches can be excluded by preceding the term with a minus character (e.g. '-debug' will hide all messages containing the string 'debug').
\end{itemize}
\subsection{Statistics window}
\label{statistics}
Looking at the timeline view gives you a very localized outlook on things. Sometimes you want to take a look at a general overview of the program's behavior, for example to know which function consumes the most of the application's execution time. The statistics window provides exactly that information.
If the trace capture was performed with call stack sampling enabled (as described in chapter~\ref{sampling}), you will be presented with an option to switch between \emph{\faSyringe{}~Instrumentation} and \emph{\faEyeDropper{}~Sampling} modes. If no sampling data was collected, but symbols were retrieved, the second mode will be displayed as \emph{\faPuzzlePiece{}~Symbols}, enabling you to list the available symbols. Otherwise only the instrumentation view will be present.
\subsubsection{Instrumentation mode}
Here you will find a multi-column display of captured zones, which contains: the zone \emph{name} and \emph{location}, the \emph{total time} spent in the zone, the \emph{count} of zone executions and the \emph{mean time spent in the zone per call}. The view may be sorted according to the three displayed values.
The \emph{\faClock{}~Self time} option determines how the displayed time is calculated. If it is disabled, the measurements will be inclusive, that is, they include the execution time of the zone's children. Enabling the option switches the measurement to exclusive, displaying just the time spent in the zone itself, with the child calls subtracted.
Clicking the \LMB{} left mouse button on a zone will open the individual zone statistics view in the find zone window (section~\ref{findzone}).
You can filter the displayed list of zones by matching the zone name to the expression in the \emph{\faFilter{}~Filter zones} entry field. Refer to section~\ref{messages} for a more detailed description of the expression syntax.
\subsubsection{Sampling mode}
\label{statisticssampling}
Data displayed in this mode is in essence very similar to that of the instrumentation mode. Here you will find function names, their locations in source code and time measurements. There are, however, some very important differences.
First and foremost, the presented information is constructed from a number of call stack samples, which represent real addresses in the application's binary code, mapped to the line numbers in the source files. This reverse mapping may not always be possible, or may be erroneous. Furthermore, due to the nature of the sampling process, it is impossible to obtain exact time measurements. Instead, time values are estimated by multiplying the number of samples by the mean time between two distinct samples (see the short example below).
The \emph{Name} column contains the name of the function in which the sampling was done. If the \emph{\faSitemap{}~Inlines} option is enabled, functions which were inlined will be preceded with a '\faCaretRight{}' symbol and will additionally display their parent function name in parentheses. Otherwise, only non-inlined functions are listed, with the count of inlined functions in parentheses. Any entry containing inlined functions may be expanded to display the corresponding functions list (some functions may be hidden if the \emph{\faPuzzlePiece{}~Show all} option is disabled, due to a lack of sampling data).
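To illustrate the estimation mentioned above with purely made-up numbers: at an 8~kHz sampling rate the mean distance between two samples is 125~\si{\micro\second}, so a function that accumulated 40 samples would be reported as
\[
t_{\mathrm{estimated}} \approx 40 \cdot \frac{1}{8~\mathrm{kHz}} = 40 \cdot 125~\si{\micro\second} = 5~\si{\milli\second},
\]
even though its individual invocations may have been much shorter or longer than that.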
Clicking on a function name will open the call stack sample parents window (see chapter~\ref{sampleparents}). Note that if inclusive times are displayed, listed functions will be partially or completely coming from mid-stack frames, which will prevent, or limit the capability to display parent call stacks. The \emph{Location} column displays the corresponding source file name and line number. Depending on the \emph{Location} option selection it can either show function entry address, or the instruction at which the sampling was performed. The \emph{Entry} mode points at the beginning of a non-inlined function, or at the place where inlined function was inserted in its parent function. The \emph{Sample} mode is not useful for non-inlined functions, as it points to one randomly selected sampling point out of many that were captured. However, in case of inlined functions, this random sampling point is within the inlined function body. Using these options in tandem enable you to look at both the inlined function code and the place where it was inserted. If the \emph{Smart} location is selected, profiler will display entry point position for non-inlined functions and sample location for inlined functions. The location data is complemented by the originating executable image name, contained in the \emph{Image} column. Some function locations may not be found, due to insufficient debugging data available on the client side. To filter out such entries, use the \emph{\faEyeSlash{}~Hide unknown} option. The \emph{Time} or \emph{Count} column (depending on the \emph{\faStopwatch{}~Show time} option selection) shows number of taken samples, either as a raw count, or in an easier to understand time format. The last column, \emph{Code size}, displays the size of symbol in the executable image of the program. Since inlined routines are directly embedded into other functions, their symbol size will be based on the parent symbol, and displayed as 'less than'. In some cases this data won't be available. Finally, the list can be filtered using the \emph{\faFilter{}~Filter symbols} entry field, just like in the instrumentation mode case, and the exclusive/inclusive time counting mode can be switched using the \emph{\faClock{}~Self time} switch. If the \emph{\faPuzzlePiece{}~Show all} option is selected, the list will include not only call stack samples, but also all other symbols collected during the profiling process (this is enabled by default, if no sampling was performed). \subsection{Find zone window} \label{findzone} The individual behavior of zones may be influenced by many factors, like CPU cache effects, access times amortized by the disk cache, thread context switching, etc. Sometimes the execution time depends on the internal data structures and their response to different inputs. In other words, it is hard to determine the true performance characteristics by looking at any single zone. Tracy gives you the ability to display an execution time histogram of all occurrences of a zone. On this view you can see how the function behaves in general. You can inspect how various data inputs influence the execution time and you can filter the data to eventually drill down to the individual zone calls, so that you can see the environment in which they were called. You start by entering a search query, which will be matched against known zone names (see section~\ref{markingzones} for information on the grouping of zone names). 
If the search found some results, you will be presented with a list of zones in the \emph{matched source locations} drop-down. The selected zone's graph is displayed on the \emph{histogram} drop-down and also the matching zones are highlighted on the timeline view. Clicking the \RMB{} right mouse button on the source file location will open the source file view window (if applicable, see section~\ref{sourceview}). An example histogram is presented on figure~\ref{findzonehistogram}. Here you can see that the majority of zone calls (by count) are clustered in the 300~\si{\nano\second} group, closely followed by the 10~\si{\micro\second} cluster. There are some outliers at the 1~and~10~\si{\milli\second} marks, which can be ignored on most occasions, as these are single occurrences. \begin{figure}[h] \centering\begin{tikzpicture} \draw(0, 0) rectangle+(10, 3); \draw (0, -0.1) -- +(0, -0.7); \draw (0.6, -0.1) -- +(0, -0.1); \draw (0.96, -0.1) -- +(0, -0.1); \draw (1.2, -0.1) -- +(0, -0.1); \draw (1.4, -0.1) -- +(0, -0.1); \draw (1.56, -0.1) -- +(0, -0.1); \draw (1.68, -0.1) -- +(0, -0.1); \draw (1.8, -0.1) -- +(0, -0.1); \draw (1.9, -0.1) -- +(0, -0.1); \foreach \x in {2,4,6,8} { \draw (\x+0, -0.1) -- +(0, -0.2); \draw (\x+0.6, -0.1) -- +(0, -0.1); \draw (\x+0.96, -0.1) -- +(0, -0.1); \draw (\x+1.2, -0.1) -- +(0, -0.1); \draw (\x+1.4, -0.1) -- +(0, -0.1); \draw (\x+1.56, -0.1) -- +(0, -0.1); \draw (\x+1.68, -0.1) -- +(0, -0.1); \draw (\x+1.8, -0.1) -- +(0, -0.1); \draw (\x+1.9, -0.1) -- +(0, -0.1); } \draw (10, -0.1) -- +(0, -0.7); \draw (-0.2, -0.8) node[anchor=north west] {100 \si{\nano\second}}; \draw (1.8, -0.3) node[anchor=north west] {1 \si{\micro\second}}; \draw (3.8, -0.3) node[anchor=north west] {10 \si{\micro\second}}; \draw (5.8, -0.3) node[anchor=north west] {100 \si{\micro\second}}; \draw (7.8, -0.3) node[anchor=north west] {1 \si{\milli\second}}; \draw (10.1, -0.8) node[anchor=north east] {10 \si{\milli\second}}; \draw (4.8, -0.81) node[anchor=north] {\faLongArrowAltLeft~10~\si{\milli\second}~\faLongArrowAltRight}; \draw[pattern=north east lines] (0,0) -- (0.5, 0.3) -- (1, 2.95) -- (1.4, 0.6) -- (2, 0.15) -- (3, 0.2) -- (3.7, 0.5) -- (4, 2.1) -- (4.3, 0.7) -- (5, 0.2) -- (6, 0); \draw[pattern=north east lines] (7.8,0) -- (8, 0.15) -- (8.2, 0); \draw[pattern=north east lines] (9.8,0) -- (9.9, 0.1) -- (10, 0); \end{tikzpicture} \caption{Zone execution time histogram. Note that the extreme time labels and time range indicator (middle time value) are displayed in a separate line.} \label{findzonehistogram} \end{figure} The histogram is accompanied by various data statistics about displayed data, for example the \emph{total time} of the displayed samples, or the \emph{maximum number of counts} in histogram bins. The following options control how the data is presented: \begin{itemize} \item \emph{Log values} -- Switches between linear and logarithmic scale on the y~axis of the graph, representing the call counts\footnote{Or time, if the \emph{cumulate time} option is enabled.}. \item \emph{Log time} -- Switches between linear and logarithmic scale on the x~axis of the graph, representing the time bins. \item \emph{Cumulate time} -- Changes how the histogram bin values are calculated. By default the vertical bars on the graph represent the \emph{call counts} of zones that fit in the given time bin. If this option is enabled, the bars represent the \emph{time spent} in the zones. 
For example, on the graph presented on figure~\ref{findzonehistogram} the 10~\si{\micro\second} cluster is the dominant one if we look at the time spent in the zone, even though the 300~\si{\nano\second} cluster has a greater call count.
\item \emph{Self time} -- Removes child time from the analysed zones, which results in displaying only the time spent in the zone itself (or in non-instrumented function calls). Cannot be selected when \emph{Running time} is active.
\item \emph{Running time} -- Removes the time during which the zone's thread was suspended by the operating system due to preemption by other threads, waiting for system resources, lock contention, etc. Available only when context switch capture was performed (section~\ref{contextswitches}). Cannot be selected when \emph{Self time} is active.
\item \emph{Minimum values in bin} -- Excludes display of bins which do not hold enough values at both ends of the time range. Increasing this parameter will eliminate outliers, allowing you to concentrate on the interesting part of the graph.
\end{itemize}
You can drag the \LMB{} left mouse button over the histogram to select a time range that you want to look at closely. This will display the data in the histogram info section and it will also filter the zones displayed in the \emph{found zones} section. This is quite useful if you want to actually look at the outliers, i.e.\ where they originated from, what the program was doing at that moment, etc.\footnote{More often than not you will find out that the application was just starting, or that an access to a cold file was required, and there's not much you can do to optimize that particular case.} You can reset the selection range by pressing the \RMB{} right mouse button on the histogram.
The \emph{found zones} section displays the individual zones grouped according to the following criteria:
\begin{itemize}
\item \emph{Thread} -- In this mode you can see which threads were executing the zone.
\item \emph{User text} -- Splits the zones according to the custom user text (see section~\ref{markingzones} and the instrumentation sketch below).
\item \emph{Call stacks} -- Zones are grouped by the originating call stack (see section~\ref{collectingcallstacks}). Note that two call stacks may sometimes appear identical, even if they are not, due to an easy-to-overlook difference in the source line numbers.
\item \emph{Parent} -- Groups zones according to the parent zone. This mode relies on the zone hierarchy, and \emph{not} on the call stack information.
\item \emph{No grouping} -- Disables zone grouping. May be useful when you just want to see zones in the order in which they appeared.
\end{itemize}
Each group may be sorted according to the \emph{order} in which it appeared, the call \emph{count}, the total \emph{time} spent in the group, or the \emph{mean time per call}. Expanding the group view will display individual occurrences of the zone, which can be sorted by application time, execution time or zone name. Clicking the \LMB{} left mouse button on a zone will open the zone information window (section~\ref{zoneinfo}). Clicking the \MMB{} middle mouse button on a zone will zoom the timeline view to the zone's extent.
Clicking the \LMB{} left mouse button on a group name will highlight the group time data on the histogram (figure~\ref{findzonehistogramgroup}). This provides quick insight into the impact of the originating thread or input data on the zone's performance. Clicking the \RMB{} right mouse button on the group names area will reset the group selection.
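The \emph{User text} grouping mentioned in the list above relies on data attached to zones by the client application. The following is a minimal sketch of such markup, assuming the Tracy client header is available and \texttt{TRACY\_ENABLE} is defined; the function and its argument are invented for this example.
\begin{verbatim}
#include "Tracy.hpp"  // Tracy client header (the path may differ between versions)
#include <string>

void ProcessJob(const std::string& jobName)
{
    ZoneScopedN("ProcessJob");                 // the name matched by the search query
    ZoneText(jobName.c_str(), jobName.size()); // custom user text used for grouping
    // ... actual work ...
}
\end{verbatim}
Searching for \texttt{ProcessJob} in the find zone window and switching the grouping to \emph{User text} would then split the matched zones by the value passed to \texttt{ZoneText}.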
\begin{figure}[h] \centering\begin{tikzpicture} \draw(0, 0) rectangle+(10, 3); \foreach \x in {0,2,4,6,8} { \draw (\x+0, -0.1) -- +(0, -0.2); \draw (\x+0.6, -0.1) -- +(0, -0.1); \draw (\x+0.96, -0.1) -- +(0, -0.1); \draw (\x+1.2, -0.1) -- +(0, -0.1); \draw (\x+1.4, -0.1) -- +(0, -0.1); \draw (\x+1.56, -0.1) -- +(0, -0.1); \draw (\x+1.68, -0.1) -- +(0, -0.1); \draw (\x+1.8, -0.1) -- +(0, -0.1); \draw (\x+1.9, -0.1) -- +(0, -0.1); } \draw (10, -0.1) -- +(0, -0.2); \draw (-0.2, -0.3) node[anchor=north west] {100 \si{\nano\second}}; \draw (1.8, -0.3) node[anchor=north west] {1 \si{\micro\second}}; \draw (3.8, -0.3) node[anchor=north west] {10 \si{\micro\second}}; \draw (5.8, -0.3) node[anchor=north west] {100 \si{\micro\second}}; \draw (7.8, -0.3) node[anchor=north west] {1 \si{\milli\second}}; \draw (9.8, -0.3) node[anchor=north west] {10 \si{\milli\second}}; \draw[pattern=north east lines] (0,0) -- (0.5, 0.3) -- (1, 2.95) -- (1.4, 0.6) -- (2, 0.15) -- (3, 0.2) -- (3.7, 0.5) -- (4, 2.1) -- (4.3, 0.7) -- (5, 0.2) -- (6, 0); \draw[pattern=north east lines] (7.8,0) -- (8, 0.15) -- (8.2, 0); \draw[pattern=north east lines] (9.8,0) -- (9.9, 0.1) -- (10, 0); \draw[pattern=north west lines] (3.2, 0) -- (3.75, 0.3) -- (3.95, 1.4) -- (4.2, 0); \end{tikzpicture} \caption{Zone execution time histogram with a group highlighted.} \label{findzonehistogramgroup} \end{figure} The call stack grouping mode has a different way of listing groups. Here only one group is displayed at any time, due to need to display the call stack frames. You can switch between call stack groups by using the~\faCaretLeft{}~and~\faCaretRight{} buttons. The group can be selected by clicking on the~\emph{\faCheck{}~Select} button (to reset the group selection use the~\RMB{}~right mouse button, as usual). You can open the call stack window (section~\ref{callstackwindow}) by pressing the~\emph{\faAlignJustify{}~Call~stack} button. Tracy displays a variety of statistical values regarding the selected function: mean (average value), median (middle value), mode (most common value, quantized using histogram bins), and \textsigma{} (standard deviation). The mean and median zone times are also displayed on the histogram as a red (mean) and blue (median) vertical bars. When a group is selected, additional bars will indicate the mean group time (orange) and median group time (green). You can disable drawing of either set of markers by clicking on the check-box next to the color legend. Hovering the \faMousePointer{}~mouse cursor over a zone on the timeline, which is currently selected in the find zone window, will display a pulsing vertical bar on the histogram, highlighting the bin to which the hovered zone has been assigned. Zone entry on the zone list will also be highlighted. \begin{bclogo}[ noborder=true, couleur=black!5, logo=\bclampe ]{Keyboard shortcut} You may press \keys{\ctrl + F} to open or focus the find zone window and set the keyboard input on the search box. \end{bclogo} \begin{bclogo}[ noborder=true, couleur=black!5, logo=\bcattention ]{Caveats} When using the execution times histogram you must be aware about the hardware peculiarities. Read section~\ref{checkenvironmentcpu} for more detail. \end{bclogo} \subsubsection{Timeline interaction} When the zone statistics are displayed in the find zone menu, matching zones will be highlighted on the timeline display. Highlight colors match the histogram display. 
Bright blue highlight is used to indicate that a zone is in the optional selection range, while the yellow highlight is used for the rest of zones. \subsubsection{Frame time graph interaction} \label{frametimefindzone} The frame time graph (section~\ref{frametimegraph}) behavior is altered when a zone is displayed in the find zone window and the \emph{Show zone time in frames} option is selected. Instead of coloring the frame bars according to the frame time targets, an accumulated zone execution time is shown. Each bar is drawn in gray color, with the white part accounting for the zone time. If the execution time is greater than the frame time (this is possible if more than one thread was executing the same zone), the overflow will be displayed using red color. \begin{bclogo}[ noborder=true, couleur=black!5, logo=\bcattention ]{Caveats} The displayed data might not be calculated correctly and some zones may not be included in the reported times. \end{bclogo} \subsubsection{Limiting zone time range} If the \emph{limit range} option is selected, only the zones within the specified time range will be included in the data. To indicate that the display is locked to a subset of all zones, a \faLock{}~lock icon will be displayed, and a yellow highlight matching the specified range will be present on the timeline view. \subsection{Compare traces window} \label{compare} Comparing the performance impact of the optimization work is not an easy thing to do. Benchmarking is often inconclusive, if even possible, in case of interactive applications, where the benchmarked function might not have a visible impact on frame render time. Doing isolated micro-benchmarks loses the execution environment of the application, in which many different functions compete for limited system resources. Tracy solves this problem by providing a compare traces functionality, very similar to the find zone window, described in section~\ref{findzone}. Traces can be compared either by zone or frame timing data. You would begin your work by recording a reference trace that represents the usual behavior of the program. Then, after the optimization of the code is completed, you record another trace, doing roughly what you did for the reference one. Having the optimized trace open you select the \emph{\faFolderOpen{}~Open second trace} option in the compare traces window and load the reference trace. Now things start to get familiar. You search for a zone, similarly like in the find zone window, choose the one you want in the \emph{matched source locations} drop-down, and then you look at the histogram\footnote{When comparing frame times you are presented with a list of available frame sets, without the search box.}. This time there are two overlaid graphs, one representing the current trace, and the second one representing the external (reference) trace (figure~\ref{comparehistogram}). You can easily see how the performance characteristics of the zone were affected by your modifications. 
\begin{figure}[h]
\centering\begin{tikzpicture}
\draw(0, 0) rectangle+(10, 3);
\foreach \x in {0,2,4,6,8} {
\draw (\x+0, -0.1) -- +(0, -0.2);
\draw (\x+0.6, -0.1) -- +(0, -0.1);
\draw (\x+0.96, -0.1) -- +(0, -0.1);
\draw (\x+1.2, -0.1) -- +(0, -0.1);
\draw (\x+1.4, -0.1) -- +(0, -0.1);
\draw (\x+1.56, -0.1) -- +(0, -0.1);
\draw (\x+1.68, -0.1) -- +(0, -0.1);
\draw (\x+1.8, -0.1) -- +(0, -0.1);
\draw (\x+1.9, -0.1) -- +(0, -0.1);
}
\draw (10, -0.1) -- +(0, -0.2);
\draw (-0.2, -0.3) node[anchor=north west] {100 \si{\nano\second}};
\draw (1.8, -0.3) node[anchor=north west] {1 \si{\micro\second}};
\draw (3.8, -0.3) node[anchor=north west] {10 \si{\micro\second}};
\draw (5.8, -0.3) node[anchor=north west] {100 \si{\micro\second}};
\draw (7.8, -0.3) node[anchor=north west] {1 \si{\milli\second}};
\draw (9.8, -0.3) node[anchor=north west] {10 \si{\milli\second}};
\draw[pattern=north east lines] (0,0) -- (0.5, 0.3) -- (1, 2.95) -- (1.4, 1) -- (2, 0.5) -- (7, 0);
\draw[pattern=north west lines] (0.5,0) -- (1, 0.4) -- (1.7, 2.8) -- (2.1, 1.1) -- (2.7, 0.6) -- (5, 0.2) -- (10, 0);
\end{tikzpicture}
\caption{Compare traces histogram.}
\label{comparehistogram}
\end{figure}
Note that the traces are color- and symbol-coded. The current trace is marked by a yellow \faLemon{} symbol, and the external one is marked by a red \faGem{} symbol.
When searching for source locations it's not uncommon to match more than one zone (for example a search for \texttt{Draw} may result in \texttt{DrawCircle} and \texttt{DrawRectangle} matches). Typically you wouldn't want to compare the execution profiles of two unrelated functions. This is prevented by the \emph{link selection} option, which ensures that when you choose a source location in one trace, the corresponding one is also selected in the second trace. Be aware that this may still result in a mismatch, for example if you have overloaded functions. In such a case you will need to manually select the appropriate function in the other trace.
It may be difficult, if not impossible, to perform identical runs of a program. This means that the number of collected zones may differ in both traces, which would influence the displayed results. To fix this problem, enable the \emph{Normalize values} option, which will adjust the displayed results as if both traces had the same number of recorded zones.
\begin{bclogo}[ noborder=true, couleur=black!5, logo=\bclampe ]{Trace descriptions}
Set custom trace descriptions (see section~\ref{traceinfo}) to easily differentiate the two loaded traces. If no trace description is set, the name of the profiled program will be displayed along with the capture time.
\end{bclogo}
\subsection{Memory window}
\label{memorywindow}
The data gathered by profiling memory usage (section~\ref{memoryprofiling}) can be viewed in the memory window. The top row contains statistics, such as the \emph{total allocations} count, the number of \emph{active allocations}, the current \emph{memory usage} and the process \emph{memory span}\footnote{Memory span describes the address space consumed by the program. It is calculated as the difference between the maximum and minimum observed in-use memory address.}.
The lists of captured memory allocations are displayed in a common multi-column format throughout the profiler. The first column specifies the memory address of an allocation, or an address and an offset, if the address is not at the start of the allocation.
Clicking the \LMB{} left mouse button on an address will open the memory allocation information window\footnote{While the allocation information window is open, the address will be highlighted on the list.} (see section~\ref{memallocinfo}). Clicking the \MMB{}~middle mouse button on an address will zoom the timeline view to the memory allocation's range.
The next column contains the allocation size.
The allocation's timing data is contained in two columns: \emph{appeared at} and \emph{duration}. Clicking the \LMB{}~left mouse button on the first one will center the timeline view at the beginning of the allocation, and likewise, clicking on the second one will center the timeline view at the end of the allocation. Note that allocations that have not yet been freed will have their duration displayed in green.
The memory event location in the code is displayed in the last four columns. The \emph{thread} column contains the thread where the allocation was made and freed (if applicable), or an \emph{alloc / free} pair of threads, if it was allocated in one thread and freed in another. The \emph{zone alloc} column contains the zone in which the allocation was performed\footnote{The actual allocation is typically a couple of functions deeper in the call stack.}, or \texttt{-} if there was no active zone in the given thread at the time of allocation. Clicking the \LMB{}~left mouse button on the zone name will open the zone information window (section~\ref{zoneinfo}). Similarly, the \emph{zone free} column displays the zone that freed the allocation, which may be colored yellow if it is the exact same zone that did the allocation. Alternatively, if the allocation has not yet been freed, a green \emph{active} text is displayed. The last column contains the \emph{alloc} and \emph{free} call stack buttons, or their placeholders, if no call stack is available (see section~\ref{collectingcallstacks} for more information). Clicking on either of the buttons will open the call stack window (section~\ref{callstackwindow}). Note that the call stack buttons that match the information window will be highlighted.
The memory window is split into the following sections:
\subsubsection{Allocations}
The \emph{\faAt{} Allocations} pane allows you to search for usage of the specified address during the whole lifetime of the program. All recorded memory allocations that match the query will be displayed on a list.
\subsubsection{Active allocations}
The \emph{\faHeartbeat{} Active allocations} pane displays a list of currently active memory allocations and their total memory usage. Here you can see where exactly your program allocated the memory it is currently using. If the application has already exited, this becomes a list of leaked memory.
\subsubsection{Memory map}
On the \emph{\faMap{} Memory map} pane you can see a graphical representation of your program's address space. Active allocations are displayed as green lines, while the freed memory is marked as red lines. The brightness of the color indicates how much time has passed since the last memory event at the given location -- the most recent events are the most vibrant. This view may be helpful in assessing the general memory behavior of the application, or in debugging problems resulting from address space fragmentation.
\subsubsection{Bottom-up call stack tree}
\label{callstacktree}
The \emph{\faAlignJustify{}~Bottom-up call stack tree} pane is only available if call stack data was collected for the memory events (section~\ref{collectingcallstacks}).
In this view you are presented with a tree of memory allocations, starting at the call stack entry point and going up to the allocation's pinpointed place. Each level of the tree is sorted according to the number of bytes allocated in the given branch.
Each tree node consists of three elements: the function name, the source file location and the memory allocation data. The memory allocation data is either a yellow \emph{inclusive} events count (allocations performed by children), or a cyan \emph{exclusive} events count (allocations that took place in the node)\footnote{Due to the way call stacks work, an entry cannot have both inclusive and exclusive counts in a properly instrumented program.}. There are two values that are counted: the total memory size and the number of allocations.
The \emph{Group by function name} option controls how tree nodes are grouped. If it is disabled, then the grouping is performed at machine-instruction-level granularity. This may result in very verbose output, but the displayed source locations are precise. To make the tree more readable you may opt to perform grouping at the function name level, which will result in less precise source file locations, as multiple entries are collapsed into one. Enabling the \emph{Only active allocations} option will limit the call stack tree to display only active allocations.
Clicking the \RMB{}~right mouse button on the function name will open the allocations list window (see section~\ref{alloclist}), which lists all the allocations included at the current call stack tree level. Clicking the \RMB{}~right mouse button on the source file location will open the source file view window (if applicable, see section~\ref{sourceview}).
Some function names may be too long to be properly displayed, with the events count data at the end. In such cases, you may press the \keys{\ctrl}~key, which will display a tooltip with the events count.
\subsubsection{Top-down call stack tree}
This pane is identical in functionality to the \emph{Bottom-up call stack tree}, but the call stack order is reversed when the tree is built. This means that the tree starts at the memory allocation functions and goes down to the call stack entry point.
\subsubsection{Looking back at the memory history}
By default, the memory window displays the memory data at the current point of program execution. It is, however, possible to view the historical data by enabling the \emph{\faHistory{}~Restrict time} option. This will draw a vertical violet line on the timeline view, which will act as a terminator for memory events. The memory window will use only the events lying on the left side of the terminator line (in the past), ignoring everything that's on the right side.
\subsection{Allocations list window}
\label{alloclist}
This window displays the list of allocations included at the selected call stack tree level (see sections~\ref{memorywindow} and~\ref{callstacktree}).
\subsection{Memory allocation information window}
\label{memallocinfo}
The information about the selected memory allocation is displayed in this window. It lists the allocation's address and size, along with the time, thread and zone data of the allocation and free events. Clicking the \emph{\faMicroscope{}~Zoom to allocation} button will zoom the timeline view to the allocation's extent.
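All of the memory data described in this section has to be supplied by the client application (section~\ref{memoryprofiling}). The following is a minimal sketch of one possible way to do that, assuming the Tracy client header is available, \texttt{TRACY\_ENABLE} is defined, and overriding the global allocation operators is acceptable in your code base.
\begin{verbatim}
#include "Tracy.hpp"  // Tracy client header (the path may differ between versions)
#include <cstdlib>
#include <new>

void* operator new(std::size_t count)
{
    void* ptr = std::malloc(count);
    if(!ptr) throw std::bad_alloc();
    TracyAlloc(ptr, count);   // reports the address, size and time of the allocation
    return ptr;
}

void operator delete(void* ptr) noexcept
{
    TracyFree(ptr);           // reports that the allocation was freed
    std::free(ptr);
}
\end{verbatim}
Events reported this way populate the allocation lists, the memory map and the call stack trees described above; capturing call stacks for them is configured separately on the client side (section~\ref{collectingcallstacks}).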
\subsection{Trace information window}
\label{traceinfo}
This window contains information about the current trace: the captured program name, the time of the capture, the profiler version which performed the capture and a custom trace description, which you can fill in.
Open the \emph{Trace statistics} section to see information about the trace, such as the achieved timer resolution, the number of captured zones, lock events, plot data points, memory allocations, etc.
There's also a section containing the selected frame set timing statistics and histogram\footnote{See section~\ref{findzone} for a description of the histogram. Note that there are subtle differences in the available functionality.}.
As a convenience, you can switch the active frame set here and limit the displayed frame statistics to the frame range visible on the screen.
If \emph{CPU topology} data is available (see section~\ref{cputopology}), you will be able to view the package, core and thread hierarchy.
The \emph{Source location substitutions} section allows adapting the source file paths captured by the profiler to the actual on-disk locations.
You can create a new substitution by clicking the \emph{Add new substitution} button.
This will add a new entry, with input fields for an ECMAScript-conforming regular expression pattern and its corresponding replacement string.
The outcome of the substitutions can be quickly tested in the \emph{example source location} input field, which will be transformed and displayed below as the \emph{result}.
\begin{bclogo}[ noborder=true, couleur=black!5, logo=\bclampe ]{Quick example}
Let's say we have a Unix-based operating system with the program sources in the \texttt{/home/user/program/src/} directory.
We have also performed a capture of an application running under Windows, with sources in the \texttt{C:\textbackslash{}Users\textbackslash{}user\textbackslash{}Desktop\textbackslash{}program\textbackslash{}src} directory.
Obviously, the source locations don't match and the profiler can't access the source files we have on our disk.
We can fix that by adding two substitution patterns:
\begin{itemize}
\item \texttt{\^{}C:\textbackslash{}\textbackslash{}Users\textbackslash{}\textbackslash{}user\textbackslash{}\textbackslash{}Desktop} \hspace{1em}\textrightarrow\hspace{1em} \texttt{/home/user}
\item \texttt{\textbackslash{}\textbackslash{}} \hspace{1em}\textrightarrow\hspace{1em} \texttt{/}
\end{itemize}
\end{bclogo}
In this window you can also view information about the machine on which the profiled application was running.
This includes the operating system, the compiler used, the CPU name, the total amount of available RAM, etc.
If application information was provided (see section~\ref{appinfo}), it will also be displayed here.
If the application crashes during profiling (section~\ref{crashhandling}), the crash information will be displayed in this window.
It provides information about the thread that crashed, the crash reason and the crash call stack (section~\ref{callstackwindow}).
\subsection{Zone information window}
\label{zoneinfo}
The zone information window displays detailed information about a single zone.
There can be only one zone information window open at any time.
While the window is open, the zone will be highlighted on the timeline view with a green outline.
The following data is presented:
\begin{itemize}
\item Basic source location information: function name, source file location and the thread name.
\item Timing information.
\item If context switch capture was performed (section~\ref{contextswitches}) and a thread was suspended during zone execution, a list of wait regions will be displayed, with complete information about timing, CPU migrations and wait reasons.
If CPU topology data is available (section~\ref{cputopology}), zone migrations across cores will be marked with 'C', and migrations across packages -- with 'P'.
In some cases the context switch data might be incomplete\footnote{For example, when a capture is ongoing and context switch information has not yet been received.}, in which case a warning message will be displayed.
\item Memory events list, both summarized and as a list of individual allocation/free events (see section~\ref{memorywindow} for more information on the memory events list).
\item List of messages that were logged in the zone's scope (including its children).
\item Zone trace, which takes into account the zone tree and call stack information (section~\ref{collectingcallstacks}) and tries to reconstruct a combined zone + call stack trace\footnote{Reconstruction is only possible if all zones have full call stack capture data available. Where that is not the case, an \emph{unknown frames} entry will be present.}.
Captured zones are displayed as normal text, while functions that were not instrumented are dimmed.
Hovering the \faMousePointer{}~mouse pointer over a zone will highlight it on the timeline view with a red outline.
Clicking the \LMB{}~left mouse button on a zone will switch the zone info window to that zone.
Clicking the \MMB{}~middle mouse button on a zone will zoom the timeline view to the zone's extent.
Clicking the \RMB{}~right mouse button on a source file location will open the source file view window (if applicable, see section~\ref{sourceview}).
\item Child zones list, showing how the current zone's execution time was used.
Zones on this list can be grouped according to their source location.
Each group can be expanded to show individual entries.
All the controls from the zone trace are also available here.
\item Time distribution in child zones, which expands the information provided in the child zones list by processing \emph{all} zone children (including multiple levels of grandchildren).
This results in a statistical list of the zones that were actually doing the work in the current zone's time span.
If a group of zones is selected on this list, the find zone window (section~\ref{findzone}) will open, with the time range limited to show only the children of the current zone.
\end{itemize}
The zone information window has the following controls available:
\begin{itemize}
\item \emph{\faMicroscope{} Zoom to zone} -- Zooms the timeline view to the zone's extent.
\item \emph{\faArrowUp{} Go to parent} -- Switches the zone information window to display the current zone's parent zone (if available).
\item \emph{\faChartBar{} Statistics} -- Displays the zone's general performance characteristics in the find zone window (section~\ref{findzone}).
\item \emph{\faAlignJustify{} Call stack} -- Shows the current zone's call stack in the call stack window (section~\ref{callstackwindow}).
The button will be highlighted if the call stack window shows the zone's call stack.
Only available if the zone has call stack data captured (section~\ref{collectingcallstacks}).
\item \emph{\faFile*{} Source} -- Displays the source file view window with the zone's source code (only available if applicable, see section~\ref{sourceview}).
The button will be highlighted if the source file is currently being displayed (though the focused source line might be different).
\item \emph{\faArrowLeft{} Go back} -- Returns to the previously viewed zone.
The viewing history is lost when the zone information window is closed, or when the type of the displayed zone changes (from CPU to GPU or vice versa).
\end{itemize}
\subsection{Call stack window}
\label{callstackwindow}
This window shows the frames contained in the selected call stack.
Each frame is described by a function name, source file location and the originating image\footnote{Executable images are called \emph{modules} by Microsoft.} name.
Clicking the \LMB{}~left mouse button on either the function name or the source file location will copy it to the clipboard.
Clicking the \RMB{}~right mouse button on the source file location will open the source file view window (if applicable, see section~\ref{sourceview}).
A single stack frame may have multiple function call places associated with it.
This happens in the case of inlined function calls.
Such entries will be displayed in the call stack window with \emph{inline} in place of the frame number\footnote{Or a '\faCaretRight{}'~icon in the case of call stack tooltips.}.
The stack frame location may be displayed in a number of ways, depending on the \emph{\faAt{}~Frame location} option selection:
\begin{itemize}
\item \emph{Source code} -- displays the source file and line number associated with the frame.
\item \emph{Entry point} -- source code at the beginning of the function containing the selected frame, or the function call place in the case of inline frames.
\item \emph{Return address} -- shows the return address, which may be used to pinpoint the exact instruction in the disassembly.
\item \emph{Symbol address} -- displays the begin address of the function containing the frame address.
\end{itemize}
In some cases it may not be possible to properly decode a stack frame address.
Such frames will be presented with a dimmed name of the image containing the frame address (for example, '\texttt{[ntdll.dll]}'), or simply '\texttt{[unknown]}' if even this information cannot be retrieved.
Additionally, '\texttt{[kernel]}' is used to indicate unknown stack frames within the operating system's internal routines.
If the displayed call stack is a sampled call stack (chapter~\ref{sampling}), an additional button will be available, \emph{\faDoorOpen{}~Global entry statistics}.
Clicking it will open the call stack sample parents window (chapter~\ref{sampleparents}) for the current call stack.
\subsubsection{Reading call stacks}
\label{readingcallstacks}
You need to take special care when reading call stacks.
Contrary to their name, call stacks do not show \emph{function call stacks}, but rather \emph{function return stacks}.
This might be a bit confusing at first, but this is how programs actually work.
Consider the following source code:
\begin{lstlisting}
int main()
{
    auto app = std::make_unique<Application>();
    app->Run();
    app.reset();
}
\end{lstlisting}
Let's say you are looking at the call stack of some function called within \texttt{Application::Run}.
This is the result you might get:
\begin{lstlisting}
0. @\ldots@
1. @\ldots@
2. Application::Run
3. std::unique_ptr<Application>::reset
4. main
\end{lstlisting}
At first glance it may look like \texttt{unique\_ptr::reset} was the \emph{call site} of \texttt{Application::Run}, which would make no sense, but this is not the case here.
When you remember that these are the \emph{function return points}, it becomes much clearer what is happening.
As an optimization, \texttt{Application::Run} returns directly into \texttt{unique\_ptr::reset}, skipping the return to \texttt{main} and an unnecessary \texttt{reset} function call.
Moreover, in some rare cases the linker may determine that two functions in your program are identical\footnote{For example, if all they do is zero-initialize a region of memory, as some constructors would.}.
As a result, only one copy of the binary code will be provided in the executable for both functions to share.
While this optimization produces more compact programs, it also means that there's no way to tell the two functions apart in the resulting machine code.
In effect, some call stacks may look nonsensical until you perform a small investigation.
\subsection{Call stack sample parents window}
\label{sampleparents}
This window displays statistical information about the selected symbol.
All sampled call stacks (chapter~\ref{sampling}) leading to the symbol are counted and displayed in descending order.
You can select the displayed call stack using the \emph{parent call stack} controls, which also display the time spent in the chosen call stack.
Alternatively, sample counts may be shown by disabling the \emph{\faStopwatch{}~Show time} option, which is described in more detail in chapter~\ref{statisticssampling}.
The layout of the frame list and the \emph{\faAt{}~Frame location} option selection are similar to those of the call stack window, described in chapter~\ref{callstackwindow}.
\subsection{Source file view window}
\label{sourceview}
In this window you can view the source code of the profiled application, to take a quick glance at the context of the function behavior you are analyzing.
The selected line (for example, the location of a profiling zone) will be highlighted both in the source code listing and on the scroll bar.
\begin{bclogo}[ noborder=true, couleur=black!5, logo=\bcbombe ]{Important}
The source file view works on the local files you have on your disk.
The traces themselves do not contain any source code!
This has the following implications:
\begin{itemize}
\item Source files can only be viewed if the source file location recorded in the trace matches the files you have on your disk.
See section~\ref{traceinfo} for information on redirecting source file locations.
\item The time stamp of the source file cannot be newer than the trace, as that would typically indicate that the file has been changed and no longer contains the code that was profiled.
\item \textbf{The displayed source files might not reflect the code that was profiled!}
It is up to you to verify that you don't have a modified version of the code with regard to the trace.
\end{itemize}
\end{bclogo}
\subsubsection{Symbol view}
If the inspected source location has an associated symbol context (i.e. if it comes from a call stack capture, from call stack sampling, etc.), a much more capable symbol view is available.
A symbol is a unit of machine code, basically a callable function.
It may be generated using multiple source files and may consist of multiple inlined functions.
A list of all captured symbols is available in the statistics window, as described in chapter~\ref{statisticssampling}.
The header of the symbol view window contains the name of the selected \emph{\faPuzzlePiece{}~symbol}, a list of \emph{\faSitemap{}~functions} that contribute to the symbol, and information such as the \emph{\faWeightHanging{}~Code size} in the program, or the count of probed \emph{\faEyeDropper{}~Samples}.
Additionally, you may use the \emph{Mode} selector to decide what content should be displayed in the panels below:
\begin{itemize}
\item \emph{Source} -- only the source code will be displayed.
\item \emph{Assembly} -- only the machine code disassembly will be shown.
\item \emph{Combined} -- both source code and disassembly will be listed next to each other.
\end{itemize}
In some circumstances (missing or outdated source files, lack of machine code) some modes may be unavailable.
\paragraph{Source mode}
This is pretty much the original source file view window, but with the ability to select one of the source files that were used to build the symbol.
Additionally, each source file line which produced machine code in the symbol will show the count of associated assembly instructions, displayed with an '\texttt{@}' prefix, and will be marked in grey on the scroll bar.
Due to the way optimizing compilers work, some lines may seemingly not produce any machine code, for example because iterating a loop counter index might have been reduced to advancing a data pointer.
Some other lines may have a disproportionate amount of associated instructions, e.g. when a loop unrolling optimization is applied.
This varies from case to case and from compiler to compiler.
\paragraph{Assembly mode}
This mode shows the disassembly of the symbol's machine code.
Each assembly instruction is listed with its location in program memory during execution.
If the \emph{\faSearchLocation{}~Relative locations} option is selected, an offset from the symbol beginning will be printed instead.
If the \emph{\faFileImport{}~Source locations} option is selected, each line of the assembly code will also contain information about the originating source file name and line number.
For easier differentiation between different source files, each file is assigned its own color.
Clicking the \LMB{}~left mouse button on a displayed source location will switch the source file, if necessary, and focus the source view on the selected line.
Selecting the \emph{\faCogs{}~Machine code} option will enable the display of raw machine code bytes for each line.
If an instruction jumps to a predefined address, the symbolic name of the jump target will additionally be displayed.
If the destination location is within the currently displayed symbol, an \texttt{->}~arrow will be prepended to the name.
Hovering the \faMousePointer{}~mouse pointer over such a symbol name will highlight the target location.
Clicking on it with the \LMB{}~left mouse button will focus the view on the destination instruction, or switch the view to the destination symbol.
Enabling the \emph{\faShare{}~Jumps} option will show jumps within the symbol code as a series of arrows from the jump source to the jump target.
Hovering the \faMousePointer{}~mouse pointer over a jump arrow will display a jump information tooltip and will also draw the jump range on the scroll bar as a green line.
The jump target location will be marked by a horizontal green line.
Jumps going out of the symbol\footnote{This includes jumps, procedure calls and returns. For example, in x86 assembly the respective instruction mnemonics are: \texttt{jmp}, \texttt{call}, \texttt{ret}.} will be indicated by a smaller arrow pointing away from the code.
The \emph{AT\&T} switch can be used to select between \emph{Intel} and \emph{AT\&T} assembly syntax.
Beware that microarchitecture data is only available if Intel syntax is selected.
Unlike the source file view, portions of the executable are stored within the captured profile and don't rely on the local disk files being available.
\subparagraph{Exploring microarchitecture}
If the listed assembly code targets the x86 or x64 instruction set architectures, hovering the \faMousePointer{}~mouse pointer over an instruction will display a tooltip with microarchitectural data, based on measurements made in \cite{Abel19a}.
\emph{This information is retrieved from instruction cycle tables and does not represent the true behavior of the profiled code.}
Reading the cited article will give you a detailed definition of the presented data, but here's a quick (and inaccurate) explanation:
\begin{itemize}
\item \emph{Throughput} -- How many cycles are required to execute an instruction in a stream of independent instances of the same instruction.
For example, if two independent \texttt{add} instructions can be executed simultaneously on different execution units, then the throughput (cycle cost per instruction) is 0.5.
\item \emph{Latency} -- How many cycles it takes for an instruction to finish executing.
This is reported as a min-max range, as some output values may be available earlier than the rest.
\item \emph{\textmu{}ops} -- How many microcode operations have to be dispatched for an instruction to retire.
For example, adding a value from memory to a register may consist of two microinstructions: first load the value from memory, then add it to the register.
\item \emph{Ports} -- Which ports (execution units) are required for the dispatch of microinstructions.
For example, \texttt{2*p0+1*p015} would mean that out of the three microinstructions implementing the assembly instruction, two can only be executed on port 0, and one microinstruction can be executed on ports 0, 1, or 5.
The number of available ports and their capabilities vary between processor architectures.
Refer to \url{https://wikichip.org/} for more information.
\end{itemize}
The CPU microarchitecture can be selected using the \emph{\faMicrochip{}~\textmu{}arch} drop-down.
Each architecture is accompanied by the name of an example CPU implementing it.
Enabling the \emph{\faTruckLoading{}~Latency} option will display a graphical representation of instruction latencies on the listing.
The minimum latency of an instruction is represented by a red bar, while the maximum latency is represented by a yellow bar.
Clicking on the \emph{\faFileImport{}~Save} button lets you write the disassembly listing to a file.
You can then manually extract a critical loop kernel and pass it to a CPU simulator, such as the \emph{LLVM Machine Code Analyzer} (\texttt{llvm-mca})\footnote{\url{https://llvm.org/docs/CommandGuide/llvm-mca.html}}, in order to see how the code is executed and whether there are any pipeline bubbles.
Consult the \texttt{llvm-mca} documentation for more details.
\subparagraph{Instruction dependencies}
Assembly instructions may read values stored in registers and may also write values to registers.
A dependency between two instructions is created when one produces a result that is then consumed by the other.
Combining this dependency graph with information about instruction latencies may give a deep understanding of the bottlenecks in code performance.
Clicking the \LMB{}~left mouse button on any assembly instruction will mark it as a target for resolving register dependencies between instructions.
To cancel this selection, click on any assembly instruction with the \RMB{}~right mouse button.
The selected instruction will be highlighted in red, while its dependencies will be highlighted in violet.
Additionally, the dependent registers will be listed next to each instruction that reads or writes them, with the following color code:
\begin{itemize}
\item \emph{Green} -- The register value is read (a dependency \emph{after} the target instruction).
\item \emph{Red} -- A value is written to the register (a dependency \emph{before} the target instruction).
\item \emph{Yellow} -- The register is read and then modified.
\item \emph{Grey} -- The value in the register is either discarded (overwritten), or was already consumed by an earlier instruction (i.e. it is readily available\footnote{This is actually a bit of a simplification. Run a pipeline simulator, e.g. \texttt{llvm-mca}, for a better analysis.}).
The dependency will not be followed further.
\end{itemize}
The search for dependencies follows the program control flow, so there may be multiple producers and consumers for any single register.
While the \emph{after} and \emph{before} guidelines mentioned above hold in the general case, things may be more complicated when there is a large number of conditional jumps in the code.
Note that dependencies further away than 64 instructions are not displayed.
For easier navigation, dependencies are also marked on the left side of the scroll bar, following the green, red and yellow convention.
The selected instruction is marked in blue.
\paragraph{Combined mode}
In this mode both the source and assembly panes will be displayed together, providing the best way to gain insight into the code.
Hovering the \faMousePointer{}~mouse pointer over a source file line, or over the location of an assembly line, will highlight the corresponding lines in the other pane (both in the listing and on the scroll bar).
Clicking the \LMB{}~left mouse button on a line will select it in both panes.
Clicking the \RMB{}~right mouse button will also focus the secondary view on the selected line (or the first of the selected lines, if there is more than one).
\begin{bclogo}[ noborder=true, couleur=black!5, logo=\bcbombe ]{Important}
An assembly instruction may be associated with only a single source line, but a source line might be associated with multiple assembly lines, sometimes intermixed with other assembly instructions.
\end{bclogo}
\paragraph{Instruction pointer statistics}
If automated call stack sampling (see chapter~\ref{sampling}) was performed, additional profiling information will be available.
The first column of the source and assembly views will contain percentage counts of the collected instruction pointer samples for each displayed line, both in numerical and graphical bar form.
This information can be used to determine which line of the function takes the most time.
The displayed percentage values are heat map color coded, with the lowest values mapped to dark red, and the highest values mapped to bright yellow.
The color code will appear next to the percentage value, and on the scroll bar, so that 'hot' places in the code can be identified at a glance.
Instruction timings can be viewed as a group.
To begin constructing such a group, click the \LMB{}~left mouse button on a percentage value.
Additional instructions can be added using the \keys{\ctrl}~key, while holding the \keys{\shift}~key allows selection of a range.
To cancel the selection, click the \RMB{}~right mouse button on a percentage value.
Group statistics can be seen at the bottom of the pane.
The sample data source is controlled by the \emph{\faSitemap{}~Function} control in the window header.
If this option is disabled, the sample data represents the whole symbol.
If it is enabled, then the sample data will only include the selected function.
\begin{bclogo}[ noborder=true, couleur=black!5, logo=\bcbombe ]{Important}
Be aware that the data is not fully accurate, as it is the result of random sampling of program execution.
Furthermore, undocumented implementation details of an out-of-order CPU architecture will highly impact the measurement.
Read chapter~\ref{checkenvironmentcpu} to see the tip of the iceberg.
\end{bclogo}
\subsection{Lock information window}
\label{lockwindow}
This window presents information and statistics about a lock.
The lock event count represents the total number of collected wait, obtain and release events.
The announce, termination and lock lifetime values measure the time from the construction of the lockable until its destruction.
\subsection{Frame image playback window}
\label{playback}
You may view a live replay of the profiled application's screen captures (see section~\ref{frameimages}) using this window.
Playback is controlled by the \emph{\faPlay~Play} and \emph{\faPause~Pause} buttons, and the \emph{Frame image} slider can be used to scrub to the desired time stamp.
Alternatively, you may use the \emph{\faCaretLeft} and \emph{\faCaretRight} buttons to step a single frame back or forward.
If the \emph{Sync timeline} option is selected, the timeline view will be focused on the frame corresponding to the currently displayed screen shot.
The \emph{Zoom 2$\times$} option enlarges the image, for easier viewing.
Each displayed frame image is also accompanied by the following parameters: \emph{timestamp}, showing at which time the image was captured, \emph{frame}, displaying the numerical value of the corresponding frame, and \emph{ratio}, telling how well the in-memory lossless compression was able to reduce the image data size.
\subsection{CPU data window}
\label{cpudata}
Statistical data about all processes running on the system during the capture is available in this window if context switch capture (section~\ref{contextswitches}) was performed.
Each running program has an assigned process identifier (PID), which is displayed in the first column.
If a program entry is expanded, a list of thread identifiers (TIDs) will also be displayed.
The \emph{running time} column shows how much processor time was used by a process or thread.
The percentage may be over 100\%, as it is scaled to the trace length, and multiple threads belonging to a single program may be executing simultaneously.
The \emph{running regions} column displays how many times a given entry was in the \emph{running} state, and the \emph{CPU migrations} column shows how many times an entry was moved from one CPU core to another while it was suspended by the system scheduler.
The profiled program is highlighted in green.
Furthermore, a yellow highlight indicates threads which are known to the profiler (that is, threads which sent events due to instrumentation).
\subsection{Annotation settings window}
\label{annotationsettings}
In this window you may modify how a timeline annotation (section~\ref{annotatingtrace}) is presented by setting its text description or selecting the region highlight color.
If the note is no longer needed, it may also be removed here.
\subsection{Annotation list window}
\label{annotationlist}
This window lists all annotations marked on the timeline.
Each annotation is presented as shown in figure~\ref{figannlist}.
From left to right, the elements are:
\begin{itemize}
\item \emph{\faEdit{} Edit} -- Opens the annotation settings window (section~\ref{annotationsettings}).
\item \emph{\faMicroscope{} Zoom} -- Zooms the timeline to the annotation's extent.
\item \emph{\faTrash*{} Remove} -- Removes the annotation.
You must press the \keys{\ctrl} key to enable this button.
\item Colored box -- The color of the annotation.
\item Text description of the annotation.
\end{itemize}
\begin{figure}[h]
\centering\begin{tikzpicture}
\draw[rounded corners=5pt] (0.0, 0) rectangle+(0.5, -0.5) node [midway] {\faEdit};
\draw[rounded corners=5pt] (0.6, 0) rectangle+(0.5, -0.5) node [midway] {\faMicroscope};
\draw[rounded corners=5pt] (1.2, 0) rectangle+(0.5, -0.5) node [midway] {\faTrash*};
\draw[rounded corners=5pt, pattern=crosshatch dots] (1.8, 0) rectangle+(0.5, -0.5);
\draw[rounded corners=5pt] (2.4, 0) node[anchor=north west] {Text description};
\end{tikzpicture}
\caption{Annotation list entry}
\label{figannlist}
\end{figure}
\section{Importing external profiling data}
Tracy can import data generated by other profilers.
This external data cannot be loaded directly, but must be converted first.
Currently there is only support for converting chrome:tracing data, through the \texttt{import-chrome} utility.
\begin{bclogo}[ noborder=true, couleur=black!5, logo=\bcattention ]{Limitations}
\begin{itemize}
\item Tracy is a single-process profiler.
There is no differentiation between data coming from different PIDs.
\item Tracy uses thread identifiers assigned by the operating system.
This means that no two concurrent threads can have the same TID.
Be aware that some external data formats may encourage the usage of duplicated thread identifiers.
\item The imported data may be severely limited, either by not mapping directly to the data structures used by Tracy, or by following undocumented practices.
\end{itemize}
\end{bclogo}
\section{Configuration files}
While the client part doesn't read or write anything to the disk (with the exception of accessing the \texttt{/proc} filesystem on Linux), the server part has to keep some persistent state.
The naming conventions and internal data format of the files are not meant to be known by profiler users, but you may want to back up the configuration or move it to another machine.
On Windows, settings are stored in the \texttt{\%APPDATA\%/tracy} directory.
All other platforms use the \texttt{\$XDG\_CONFIG\_HOME/tracy} directory, or \texttt{\$HOME/.config/tracy} if the \texttt{XDG\_CONFIG\_HOME} environment variable is not set.
\subsection{Root directory}
Various files in the root configuration directory store common profiler state such as UI window positions, connection history, etc.
\subsection{Trace specific settings}
\label{tracespecific}
Trace files saved on disk are immutable and can't be changed, but it may be desirable to store additional per-trace information to be used by the profiler, for example a custom description of the trace, or the timeline view position used in the previous profiling session.
This external data is stored in the \texttt{user/[letter]/[program]/[week]/[epoch]} directory, relative to the configuration's root directory.
The \texttt{program} part is the name of the profiled application (for example \texttt{program.exe}).
The \texttt{letter} part is the first letter of the profiled application's name.
The \texttt{week} part is the number of weeks since the Unix epoch, and the \texttt{epoch} part is the number of seconds since the Unix epoch.
This rather unusual convention prevents the creation of directories with hundreds of entries.
User settings are never pruned by the profiler.
\newpage
\appendix
\appendixpage
\section{License}
\verbatiminput{../LICENSE.}
\section{List of contributors}
\verbatiminput{../AUTHORS.}
\section{Inventory of external libraries}
The following libraries are included with and used by the Tracy Profiler.
Entries marked with a \faStar{}~icon are used in the client code.
\begin{itemize}
\item 3-clause BSD license
\begin{itemize}
\item getopt\_port -- \url{https://github.com/kimgr/getopt\_port}
\item libbacktrace \faStar{} -- \url{https://github.com/ianlancetaylor/libbacktrace}
\item Zstandard -- \url{https://github.com/facebook/zstd}
\item capstone -- \url{https://github.com/aquynh/capstone}
\end{itemize}
\item 2-clause BSD license
\begin{itemize}
\item concurrentqueue \faStar{} -- \url{https://github.com/cameron314/concurrentqueue}
\item LZ4 \faStar{} -- \url{https://github.com/lz4/lz4}
\item xxHash -- \url{https://github.com/Cyan4973/xxHash}
\end{itemize}
\item Public domain
\begin{itemize}
\item rpmalloc \faStar{} -- \url{https://github.com/rampantpixels/rpmalloc}
\item gl3w -- \url{https://github.com/skaslev/gl3w}
\item stb\_image -- \url{https://github.com/nothings/stb}
\end{itemize}
\item zlib license
\begin{itemize}
\item Native File Dialog -- \url{https://github.com/mlabbe/nativefiledialog}
\item GLFW -- \url{https://github.com/glfw/glfw}
\item IconFontCppHeaders -- \url{https://github.com/juliettef/IconFontCppHeaders}
\item pdqsort -- \url{https://github.com/orlp/pdqsort}
\end{itemize}
\item MIT license
\begin{itemize}
\item Dear ImGui -- \url{https://github.com/ocornut/imgui}
\item JSON for Modern C++ -- \url{https://github.com/nlohmann/json}
\item robin-hood-hashing -- \url{https://github.com/martinus/robin-hood-hashing}
\end{itemize}
\item Apache license 2.0
\begin{itemize}
\item Arimo font -- \url{https://fonts.google.com/specimen/Arimo}
\item Cousine font -- \url{https://fonts.google.com/specimen/Cousine}
\end{itemize}
\item Font Awesome Free License
\begin{itemize}
\item Font Awesome -- \url{https://fontawesome.com/}
\end{itemize}
\item FreeType License
\begin{itemize}
\item FreeType -- \url{https://www.freetype.org/}
\end{itemize}
\end{itemize}
\bibliographystyle{alpha}
\bibliography{tracy}
\end{document}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % LaTeX Template % Version 1.1 (30/4/2014) % % Original author: % Debarghya Das (http://debarghyadas.com) % New author: % Pranav Ramarao \documentclass[]{resume-openfont} \begin{document} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LAST UPDATED DATE % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \lastupdated %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TITLE NAME % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \namesection{Pranav}{Ramarao}{ % \urlstyle{same}\url{http://linkedin.com/pranavramarao} \\ \Letter \href{mailto:[email protected]}{ [email protected] } \textbullet{} { \Mobilefone { +1 (734)-680-4390}} \\ Full Time Roles \textbullet{} Software Engineering Position \textbullet{} Graduating Dec 2017\\ } %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % COLUMN ONE % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \begin{minipage}[t]{0.25\textwidth} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % EDUCATION %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Education} \subsection{University of Michigan} \descript{MS in Computer Science} \location{Exp Dec 2017 | Ann Arbor \\ GPA: 3.79} \sectionsep \textbullet{} TA for EECS 281 (Data Structures and Algorithms)\\ \textbullet{} Taught 120+ students\\ \textbullet{} Outstanding TA award \sectionsep \subsection{BITS Pilani} \descript{BE in Computer Science} \location{May 2015 | Hyderabad \\ GPA: 9.29} \sectionsep %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % SKILLS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Skills} \subsection{Focus Areas} \textbullet{} Algorithms \\ \textbullet{} Information Retrieval \\ \textbullet{} Machine Learning \\ \textbullet{} Distributed Systems \\ \textbullet{} Software Engineering\\ \sectionsep \subsection{Programming} \location{Proficient in:} C++ \textbullet{} C\# \textbullet{} Python \textbullet{} Java \textbullet{} SQL \textbullet{} Git \textbullet{} Shell\\ \location{IDE:} Visual Studio \textbullet{} Xcode\textbullet{} Eclipse \\ \sectionsep \subsection{Online Judges} Codechef rating: 1958 \\ Codeforces rating: 1701 \\ \textbullet{} Solved 500+ problems \\ \textbullet{} Won several awards in coding competitions \sectionsep %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % LINKS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Links} Github:// \href{https://github.com/pranavr93}{\custombold{pranavr93}} \\ LinkedIn:// \href{https://www.linkedin.com/in/pranavramarao}{\custombold{pranavramarao}} \\ \sectionsep %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % REFERENCES %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{References} \descript{B. Ashok} Senior Director \\ Microsoft Research India \\ [email protected] \\ \sectionsep \descript{Suresh Parthasarathy} Senior Research Engineer \\ Microsoft Research, India \\ [email protected] % \sectionsep % \descript{Dr. David Paoletti} % EECS 281 Professor \\ % University of Michigan \\ % [email protected] %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % COLUMN TWO % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \end{minipage} \hfill \begin{minipage}[t]{0.74\textwidth} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % EXPERIENCE %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \section{Experience} \runsubsection{Google} \descript{| Software Engineering Intern } \location{June 2017 – August 2017 | Sunnyvale, California} \vspace{\topsep} % Hacky fix for awkward extra vertical space \begin{tightemize} \item Worked in the storage platforms team on bringing additional analytics metrics on HDD drives, such as throughput, tail latencies and DMA histograms into the current pipeline. 
\item Anomalies based on the above metrics are detected in drives across the Google fleet and alerts are raised for repair strategies at the software/hardware level.
\end{tightemize}
\sectionsep
% \runsubsection{University of Michigan}
% \descript{| Graduate Student Instructor }
% \location{Sept 2016 – May 2017 | Ann Arbor}
% \begin{tightemize}
% \item Teach EECS 281 (Algorithms and Data Structures) for undergrad students
% \item Handle discussion sections (120+ students overall), office hours (assisted 200+ students), helped set lab assignments, projects and exams.
% \end{tightemize}
% \sectionsep
\runsubsection{Microsoft Research}
\descript{| Research Fellow }
\location{July 2015 – July 2016 | Bangalore, India}
\begin{tightemize}
\item Engaged in end-to-end development of a new e-mail client (Email Insights) that supported powerful context-based search, auto-completion, spell correction and fuzzy contact search.
\item Demoed it at TechFest in Microsoft HQ - selected for Garage (public) release
\item Public release and press links: \textbf{\href{http://www.zdnet.com/product/email-insights/}{Zdnet, }} \textbf{\href{https://www.microsoft.com/en-us/garage/profiles/email-insights/}{Microsoft Garage, }} \textbf{\href{http://www.pcworld.com/article/3170146/windows/microsofts-email-insights-finally-adds-some-useful-search-smarts-to-outlook.html}{Pcworld}}
\item The work was published in ACM - SIGIR, 2016 \textbf{\href{http://dl.acm.org/citation.cfm?id=2911451.2911458}{(InLook: Revisiting Email Search Experience)}}
\end{tightemize}
\sectionsep
\runsubsection{Microsoft Research}
\descript{| Software Engineering Intern}
\location{Jan 2015 – May 2015 | Bangalore, India}
\begin{tightemize}
\item Worked on ‘Debug Advisor’ - an information assistant that provided developers with contextual information during bug resolution, based on past data from similar solved bugs.
\item Made use of Titan (graph database) and developed high-performance algorithms for indexing 30 million records in a few hours and supporting quick queries on them.
\end{tightemize}
\sectionsep
% \runsubsection{Google Summer of Code}
% \descript{| Student Developer}
% \location{June 2015 – Sept 2015 | Bangalore, India}
% \begin{tightemize}
% \item Closely interacted with the Mono project (Xamarin) team and built a code-visualization add-in in Xamarin Studio.
% \item Made use of a language service (NRefactory6 - Roslyn) and developed layout algorithms for the same.
% \end{tightemize}
% \sectionsep
\runsubsection{Gradbusters.com}
\descript{| Co-Founder}
\begin{tightemize}
\item Led a team of engineers in building a data-centric platform that helps students aspiring to do their graduate studies in the US.
\item The website provides intelligent tools that make predictions and recommendations based on past data.
\item The site had 100k+ visits and 7000 registered users within 6 months of launch.
\end{tightemize}
\sectionsep
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% PROJECTS
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Projects}
% \runsubsection{Distributed Sharded Key Value Store}
\descript{Distributed Sharded Key Value Store}
\vspace{-7pt}\justify
Built a sharded key value store backed by Paxos in C++14. The system supports linearizable consistency. Consistent hashing was used for key distribution. The KV-store can handle concurrent requests, failures of replicas and addition of new shards.
\textbf{\href{https://github.com/pranavr93/sharded_key_value_store}{Github}}
\sectionsep
\descript{Piazza BOT}
\vspace{-7pt}\justify
Developed a smart bot for Piazza, named Jarvis, that aimed to improve the efficiency of instructors and the experience of students. The bot performed duplicate question detection, automated weekly FAQ generation and provided a smarter search.
\textbf{\href{https://github.com/pranavr93/piazza_bot}{Github}}
\sectionsep
\descript{Google Summer of Code '15}
\vspace{-7pt}\justify
Closely interacted with the Mono project (Xamarin) team and built a code-visualization add-in in Xamarin Studio. Made use of a language service (NRefactory6 - Roslyn) and developed layout algorithms for the same.
\textbf{\href{https://github.com/pranavr93/MDClassDiagram}{Github}}
\sectionsep
% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% % PATENTS AND PUBLICATIONS
% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% \section{Patents and Publications}
% \begin{tightemize}
% \item Pranav Ramarao, et al. “Contextual windows for general purpose applications” (Patent pending)
% \item Pranav Ramarao, et al. “InLook: Rethinking Emails”, 39th International ACM SIGIR Conference on Research and Development in IR, Pisa, Italy
% \textbf{\href{http://dl.acm.org/citation.cfm?id=2911451.2911458}{Link}}
% \end{tightemize}
% \sectionsep
% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% % PATENTS AND PUBLICATIONS
% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% \section{Patents and Publications}
% \begin{tightemize}
% \item Pranav Ramarao, Suresh Iyengar, C.Pushkar, U. Raghavendra, B.Ashok, “Contextual windows for general purpose applications”, Application No MS \#359952.01/41827-8143IN (Patent pending)
% \item Pranav Ramarao, Suresh Iyengar, C.Pushkar, U. Raghavendra, B.Ashok, “InLook: Rethinking Emails”, 39th International ACM SIGIR Conference on Research and Development in Information Retrieval, Pisa, Italy
% \textbf{\href{http://dl.acm.org/citation.cfm?id=2911451.2911458}{Link}}
% \item Pranav Ramarao, K Muthukumaran, D. Siddharth, N L Bhanu Murthy “Impact of Bug Reporter’s Reputation on Bug-fix Times”, International Conference on Information Systems Engineering (ICISE2016), Los Angeles, USA
% \textbf{\href{http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=7486274}{Link}}
% \end{tightemize}
% \sectionsep
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% ACCOMPLISHMENTS
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{ACCOMPLISHMENTS}
\begin{tabular}{rll}
2016 & Won the company-wide Hackathon at Microsoft in the Internet Category \\
2015 & Pending Patent: "Contextual windows for General Purpose Applications"\\
2014 & Winner of Google Codejam India as part of the GDG conference \\
\end{tabular}
\sectionsep
\end{minipage}
\end{document}
\documentclass[main]{subfiles}
\begin{document}
%@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
% summarizes lecture
% author:
\subsubsection{Merge Sort}
\renewcommand{\arraystretch}{1.5}
\definecolor{lgray}{gray}{0.95}
\definecolor{gray}{gray}{0.9}
\rowcolors{1}{lgray}{gray}
Conceptually, a merge sort works as follows:
\begin{enumerate}
\item Divide the unsorted list into n sublists, each containing 1 element (a list of 1 element is considered sorted). (Time: O(n), Space: O(n) total)
\item Repeatedly merge sublists to produce new sorted sublists until there is only 1 sublist remaining. This will be the sorted list. (Time: O(n log n), Space: O(n) total, O(n) auxiliary)
\end{enumerate}
\begin{tabular}{ll}
Worst case performance & O(n log n)\\
Best case performance & O(n log n) typical, O(n) natural variant\\
Average case performance & O(n log n)\\
Worst case space complexity & O(n) total, O(n) auxiliary\\
\end{tabular}
\scriptsize
\todo[inline]{Separate it properly into split and merge as in C++.}
\begin{lstlisting}[language=Java]
public int[] mergeSort(int array[])
// pre: array is full, all elements are valid integers (not null)
// post: array is sorted in ascending order (lowest to highest)
{
    // if the array has more than 1 element, we need to split it and merge the sorted halves
    if(array.length > 1)
    {
        // number of elements in sub-array 1
        // if odd, sub-array 1 has the smaller half of the elements
        // e.g. if 7 elements total, sub-array 1 will have 3, and sub-array 2 will have 4
        int elementsInA1 = array.length / 2;
        // we initialize the length of sub-array 2 to
        // equal the total length minus the length of sub-array 1
        int elementsInA2 = array.length - elementsInA1;
        // declare and initialize the two arrays once we've determined their sizes
        int arr1[] = new int[elementsInA1];
        int arr2[] = new int[elementsInA2];
        // copy the first part of 'array' into 'arr1', causing arr1 to become full
        for(int i = 0; i < elementsInA1; i++)
            arr1[i] = array[i];
        // copy the remaining elements of 'array' into 'arr2', causing arr2 to become full
        for(int i = elementsInA1; i < elementsInA1 + elementsInA2; i++)
            arr2[i - elementsInA1] = array[i];
        // recursively call mergeSort on each of the two sub-arrays that we've just created
        // note: when mergeSort returns, arr1 and arr2 will both be sorted!
        // it's not magic, the merging is done below, that's how mergesort works :)
        arr1 = mergeSort(arr1);
        arr2 = mergeSort(arr2);
        // the three variables below are indexes that we'll need for merging
        // [i] stores the index of the main array. it will be used to let us
        // know where to place the smallest element from the two sub-arrays.
        // [j] stores the index of which element from arr1 is currently being compared
        // [k] stores the index of which element from arr2 is currently being compared
        int i = 0, j = 0, k = 0;
        // the below loop will run until one of the sub-arrays becomes empty
        // in my implementation, it means until the index equals the length of the sub-array
        while(arr1.length != j && arr2.length != k)
        {
            // if the current element of arr1 is less than current element of arr2
            if(arr1[j] < arr2[k])
            {
                // copy the current element of arr1 into the final array
                array[i] = arr1[j];
                // increase the index of the final array to avoid replacing the element
                // which we've just added
                i++;
                // increase the index of arr1 to avoid comparing the element
                // which we've just added
                j++;
            }
            // if the current element of arr2 is less than current element of arr1
            else
            {
                // copy the current element of arr2 into the final array
                array[i] = arr2[k];
                // increase the index of the final array to avoid replacing the element
                // which we've just added
                i++;
                // increase the index of arr2 to avoid comparing the element
                // which we've just added
                k++;
            }
        }
        // at this point, one of the sub-arrays has been exhausted and there are no more
        // elements in it to compare. this means that all the elements in the remaining
        // array are the highest (and sorted), so it's safe to copy them all into the
        // final array.
        while(arr1.length != j)
        {
            array[i] = arr1[j];
            i++;
            j++;
        }
        while(arr2.length != k)
        {
            array[i] = arr2[k];
            i++;
            k++;
        }
    }
    // return the sorted array to the caller of the function
    return array;
}
\end{lstlisting}
\end{document}
% Options for packages loaded elsewhere \PassOptionsToPackage{unicode}{hyperref} \PassOptionsToPackage{hyphens}{url} % \documentclass[ ]{book} \usepackage{lmodern} \usepackage{amssymb,amsmath} \usepackage{ifxetex,ifluatex} \ifnum 0\ifxetex 1\fi\ifluatex 1\fi=0 % if pdftex \usepackage[T1]{fontenc} \usepackage[utf8]{inputenc} \usepackage{textcomp} % provide euro and other symbols \else % if luatex or xetex \usepackage{unicode-math} \defaultfontfeatures{Scale=MatchLowercase} \defaultfontfeatures[\rmfamily]{Ligatures=TeX,Scale=1} \fi % Use upquote if available, for straight quotes in verbatim environments \IfFileExists{upquote.sty}{\usepackage{upquote}}{} \IfFileExists{microtype.sty}{% use microtype if available \usepackage[]{microtype} \UseMicrotypeSet[protrusion]{basicmath} % disable protrusion for tt fonts }{} \makeatletter \@ifundefined{KOMAClassName}{% if non-KOMA class \IfFileExists{parskip.sty}{% \usepackage{parskip} }{% else \setlength{\parindent}{0pt} \setlength{\parskip}{6pt plus 2pt minus 1pt}} }{% if KOMA class \KOMAoptions{parskip=half}} \makeatother \usepackage{xcolor} \IfFileExists{xurl.sty}{\usepackage{xurl}}{} % add URL line breaks if available \IfFileExists{bookmark.sty}{\usepackage{bookmark}}{\usepackage{hyperref}} \hypersetup{ pdftitle={The Wisdom of Proverbs for my Children}, pdfauthor={Lucas Weeks}, hidelinks, pdfcreator={LaTeX via pandoc}} \urlstyle{same} % disable monospaced font for URLs \usepackage{longtable,booktabs} % Correct order of tables after \paragraph or \subparagraph \usepackage{etoolbox} \makeatletter \patchcmd\longtable{\par}{\if@noskipsec\mbox{}\fi\par}{}{} \makeatother % Allow footnotes in longtable head/foot \IfFileExists{footnotehyper.sty}{\usepackage{footnotehyper}}{\usepackage{footnote}} \makesavenoteenv{longtable} \usepackage{graphicx} \makeatletter \def\maxwidth{\ifdim\Gin@nat@width>\linewidth\linewidth\else\Gin@nat@width\fi} \def\maxheight{\ifdim\Gin@nat@height>\textheight\textheight\else\Gin@nat@height\fi} \makeatother % Scale images if necessary, so that they will not overflow the page % margins by default, and it is still possible to overwrite the defaults % using explicit options in \includegraphics[width, height, ...]{} \setkeys{Gin}{width=\maxwidth,height=\maxheight,keepaspectratio} % Set default figure placement to htbp \makeatletter \def\fps@figure{htbp} \makeatother \setlength{\emergencystretch}{3em} % prevent overfull lines \providecommand{\tightlist}{% \setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}} \setcounter{secnumdepth}{5} \usepackage{booktabs} \usepackage{amsthm} \makeatletter \def\thm@space@setup{% \thm@preskip=8pt plus 2pt minus 4pt \thm@postskip=\thm@preskip } \makeatother \usepackage[]{natbib} \bibliographystyle{apalike} \title{The Wisdom of Proverbs for my Children} \author{Lucas Weeks} \date{2020-08-19} \begin{document} \maketitle { \setcounter{tocdepth}{1} \tableofcontents } \hypertarget{introduction}{% \chapter{Introduction}\label{introduction}} At some point, I'd like to add an introduction. 
\hypertarget{the-woman-named-wisdom-calls-out}{% \chapter{\texorpdfstring{The Woman Named \emph{Wisdom} Calls Out}{The Woman Named Wisdom Calls Out}}\label{the-woman-named-wisdom-calls-out}} \hypertarget{proverbs-81-5}{% \section{Proverbs 8:1-5}\label{proverbs-81-5}} \begin{quote} Doesn't wisdom cry out?\\ Doesn't understanding raise her voice?\\ On the top of high places by the way,\\ where the paths meet, she stands.\\ Beside the gates, at the entry of the city,\\ at the entry doors, she cries aloud:\\ ``I call to you men!\\ I send my voice to the sons of mankind.\\ You simple, understand prudence!\\ You fools, be of an understanding heart! \end{quote} \textbf{Questions to consider} \begin{itemize} \tightlist \item What is Wisdom doing? Why? \item Where is she calling out? \item Who is she calling out to? \item What does \emph{naive} mean? \begin{itemize} \tightlist \item \emph{(of a person or action) showing a lack of experience, wisdom, or judgment} \end{itemize} \item What does \emph{prudence} mean? \begin{itemize} \tightlist \item \emph{acting with or showing care and thought for the future} \end{itemize} \end{itemize} This year Daddy wants to study the Proverbs together. Why? Because I want you children to be wise and not foolish. A wise man or woman will be protected and will be a blessing to everyone around him. But a foolish man will destroy himself and the people closest to him. God has given us this book of Proverbs to teach us how to be wise. So we should read it and study it carefully. \emph{Monday, August 17, 2020} \begin{center}\rule{0.5\linewidth}{0.5pt}\end{center} \hypertarget{proverbs-86-11}{% \section{\texorpdfstring{\emph{Proverbs 8:6-11}}{Proverbs 8:6-11}}\label{proverbs-86-11}} \begin{quote} Hear, for I will speak excellent things.\\ The opening of my lips is for right things.\\ For my mouth speaks truth.\\ Wickedness is an abomination to my lips.\\ All the words of my mouth are in righteousness.\\ There is nothing crooked or perverse in them.\\ They are all plain to him who understands,\\ right to those who find knowledge.\\ Receive my instruction rather than silver,\\ knowledge rather than choice gold.\\ For wisdom is better than rubies.\\ All the things that may be desired can't be compared to it. \end{quote} \textbf{Questions to consider} \begin{itemize} \tightlist \item Why do we have to be told to listen? Do we like to hear the truth? \item Can you be good without telling the truth? Can you tell the truth and be wicked? \item What is the difference between wisdom and knowledge? \item How valuable is the truth? How valuable is wisdom? \end{itemize} We must be exhorted to listen because we don't want to. We like to tell ourselves lies, and we often like to listen to other people tell us lies. Some people whant to separate goodness from truthfulness. They say you can speak lies but still be good. That's a lie. God is Righteous \textbf{and} True. There is no separating those two characteristics with Him. \emph{Tuesday, August 18, 2020} \begin{center}\rule{0.5\linewidth}{0.5pt}\end{center} \hypertarget{proverbs-812-13}{% \section{Proverbs 8:12-13}\label{proverbs-812-13}} \begin{quote} 12 ``I, wisdom, have made prudence my dwelling.\\ Find out knowledge and discretion.\\ 13 The fear of Yahweh is to hate evil.\\ I hate pride, arrogance, the evil way, and the perverse mouth. \end{quote} \textbf{Questions to consider} \begin{itemize} \tightlist \item What does \emph{prudence} mean, again? 
\begin{itemize} \tightlist \item Remember that \emph{prudence} means \emph{acting with or showing care and thought for the future}. \end{itemize} \item We had a conversation yesterday about the importance of delayed gratification. What does delayed gratification have to do with prudence? \item What does it mean for wisdom to have a home in ``prudence?'' \item What does \emph{discretion} mean? Why is it important? \begin{itemize} \tightlist \item \emph{Discretion} means acting or speaking in a way that avoids causing offense or revealing private information. \end{itemize} \item What things do you hate? \item Do you naturally hate evil? Or do you have to learn to hate evil? \item What is the fear of the Lord/Yahweh? Why does hating evil show that you fear the Lord? \item What does God hate? \end{itemize}
To learn wisdom, we must learn all three things: how to think, how to talk, and how to act. We must have our emotions trained \emph{as well as} our minds. There is no separating the two of them, no matter how hard we might try. \emph{Wednesday, August 19, 2020}
\begin{center}\rule{0.5\linewidth}{0.5pt}\end{center}
\hypertarget{proverbs-815-21}{%
\section{Proverbs 8:15-21}\label{proverbs-815-21}}
\begin{quote} 14 Counsel and sound knowledge are mine.\\ I have understanding and power.\\ 15 By me kings reign,\\ and princes decree justice.\\ 16 By me princes rule,\\ nobles, and all the righteous rulers of the earth.\\ 17 I love those who love me.\\ Those who seek me diligently will find me.\\ 18 With me are riches, honor,\\ enduring wealth, and prosperity.\\ 19 My fruit is better than gold, yes, than fine gold,\\ my yield than choice silver.\\ 20 I walk in the way of righteousness,\\ in the middle of the paths of justice,\\ 21 that I may give wealth to those who love me.\\ I fill their treasuries. \end{quote} \end{document}
{ "alphanum_fraction": 0.7513215083, "avg_line_length": 32.3688212928, "ext": "tex", "hexsha": "a0fc3a33ada10c544e57f8b8a1f3732fe696b155", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "dcd6d01619dc1cd00f66bdd7356455248b2af2e7", "max_forks_repo_licenses": [ "CC0-1.0" ], "max_forks_repo_name": "ldweeks/The-Wisdom-of-Proverbs-For-My-Children", "max_forks_repo_path": "docs/the-wisdom-of-proverbs-for-my-children.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "dcd6d01619dc1cd00f66bdd7356455248b2af2e7", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "CC0-1.0" ], "max_issues_repo_name": "ldweeks/The-Wisdom-of-Proverbs-For-My-Children", "max_issues_repo_path": "docs/the-wisdom-of-proverbs-for-my-children.tex", "max_line_length": 359, "max_stars_count": 1, "max_stars_repo_head_hexsha": "dcd6d01619dc1cd00f66bdd7356455248b2af2e7", "max_stars_repo_licenses": [ "CC0-1.0" ], "max_stars_repo_name": "ldweeks/The-Wisdom-of-Proverbs-For-My-Children", "max_stars_repo_path": "docs/the-wisdom-of-proverbs-for-my-children.tex", "max_stars_repo_stars_event_max_datetime": "2020-08-18T23:35:49.000Z", "max_stars_repo_stars_event_min_datetime": "2020-08-18T23:35:49.000Z", "num_tokens": 2590, "size": 8513 }
\input{Templates/Voorpagina}
\begin{document}
\auteur{Kwinten Missiaen, Steven Thuriot, Koen Van den dries, Bart Vangeneugden}
\opleiding{Methodologies for Design of Software}
\titel{Taskmanager: Iteration 3}
\academiejaar{2009 - 2010}
\promotor{Tom Holvoet\\Mario Cruz Torres}
\maakvoorblad \newpage \thispagestyle{empty} \mbox{} \newpage \maakvoorblad \newpage \tableofcontents \newpage
\part{Introduction}
This document serves as a documentation instrument for the MOP Team Assignment.\\ In the following chapters, the reader will get a general overview of the outer and inner workings of the application, accompanied by several diagrams. The report is made up of two parts. The first part contains all the design decisions we made in the first and second iteration of the project. In part 2 we will discuss how we handled the new requirements and how we changed our application to meet those requirements. We will go in depth into the structure of our code. We will discuss in detail which classes have which responsibility, and why. Our testing approach is also explained briefly.\\ We conclude with our team organization, planning and a short self-evaluation.
\part{Iteration 1 + 2}
\section{System Operations}
\subsection{Task Management}
The system revolves around Tasks. There are many different entities to keep in mind, such as Users, Resources or Projects, but all of these entities relate to Tasks in some way.
\subsubsection{Creation of tasks}
When creating a Task, the User is asked to give details about the task at hand. The user is expected to enter a short description, the start time of the task, the deadline and the average duration of the task. Also, the user is presented with a list of resources and other tasks already in the system. The user then selects a few dependencies for the task he's creating and resources he wishes to allocate. When creating a task, the user also has to make sure he does not violate the business rules. This means the user enters a duration, start date and end date. The start date must come before the end date and the difference between end and start date has to be larger than or equal to the duration. The system also checks whether the entered description is an empty string; if it is, an error is shown.\\
\begin{figure}[H] \begin{center} \includegraphics[scale=0.5]{images/ssd_create_task.jpg} \end{center} \caption{System Sequence Diagram describing the creation of a task} \end{figure}
\subsubsection{Removing Tasks}
When a task is to be removed, the system first has to check how that will affect other entities in the system. Is the Task still required by other Tasks as a dependency? If this is the case, the user will receive an error message and is asked how he wants to proceed: cancel the operation or delete all the dependent tasks.
\begin{figure}[H] \begin{center} \includegraphics[scale=0.5]{images/ssd_remove_task.jpg} \end{center} \caption{System Sequence Diagram describing the removal of a task} \end{figure}
\subsubsection{Modifying Tasks}
When modifying tasks, the system has to make sure the user follows all the rules described above. This is because the user has the option to change all of the schedule variables, dependencies and required resources. This means that the Business Rules have to be tested, as well as the Empty Description rule. Checking is also done on the task dependencies. For instance, it should not be possible to create a task A that depends on task B, and then modify task B to depend on task A.
This would create a dependency loop.\\
\begin{figure}[H] \begin{center} \includegraphics[scale=0.5]{images/ssd_modify_task.jpg} \end{center} \caption{System Sequence Diagram describing the editing of a task} \end{figure}
\subsubsection{Updating a Task status}
Updating the status of a task is integrated in modifying a task, but requires special attention, as many different rules apply when adjusting the status of a Task. Updating the status can directly affect the status of dependent tasks. When a task was marked Successful, but is reverted to Failed or Unfinished, the user will be asked to update the status of all dependent tasks or to leave everything, including this task's status, unchanged. This is because dependent tasks may have to revert back to Failed or Unfinished, since they depended on the successful completion of this task.
\subsection{Getting a task overview}
\emph{ When asking for an overview of tasks, it is often convenient to sort and/or filter the tasks in a different manner. That way you can get a better overview.\\ We provided a convenient interface for this in the form of a loop. The user can choose how he wants his tasks sorted: by deadline or by duration. If the user chooses to sort the tasks by deadline, he is asked how many tasks are to be shown. If duration is selected, a minimum and a maximum duration are asked. It is made particularly easy to alter and/or add sorting and filtering methods.\\
\begin{figure}[H] \begin{center} \includegraphics[scale=0.5]{images/ssd_focus_work.jpg} \end{center} \caption{System Sequence Diagram describing the overview of all tasks} \end{figure} }
\begin{figure}[H] \begin{center} \includegraphics[scale=0.5]{images/ssd_update_task.jpg} \end{center} \caption{System Sequence Diagram describing the updating of the status of a Task} \end{figure}
\subsection{Resource Management}
\subsubsection{Creating Resources}
A user can create a resource. When this resource is created, it is added to the system and stored there for later use. The system will only check for a valid description. This means the description cannot be empty. Once created, the resource is available for binding to Tasks or Reservations.
\begin{figure}[H] \begin{center} \includegraphics[scale=0.5]{images/SSD_Create_Resource.png} \end{center} \caption{System Sequence Diagram describing the creating of a new resource} \end{figure}
\subsubsection{Remove Resource}
A Resource can be removed. However, the system will first check its dependencies with Task objects and/or Reservations. If the Resource is required by any of these, the system cannot remove the Resource.
\begin{figure}[H] \begin{center} \includegraphics[scale=0.5]{images/SSD_Remove_Resource.png} \end{center} \caption{System Sequence Diagram describing the removing of a resource} \end{figure}
\subsection{Reservations}
\subsubsection{Create Reservations}
Once a Resource is created, the User has the option to make a Reservation for that Resource. When creating a Reservation, the user is asked for the period of time for which he wants to make the Reservation, after being shown a list of current Reservations for the Resource. After the user enters this data, the system checks the entered data to make sure it does not conflict with previous Reservations.
\begin{figure}[H] \begin{center} \includegraphics[scale=0.5]{images/SSD_Make_Resource_Reservation.png} \end{center} \caption{System Sequence Diagram describing the making of a reservation} \end{figure}
\subsection{Project Management}
\subsubsection{Create Project}
A project can be created.
A project can contain many Tasks, but does not have to. The system asks the user for a short description of the Project. If this description is not empty, the system creates the Project.
\begin{figure}[H] \begin{center} \includegraphics[scale=0.5]{images/SSD_Create_Project.png} \end{center} \caption{System Sequence Diagram describing the creating of a project} \end{figure}
\subsubsection{Remove Project}
Removing a project is straightforward. If the user wants to remove a project, all of the tasks connected to that project are removed. The user simply selects a Project and all of the underlying and dependent tasks are removed with it.
\begin{figure}[H] \begin{center} \includegraphics[scale=0.5]{images/SSD_Remove_Project.png} \end{center} \caption{System Sequence Diagram describing the removing of a project} \end{figure}
\subsection{User Interface}
\begin{figure}[H] \begin{center} \includegraphics[width=1.0\textwidth]{images/gui.png} \end{center} \caption{Class diagram of the GUI} \end{figure}
The project uses a text-based UI. All use cases are handled by a subclass of the abstract UseCase class; in the figure, the Create Task use case is given as an example. MainGUI keeps a list of instances of these use cases. When the user initiates a use case, MainGUI calls the corresponding use case, passing on the dispatch controller, which will be discussed later. The use case then creates a new instance of itself with all fields initialized to handle the rest of the use case. The class Menu handles the actual communication with the user through the console, formatting all input and output.
\section{Package Communication}
The whole project is divided into four big packages. These packages are 'model', 'controller' and 'gui'. The last package is 'test', which has been separated from the rest of the project for obvious reasons. We chose to create these packages so everything would be separated from each other. It would be bad practice to let the GUI access the model in a direct way, since this would mean even a small change could result in massive changes throughout the GUI. This is where the controllers come in. They offer a way of communicating with the model. They basically contain a set of functionalities that are used throughout the program. These will then make the correct calls to the model. This way everything is handled without using direct calls, making the system more resilient to changes in the model. All these controllers are finally instantiated inside one big container called the dispatch controller. This controller enables the GUI to instantiate just one controller. Its constructor will take care of the rest. The other controllers are stored inside it and can be accessed by calling the correct getters. All the controllers can then easily be passed along the different views using this one object. Class diagrams of different subsystems are included in the appendix at the end of this document. These include a class diagram of the controller subsystem and a class diagram of all model classes.
\section{Class Descriptions}
\subsection{User}
At first, we intended the User class to be responsible for the creation of Projects and Tasks, because a User object contains a list of Tasks and Projects that belong to that User. In the end, we chose not to do so. We felt that the indirection was not necessary and that it might not benefit the cohesion of the User class.
While not described in the project assignment, we assumed the system could become a multi-user system in the next iteration(s). We therefore based our design on this. Every User object is responsible for keeping track of the Tasks that belong to him. The system can ask the User object to return a list of these Tasks.
\subsection{Task manipulation}
% Explain the design. Also include the GRASP patterns.
Tasks are collected in the User. They contain a list of Resources the User might require to execute the Task. A Task has attributes which define its own description, a Start and End time as well as a Duration. Also, a Task has a list of Tasks on which the Task may depend. This means the status of the Task at hand is dependent on the status of its dependent Tasks.\\ Keeping Low Coupling and High Cohesion in mind, Task has the following responsibilities:
\begin{itemize} \item{Keeping track of its name, start date, due date, duration and status} \item{Keeping track of the resources required to execute the task} \item{Keeping track of its dependencies. Dependencies are implemented as double bindings, so they should be kept consistent} \item{Checking for business rules 1, 2 and 3 and preventing the construction of loops in the dependency scheme} \item{Updating the status of dependent tasks when necessary} \end{itemize}
A Task is not an Information Expert or Creator of Resources. As described in the next chapter, a Resource has its own management. We do feel it is necessary for the Task to know about its Resources.\\ However, a Task is an Information Expert about itself. Therefore, we felt that the Task class should be responsible for enforcing the business rules, preventing the construction of dependency loops, and updating the status of dependent tasks when necessary (as described in the use case 'update task status'). While designing, it was suggested that perhaps the Task class has two distinct responsibilities this way: one concerning its own details, and one concerning the way it interacts with other Tasks in the dependency graph. In the end, we chose to stick to one single Task class, as we felt that the cohesion of this class was sufficiently high.
\subsubsection{Task dependency manager}
\emph{ In the second iteration, we chose to have a second object manage the dependencies of tasks. A new class TaskDependencyManager was created. A Task object aggregates an instance of TaskDependencyManager at all times. All operations that concern only dependencies are delegated to this object.}
\emph{ On one hand, this creates a strong coupling between the Task and the TaskDependencyManager classes. On the other hand, we felt that the Task class was getting too bloated, and that it was responsible for too many things. Delegating some operations to the TaskDependencyManager helps to improve the cohesion of the Task class.}
\begin{figure}[h] \begin{center} \includegraphics[scale=0.5]{images/doAddDependency} \end{center} \caption{\emph{Sequence diagram to add a dependency to a Task}} \end{figure}
\emph{ The diagram shows how adding a dependency to a Task now works. The Task object delegates this action to its TaskDependencyManager. The TaskDependencyManager then cooperates with the TaskDependencyManager of the second Task, to make sure the double binding is kept consistent. Note: the diagram uses the operation 'doAddDependency()', not 'addDependency()'. This is because the operation 'addDependency()' is first delegated to the TaskState class.
This is explained later in the report.}
\subsection{Resource manipulation}
% Explain the design. Also include the GRASP patterns.
Resources can be accessed via Tasks. This is because a Task can define which resources are required for that Task. However, when a Resource is just created, or when a Task to which a Resource was allocated gets removed, the Resource will not be referenced by any Task. The object would not exist. \emph{A resource is therefore stored in a RepositoryManager. This is explained in the section about 'Collecting Data'.} A Resource itself is an Information Expert as well as a Creator for Reservations. We decided to put the responsibility of creating and storing Reservations in Resource because a Resource needs this information to check its own availability, as displayed in the diagram 'Create Reservation'.
\begin{figure}[H] \begin{center} \includegraphics[scale=0.5]{images/create_reservation.jpg} \end{center} \caption{Create Reservation Sequence Diagram} \end{figure}
\subsection{Project manipulation}
\emph{A project is an instance that could contain Tasks. It does not have any binding to other objects and cannot be stored in another domain object. Therefore, a Project is stored the same way a Resource is: by using repositories (see the subsection 'Collecting Data').}
\begin{figure}[H] \begin{center} \includegraphics[scale=0.5]{images/project_class_diagram.jpg} \end{center} \caption{Project Class Diagram} \end{figure}
The responsibilities of the Project class are as follows:
\begin{itemize} \item Keeping track of its own details (description) \item Keeping track of the Tasks that are in the project \item Binding Tasks to or removing them from the project \end{itemize}
\emph{Because an instance of type Project is not stored in any other domain class, we feel that none of these domain classes should act as a Creator of Project instances. We therefore opted to call the Project constructor in the controller, which then acts as a factory. This controller will also add the instance to the repositories.}
\begin{figure}[H] \begin{center} \includegraphics[scale=0.5]{images/create_project.jpg} \end{center} \caption{Create Project Sequence Diagram} \end{figure}
\begin{figure}[H] \begin{center} \includegraphics[scale=0.5]{images/assign_task_to_project.jpg} \end{center} \caption{Assign Task to Project Sequence Diagram} \end{figure}
It is also the Project's responsibility to bind a Task to the Project. We feel that this direction should be maintained because a Project 'contains' or 'aggregates' a Task. This binding is done by calling a method in Project.
\section{Software Structures}
\subsection{Task states}
\subsubsection{State Pattern}
\emph{When receiving the new iteration, it instantly became clear that from now on states would play a big role in the design. Every state has its specific way of handling things. Without using a design pattern to solve this, the methods would become cluttered and unreadable really fast. They would also be very hard to maintain when states are removed or new states are created.}
\begin{figure}[H] \begin{center} \includegraphics[scale=0.6]{images/State_Pattern.png} \end{center} \caption{State Pattern Diagram} \end{figure}
\emph{The perfect way to cope with all this is by using the state pattern. The state pattern offers all the functionality without having a lot of if structures inside the methods. This is done by creating an abstract class with all the methods from Task that depend on the state of the task.
By default, these methods get implemented in the abstract class so they throw an exception, saying the current call is not allowed in the current state. This abstract class has one child for each possible state. The children will then override the methods that they handle differently. As a result, the children will have a clear overview of all the methods that they allow, and have their own unique way to do so.}
\emph{The task object has an instance of one of the children of the abstract state object. When one of these methods is called, the task object will simply redirect the call to the state object. Since the state object is a child, in other words a specific state, it will handle it in a correct manner for the current state.}
\emph{The state object also has a reference to the task object it belongs to. This is needed when the state needs to change. It is very important that the state pattern itself handles the state change. If this were not the case, separating all the states would become completely useless. This is because it is quite possible that a certain state would not allow a transition to another state. In our case, for instance, it is not allowed to set a successful task back to unfinished.}
\emph{We currently have four states: Available, Unavailable, Successful and Failed. Obviously, when a task is created, it will start out as either Available or Unavailable. Because the assignment was quite open about this, we had to think about what state changes we would allow throughout the system. We thought it would be best if Available and Unavailable were the only states that were allowed to change into another state. This would make it a lot easier for us to implement, because when a state would go from Failed to Available, for instance, quite a lot of extra code is needed to figure out if the dependent tasks are failed because the task you are changing was failed or for any other possible reason. In our current implementation, when, for instance, a task is failed and it needs to be changed, it is up to the user to create a brand new task. In the following diagram, you can see what state changes are allowed.}
\begin{figure}[H] \begin{center} \includegraphics[scale=0.7]{images/StateChanges.png} \end{center} \caption{State Changes Graph} \end{figure}
\emph{As you can see, the state changes allowed from Available and from Unavailable are the same. Because of this, we began to wonder what else was similar. After some thorough research, it became clear that the only difference between these two states was time related. Because of this, we decided to merge these two states into one big state, called ``Unfinished''. This state would just calculate at runtime if it is available or not, and act accordingly. To eventually set the state, a method is added per state. There was also a parsing method created, to make it possible for the XML Parser to pass strings found in the XML file to the state. The state would then call one of the methods to change the state if it matches one of the existing states. If not, an exception is thrown. We decided to place this method in the state pattern itself, rather than the XML parser, because this way it is easier to maintain and harder to forget to adjust when a new state is made.}
\emph{The original idea was to put Task, TaskState and all its children into a separate package inside the model package. Since all the methods used in TaskState are protected or private, rather than public, it would be impossible for any other class to call functions of the state.
This, however, was not possible. This is due to one of Java's limitations: Java has no ``real'' sub-packages. They all count as different packages. Because of this, Java does not allow protected methods to be called from a sub-package. Sadly, this is exactly what some of Task's methods needed to do. Because of this, we were forced to implement the state pattern inside the model package as well. A class diagram of the different States is included in the appendix.}
\subsubsection{Observer Pattern}
\emph{When the state of a task changes, this may invalidate Business Rule 2, so when a state is changed other tasks may have to change state as well. We made it the responsibility of the task itself to make sure its state is consistent with the state of its dependencies. For this we used the observer pattern to notify all dependent tasks of a state change, so that they can check the new state and change their state accordingly.}
\emph{At first, because one of the use cases required testing which dependent tasks would change state as a result of the change of state of a task, without actually changing the states, we considered using the push model. This would allow us to notify an observer of a state change and the new state, whereupon it would report back if and how it and its dependent tasks would have changed state, without actually changing state. This, however, would have limited the re-usability of the observer pattern. But as the use-case requirement could be solved in the UI, the observer pattern was reverted to the pull model.\\ The two entities of the pattern are represented by the Subject and Observer<S extends Subject> interfaces, using generics to improve the re-usability of the interfaces. Sadly, the type-erasure implementation of generics in Java only permits an interface to be implemented once by a class, even with different type arguments. A possible solution to this would have been that for each type implementation of the interface we make a sub-interface which implements the type and then implement that interface in the class. This, however, would defeat the purpose of the generic super-interface.\\ As the Subject-Observer relationship is an implicit aspect of the task dependency relationship, no subscribe(Observer) method is needed. While it is the TaskDependencyManager that maintains the dependency relationship, we still chose to make Task both the Observer and Subject, as it is Task that keeps the current state. When the state of a task is changed, the publish() method is called to notify() all observing tasks, which will then decide whether they will update their own state. In order to comply with Business Rule 2, the observing task only needs to change status if the subject state is Failed. The update(Task subject) method of the observer is passed the subject which published the state change, but the observer still needs to verify that it is actually subscribed to the subject to ensure consistency.}
\begin{figure}[H] \begin{center} \includegraphics[scale=0.7]{images/Observer_Pattern.png} \end{center} \caption{Observer Pattern Sequence Diagram} \end{figure}
\emph{In the previous iteration we used a recursive approach. We basically used the push model observer pattern, but with the subject and observer squished into one.}
\subsection{Task overview}
\emph{ As discussed, we want to show the list of tasks in different ways. We might want them sorted by deadline or by duration.
However, there is no reason the functionality should be limited to just these two.\\ Also, there is no reason to clutter the controller with these algorithms. We tried to find a way to split up the following functionalities:
\begin{itemize} \item{Getting a list of tasks} \item{Sorting that list} \item{Filtering} \item{Returning the list} \end{itemize}
We found our solution in the Strategy pattern: we start off by creating an instance of FocusWork. This instance is injected with an implementation of FocusStrategy and the current User. The instance of FocusWork will be responsible for getting the list of tasks. Since the sorting and filtering can differ from strategy to strategy, they are handled by the injected instance of FocusStrategy. That way, neither the controller nor FocusWork (the context) is responsible for, or even aware of, how these algorithms work.\\ There was, however, the issue of creating an instance of FocusWork. We found it bad design to let either the GUI or the TaskController call the constructor of an implementation of FocusStrategy. This would mean higher coupling between those classes. It would also mean a larger footprint when adding new FocusStrategies, since more classes would have to be adjusted. We therefore opted for using the Factory Method pattern to create instances of FocusWork with an injected Strategy.\\ The class FocusFactory takes care of constructing and injecting the Strategies. The GUI now simply calls a method in this class and a prefabricated FocusWork is returned. A class diagram of the FocusWork subsystem is included in the appendix.\\ All this combined makes sure that when we create a new implementation of FocusStrategy, the footprint is kept to a minimum: we have to adjust the GUI to make sure it asks the right questions, and the FocusFactory is adjusted so the right constructor is called in the right way.\\}
\begin{figure}[H] \begin{center} \includegraphics[scale=0.5]{images/FocusFactory.jpg} \end{center} \caption{Create a type of FocusWork with an injected Strategy according to a given Type} \end{figure}
\begin{figure}[H] \begin{center} \includegraphics[scale=0.5]{images/FocusStrategy.jpg} \end{center} \caption{Get a list of tasks, manipulated by a certain Strategy. This strategy is interchangeable} \end{figure}
\begin{figure}[H] \begin{center} \includegraphics[scale=0.5]{images/focus_class_diagram.jpg} \end{center} \caption{Specialized class diagram explaining the Strategy Pattern} \end{figure}
\subsection{Time representation}
\emph{ Because Business Rule 3 relates so strongly to time, it seems natural to look for a way to represent time in our software system. As time progresses, the status of a Task may have to be changed to satisfy Business Rule 3. For example, if the deadline of a Task has passed and it was not finished yet, the Task has failed. This leaves us with two questions. How should we represent time in our system? How should we design the system so that Business Rule 3 can be upheld for all tasks at all times?}
\emph{ To represent time, we created a class 'Clock'. This class represents a certain moment in time, and supports a few very basic operations, such as setTime() and getTime(). If necessary, this class can easily be expanded later to allow for more complex behavior. An instance of Clock is created when the system is started. This object is stored in the RepositoryManager (see next section) and is passed on to every Task that is created. As such, the Tasks can use this object to get the current system time.
This allows them to check for Business Rule 3.}
\begin{figure}[h] \begin{center} \includegraphics[scale=0.6]{images/setTime} \end{center} \caption{\emph{Set time sequence diagram}} \end{figure}
\emph{ Of course, checking for Business Rule 3 is not enough. The system must make sure that Business Rule 3 is satisfied at all times. This means that Tasks must check Business Rule 3 whenever the time changes, and if necessary, change their status accordingly. To accomplish this, we used the Observer pattern, as shown in diagram 'Set time sequence diagram'. Whenever the operation 'setTime()' is called, the Clock object calls another operation 'publish()'. Next, the Clock object asks the RepositoryManager for a list of all Tasks (see next section), and calls 'update(this)' on all of them. The Task is now notified that the current system time has changed. It is now the responsibility of a Task object to reach a consistent state.}
\emph{ We assumed that time can only go forward. Whenever 'setTime()' is called with a time before the current system time, an exception is thrown. This assumption seems very reasonable, and it greatly simplifies the behavior of the Clock and Task classes. A possible downside is that the system and its user interface may become less forgiving of errors. If, by accident, the time is set in the distant future, it will be hard to revert this change.}
\subsection{Collecting data}
Certain types of objects (Resources, Projects) are not required to have a binding with other objects. They can simply be instantiated on their own.\\ In a domain-based model, this was a problem: objects would be instantiated without a place to put them; they would not be contained. In the first iteration, we solved this by using a Singleton manager for Resource. Projects would be bound to a User.\\ This seemed bad design, as the Singleton was unreliable as an Information Expert. It was too general and would fail in most Unit tests. Storing the Project in the User also seemed bad design, as they originally had no connection to each other.\\ A solution was found in using Repositories. We created Generic Repositories for Project, Resource and User that were contained in a RepositoryManager. The RepositoryManager would act as an independent Information Expert and would manage objects by adding and removing them from their respective repositories.\\ By using overloaded add() and remove() operations, this RepositoryManager stayed easy to use and kept the underlying code abstract. We call this the Facade Pattern.\\ We use a technique known from the Spring Framework, called Dependency Injection, to inject a reference to this RepositoryManager into every Controller. This way, they can all access the same information in an object-oriented way.
\section{Software Initialization}
\emph{When the program initializes, the first thing that gets created is the RepositoryManager. This makes sure repositories are created for Project, Resource and User. This also makes sure a Clock is created and contained.} After that, an XML parser object is created to retrieve all the supplied information. The dispatch controller and the location of the XML file are passed to the constructor of the parser. After this it can finally start parsing. The first thing it will do is find all the resources in the file. It will then instantiate the ResourceManager singleton and start adding these resources. Once that has been completed, it will start looking for projects. It will create all the found instances.
After this it looks for all the tasks and, once again, creates them all. Finally, all the tasks, resources and projects are bound according to the specifications in the XML file. It will finally return the user object found in the file. After this the GUI is instantiated. The user object from before is passed along to its constructor, and the GUI then starts listening for input. The system has now fully started and is ready for use.
\newpage
\part{Iteration 3}
\section{New requirements and related changes}
\subsection{Asset system}
A new concept in this iteration is that of helper users and invitations. We had to design a new system to handle these concepts. Furthermore, the resource and reservation concepts changed in this iteration. As the two concepts were fairly similar, we looked into unifying these concepts in our model into an Asset system.
\subsubsection{Invitation}
In the third iteration users can assist others to execute tasks. This can be done by inviting the user in question. In our model the invitation provides a binding between Task and User, while also having a state. As the state is more informational and has no influence on functionality, there was no reason to go with the state design pattern. Invitations are made through a constructor, which is provided with the Task and User to set up the bindings between the two objects. The Invitation itself contains the methods to accept or deny an invitation. On both sides the invitations are handled by dedicated managers. On the Task side we have the TaskInvitationManager, which keeps all invitations extended for that task and has to check certain things before adding the invitation. For example, an invitation cannot be made to the owner of the task, and a user cannot be invited twice for the same task. The User side uses the UserTaskManager. This manager keeps all the invitations of a User as well as all tasks the user owns directly, and provides ways to retrieve invitations filtered by state.
\subsubsection{Reservation}
The concept of a reservation has also changed from the previous iteration. Previously, a task would have required a specific resource. A reservation was a binding between a resource and a user. To check availability, the task had to pass the owner along to the resource, so it could check whether said user had a reservation at the right time. In the new iteration a reservation forms a binding between a task and a resource, with the resource no longer directly required by a task, but instead indirectly through TaskTypeConstraints, as mentioned later. While updating the system to match these changed requirements, changes had to be made throughout the model. This may hint at high coupling in our previous design, although changes to a fundamental concept in the domain model are expected to have a rather far-reaching impact on the model. The implementation of Reservation would be similar to that of Invitation, with a TaskReservationManager on the task side. On the resource side, however, we opted out of a dedicated manager, as Resource has little functionality beyond maintaining its reservations, so we kept this in Resource itself. Use of a dedicated manager would have provided congruence with User, but was not perceived as important.
\subsubsection{AssetAllocation}
Both the reservation/invitation and the user/resource concepts showed strong similarities regarding task requirements and User/ResourceType (see later), so we looked into unifying them into a generalized hierarchy.
UserType and ResourceType have little functional differentiation, so we can generalize them into AssetType. User and Resource both share the aspect that their AssetType is required for a task and that they can be allocated to satisfy these requirements. Invitation and Reservation both allocate an asset to a task, so we can generalize them into AssetAllocation.
\begin{figure} \includegraphics[scale=0.5]{images/Asset.png} \caption{Class diagram of the Asset System} \label{asset} \end{figure}
Even if there is an opportunity to unify the concepts, this by itself does not provide motivation to do so unless there are practical benefits to it. The main practical benefit is that a task is not required to distinguish between reservations and invitations. Instead of using the TaskInvitationManager mentioned above and making a similar TaskReservationManager, we can use a generalized TaskAssetManager which works with AssetAllocations. Furthermore, the TaskTypeConstraints also don't have to distinguish between the constraints for UserTypes or ResourceTypes. In the theme XML (see Custom Themes and Initialization), the TaskTypeConstraints are specified similarly without distinction between User- and ResourceType. This promotes code reuse and may allow new kinds of asset allocations or new kinds of assets to be added to the model later, requiring little additional code, but this will be discussed in more detail later.
\begin{table} \begin{tabular}{|p{4cm}|p{4cm}|p{4cm}|} \hline (Dis)advantages abstract superclass & (Dis)advantages interface & (Dis)advantages representative \\ \hline + Allows for default implementation of methods && + Allows for default implementation of methods\\ + Allows for protected methods && + Allows for protected methods\\ & + Only exposes specific aspect & + Only exposes specific aspect\\ & + Doesn't imply primary function is Asset & + Doesn't imply primary function is Asset\\ & + Doesn't interfere with class hierarchy & + Doesn't interfere with class hierarchy\\ \hline - Interferes with class hierarchy &&\\ - Implies primary function is Asset &&\\ & - Only public methods and fields &\\ & - No default implementation &\\ && - More complexity\\ \hline \end{tabular} \caption{Advantages and disadvantages of generalization forms} \label{generalization} \end{table}
When generalizing, we can choose between several forms of generalization in Java (table \ref{generalization}). First, we can use an abstract superclass. This permits us to extract common methods and provide default implementations where preferred. It does, however, interfere with other inheritance, as Java only permits single inheritance. If, for example, we want to have a different user kind in future iterations which doesn't have the asset aspect to it, this would be difficult, as Asset is already the superclass of User. Further objections are semantic in nature. Making Asset a superclass implies that any subclass is first Asset, second differentiated. For Resource this isn't a problem; User, however, primarily signifies a user of the system and owner of tasks, with the asset functionality having been added only recently. An alternative is using an interface. An interface avoids the inheritance and semantic issues. An interface only reveals that the class has certain functionality to it, without implying importance. Downsides, however, are that methods and fields can only be declared public, causing security risks, and that no default implementation can be given.
A third alternative would be to give the class a representative object which implements the interface or extends the superclass. In this case, it would be preferable to use a superclass, which represents the asset aspect of the class. While this provides the best of both worlds, it does add extra complexity to both design and code. We opted for the interface for Asset and AssetType. For AssetAllocation, since its sole purpose is to allocate assets, there is no problem with semantics. This is why we went with an abstract superclass. In the domain model we have three nice parallel hierarchies: Asset, AssetType and AssetAllocation. These translate to the implementation without problem. When we look at their functional differentiation, we find some redundancy. For AssetType, the interface is often used, but the implementations, UserType and ResourceType, are identical. We could just make AssetType a concrete class, as there is no reason for overriding. The only difference between User- and ResourceType is that the former can only be had by a User and vice versa. There is rarely an explicit distinction between user and resource types in the system. Only in the owner user constraint do we explicitly use UserType. For Asset the interface is in practice redundant. Wherever we use a Resource or User, we explicitly know its kind; they are never approached as an Asset. In spite of these redundancies, we chose to keep them. While there are few cogent arguments for their existence, there are fewer against. Keeping them provides the nice congruence between the hierarchies, as in the domain model. While implementing all this, the benefits and drawbacks became more apparent. The generalization, and particularly the agnosticism in Task, gives us low coupling, whereby parts of the system upwards of the AssetAllocation (TaskAssetManager, TaskTypeConstraint, XMLParser, ...) need know nothing about the nature of the allocations, allowing new kinds of allocations and assets to be added in the future with little work. But while most of the differences between invitation and reservation can be solved internally, there is some functionality which determines their behavior as an asset allocation; this functionality must be generalized, causing lower cohesion. First, an invitation, once issued, can be pending, accepted or rejected. This determines how it counts towards the constraints. Only when accepted does it count towards the minimum constraint, but when accepted or pending it already counts towards the maximum constraint. A method had to be implemented which asks about this behavior, which would not have been the case for reservation, which is always counted towards the maximum constraint. Reservation, on the other hand, has a degree of freedom in time: it decides its availability based on time. For invitation, \emph{isAvailable()} is independent of timing, but for reservation it isn't, so we have to pass along timings for reservation. This lower cohesion may be an advantage if in the future one would gain features of the other, but this is purely speculative. Reservation also has a restriction concerning overlap with other Reservations. This was solved by a \emph{checkProposedAllocation()} method where the Reservation passes itself along, to be checked by all already made allocations. Invitation doesn't have to do anything with this method, but reservation has to compare. As the reservation had lost its type, it required a rather nasty \emph{instanceof} before casting and comparing.
This was ultimately solved by generalizing \emph{getStartTime()} and \emph{getEndTime()}. While these methods would not seem to make sense for Invitation, it simply passes back the task's start and due date. This way the \emph{instanceof} could be avoided; while Invitation doesn't have any explicit timings, it does implicitly have the task's timings. Up to this point, while the generalizations caused lower cohesion by bleeding some functionality from Invitation to Reservation and vice versa, this could be justified by the lower coupling. How future-proof this design is remains uncertain. New allocation and asset kinds could be added without needing much change. However, if these new kinds require additional functionality to be generalized, as we had to do for invitation and reservation, this would lower cohesion further, to the point where it is no longer justifiable. The design also supports only limited dependency between asset allocations. If we had to add something like a queued invitation, which is not extended until another invitation for the same UserType is rejected, this would need to be handled on the level of the TaskAssetManager, breaking its agnosticism. If such events were to happen in the future, we would have to break up the unification as well as the classes higher up that use the allocations, splitting up the TaskAssetManager and TaskTypeConstraints. This would require significant additional coding; however, this would be code which we have currently avoided, so the net loss is nil. If it holds, we gain code reuse; if it breaks, we end up neutral. Basically, it's a case of ``enjoy it while you can''.
\subsection{Custom Themes}
An important new feature in the third iteration is that the task manager should be customizable to match specific task domains. When the application starts, a custom theme is loaded, which is then used throughout the program. This custom theme contains customization for tasks, resources and users. Each of these now belongs to a specific type. In particular, the type of a task specifies many of the requirements for a task. It may for example specify the need for a resource or helper user of a given type.
\subsubsection{General implementation}
Tasks, resources and users are all divided into specific types. These types are specified in the custom theme, and as such can be different for each execution of the program. Corresponding to these types are the software classes TaskType, ResourceType and UserType. These types are created when the application is started and the XML file is parsed. At the moment, it is not possible to create or modify the types after the theme is loaded. Please note that a given type corresponds to an object, not a class. For example, a ResourceType object may specify a type of resource called \emph{computer}. This is absolutely required to cope with the customization. The types are referenced by the corresponding classes, i.e.\ a Resource object has an attribute of class ResourceType, a User an attribute of class UserType and a Task an attribute of class TaskType. These attributes allow for easy and straightforward testing of types. For example, suppose there is a UserType named \emph{developer} and we have to check whether a given User \emph{john} is of this type. All we have to do then is call the User.getType() method on the user \emph{john} and check that it references the UserType \emph{developer}.
\subsubsection{TaskType dependent requirements}
The meaning of the custom types becomes apparent in the requirements for tasks and tasktypes.
Every task is of a given task type. The task's type specifies what types of resources and helper users are needed, as well as the types of users that are allowed to start a task of the given type. Because these requirements are inherent to the task type and not to the specific task at hand, we decided that they are best kept and checked for in the TaskType class. The TaskType class specifies the fields that must be given for a certain TaskType, as well as a list of constraints, which in turn are represented by objects of the TaskTypeConstraint class. Every TaskTypeConstraint object can check whether the constraint it represents is satisfied, making use of the checkConstraint() method. This method takes an argument of type Task, as well as a duration and a start date, and returns a boolean indicating whether the constraint is satisfied (the required assets are allocated) during the given time interval. The method is polymorphic, treating both Resource and User objects as Assets. However, the constraint specifying what user types are allowed for a given task is not specified in a TaskTypeConstraint object, but is tested for directly in the TaskType.checkOwner() method. Alternatively, we could have made two subclasses of the abstract TaskTypeConstraint class, specifying helper user or resource constraints (TaskTypeAssetConstraint) or owner constraints (TaskTypeOwnerConstraint). Note also that the checkConstraint() method takes an argument of type Task. The checkConstraint() method needs access to the Task object for which it must check the constraints. In particular, it must know how many Assets are available for the given Task at the given time. It does not have this reference, because TaskType objects contain no reference to a specific Task. Indeed, if two Tasks are of the same type, they refer to the same TaskType object. Initially, our plan was to also include task-specific information in the TaskType class, such as the Task it refers to, or the values of the fields related to the task type. Eventually, we decided not to keep this information and functionality in the TaskType class. This makes for better cohesion, as the TaskType class now is a well-defined concept. Additionally, it makes checking a task's type simple and straightforward: as with resource types and user types, it suffices to check that two attributes reference the same object. These considerations are summarized in table \ref{Independent TT}.
\begin{table} \begin{tabular}{|l|} \hline (Dis)advantages Task-independent TaskType \\ \hline + The TaskType class has high cohesion \\ + A task's task type can be compared in a good and easy way \\ + Lower coupling: TaskType has no reference to any Task \\ \hline - Coupling is still in place via the checkConstraints() method\\ \hline \end{tabular} \caption{Advantages and disadvantages of a Task-independent TaskType} \label{Independent TT} \end{table}
\begin{figure} \includegraphics[scale=0.5]{images/checkConstraints().jpg} \caption{Sequence diagram for TaskType.checkConstraints()} \label{checkConstraints} \end{figure}
\begin{figure} \includegraphics[scale=0.3]{images/canBeExecuted.png} \caption{Sequence diagram for Task::canBeExecuted()} \label{canBeExecuted} \end{figure}
As mentioned, we use the TaskType class to check the constraints. In turn, it needs to access the Task argument it receives to check whether enough Assets are available for the constraint to be satisfied. A sequence diagram indicating the flow of events is given in figure \ref{checkConstraints}.
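To make the flow in figure \ref{checkConstraints} a bit more concrete, the following minimal Java sketch shows what such a constraint check could look like. The class names follow this report, but the fields, the countAvailableAllocations() helper and the exact signatures are assumptions for illustration only, not the actual implementation.
\begin{verbatim}
import java.util.GregorianCalendar;

// Sketch only: fields and helper methods are assumptions, not the real code.
public class TaskTypeConstraint {

    private final AssetType assetType; // the kind of asset this constraint requires
    private final int minimum;         // minimum number of allocated assets
    private final int maximum;         // maximum number of allocated assets

    public TaskTypeConstraint(AssetType assetType, int minimum, int maximum) {
        this.assetType = assetType;
        this.minimum = minimum;
        this.maximum = maximum;
    }

    // Returns true when the number of assets of the required type that are
    // allocated to the given task and available during the given interval
    // lies between the minimum and maximum of this constraint.
    public boolean checkConstraint(Task task, GregorianCalendar startDate, int duration) {
        int available = task.getTaskAssetManager()
                            .countAvailableAllocations(assetType, startDate, duration);
        return available >= minimum && available <= maximum;
    }
}
\end{verbatim}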
Alternatively, we could have checked for the constraints in the TaskAssetManager, but then this class would need to be aware of constraints. We summarize the advantages and disadvantages of checking constraints in the TaskAssetManager or in the TaskType in table \ref{TaskType Tam}.
\begin{table} \begin{tabular}{|p{7cm}|p{5cm}|} \hline (Dis)advantages checking in TaskType & (Dis)advantages checking in TaskAssetManager\\ \hline + TT knows what constraints have to be satisfied & + TAM knows what Assets are available\\ + High cohesion: TaskType specifies and checks constraints;&\\ TAM needs not know about constraints, only Assets and AssetAllocations& \\ \hline - checkConstraints() needs to pass Task as argument & - Lower cohesion: TAM needs to know about constraints\\ & - TAM needs to fetch constraints from TT\\ \hline \end{tabular} \caption{Advantages and disadvantages of checking constraints in the TaskType or in the TaskAssetManager} \label{TaskType Tam} \end{table}
\subsubsection{TaskFactory}
Because the task's complexity increased severely when including the tasktype functionality, we decided to leave the creation of a task to a specific factory. Originally this factory was used to clone the tasktype, so every task would have its own instance of a tasktype. However, after discussing the responsibility of the tasktype, it became apparent that the tasktype should remain a single shared instance. The functionality of the TaskFactory was thus reduced to simply redirecting to and facading the Task constructor. In the future, as soon as the Task implementation changes, the true value of this factory will show, since the factory will remain responsible for the creation of tasks.
\subsection{Login}
From the start of the project, we started with the idea of a multi-user system. Therefore it was very easy to adjust our application to work with data that consisted of multiple users.\\ We implemented a log-in solution as described in the assignment. By getting a list of all the users in the system, which were stored in repositories, we provide the system user with the option to select and log in as his own user.\\ Once logged in, the application is user-based. Every action the system user undertakes is done in his/her name.
\subsubsection{Administrator}
The assignment also mentioned there had to be a user in the role of administrator. This user had other permissions than normal users. He (and only he) could:
\begin{itemize} \item{Adjust the current time} \item{Create a user} \end{itemize}
We discussed how to implement the administrator role. The UserTypes that were introduced in the third iteration were one possible option. Another option was putting a hard-coded user, with a hard-coded usertype, in the application. A third was simply creating a menu item called ``Administrator Menu''.\\
\begin{tabular}{|l|l|} \hline \multicolumn{2}{|c|}{(Dis)advantages User Types} \\ \hline + & The user model stays intact\\ + & In future, permissions can be mixed. Users can have multiple usertypes\\ \hline - & We can't be sure there is a UserType with the name Admin\\ - & More than 1 person can become administrator\\ \hline \end{tabular}
\begin{tabular}{|l|l|} \hline \multicolumn{2}{|c|}{(Dis)advantages Hardcoded administrator} \\ \hline + & The user model stays intact\\ + & In future, permissions can be mixed.
Users can have multiple usertypes\\ \hline - & Hard-coded is not dynamic\\ - & More than 1 person can become administrator\\ \hline \end{tabular}
\begin{tabular}{|l|l|} \hline \multicolumn{2}{|c|}{(Dis)advantages Administrator Menu} \\ \hline + & Administrator and user actions are cleanly separated\\ + & No hard-coded data\\ \hline - & The administrator is not a user\\ \hline \end{tabular}
In the end, we chose the Administrator Menu: a simple, separate menu for the administrator. This seemed better to us in many ways, as everything is separated and no hard-coded data is needed. Also, the specifications (XML, assignment) were not clear about who can or cannot be administrator. Therefore this seemed clearest to us all.
\begin{figure}[H] \begin{center} \includegraphics[scale=0.5]{images/ssd_log_in.png} \end{center} \caption{System Sequence Diagram describing logging in} \end{figure}
\subsection{Focus Work update}
In the previous iteration, we decided to use an implementation of the Strategy pattern for the Focus Work use case. This worked very well for us in the third iteration, as we encountered a new strategy to list tasks: by task type.\\ Our implementation of the Strategy pattern contained two methods, \emph{List<Task> filter(List<Task> tasks)} and \emph{List<Task> sort(List<Task> tasks)}, since every strategy only has to sort the tasks in a certain way (by deadline, ...) and filter them (only show the first \emph{n} tasks, only show tasks of TaskType X, ...).\\
\section{Other changes}
\subsection{Refactoring}
We decided not to refactor the GUI of our project. We figured this was outside the scope of the assignment, and furthermore, no unit tests were available to support refactoring. Only one significant change (documented below) was made in the GUI, and otherwise, the code was left as it was before. The following, then, is a short overview of the refactoring work done on the controllers and model section of our project. The reader will notice that this report is quite brief. We believe that our original code was already of high quality; relatively few bad smells were detected, and consequently, relatively little refactoring has been done.
\subsubsection{Controllers}
The refactoring of the controllers wasn't too much work, as these were quite good as they were. In other words, not too many bad smells were detected when reviewing them. The biggest problem here was variables that had been given unclear names. This might cause uncertainties or problems when you, or especially another member of the team, start reworking the existing code. Changing these variable names also reduces the need to read the Javadocs of said methods, or in some cases removes it altogether. As a result, the work that needs to be done can be done more efficiently and will also frustrate the programmer less. As you can see, even something as small as this is worth the trouble of doing right from the start. The controllers that had the most problems with this were the repository manager and the focus work classes. In the XMLParser there was a large procedural algorithm for parsing the XML file and initializing the model. This, however, leads to a \emph{Long Method} with comments explaining the different sections of the algorithm. This was solved using \emph{Extract Method} to separate each section while also grouping related sections of the algorithm together. The next problem was the usage of a few switch cases throughout the code. The focus factory had a switch case to determine which type of focus the user wants to create.
The refactoring book clearly states that switch cases cause a bad smell. However, being a factory, there is no good way around this, and the factory itself mitigates the smell. The real problem with switch cases is that they may appear more than once in your code. Since the whole point of the factory is to take care of the creation of the wanted focus object, this switch case will never appear again throughout the model. In other words, this piece of code should not be prone to code duplication. The only other place it appeared was in the GUI. The switch case in the GUI has therefore been adjusted so that it now asks the factory for the possibilities and prints these on the screen, rather than using the hard-coded print it had before. This takes the control away from the GUI and gives it back to the factory. As a result, the only occurrence of the switch case is now in the factory. We thought about using polymorphism to solve this problem, but that was not the way to go: the GUI would become massive and almost impossible to change in the future, which is, of course, even worse than having a switch case there.
The last problem was also found in the focus factory. Depending on the type of focus you want, a certain number of parameters needs to be passed. Earlier, the method required all parameters to be passed. This meant the programmer sometimes had to pass more parameters than needed, some of them being null objects. The method has been reworked so that an array is now passed instead. The method is now much cleaner and easier to use.
\subsubsection{Models}
When looking at the models, we found small problems such as \emph{Data Clumps} and \emph{Long Parameter List}. When creating a Task object, the GUI would call a controller, which would in turn call a Task constructor. All these methods would share the same few parameters. This became a problem whenever we wanted to change a method or add a parameter. Eventually, having a long parameter list would simply slow down the development process. We introduced a \emph{Value Object}: in the GUI, a small object is created that contains the parameters shared by all of these methods. It is passed along these methods and only extracted where needed, in the Task constructor. Having a value object makes changing parameters a very quick and easy task. It also keeps our code a little bit cleaner.
We also solved a few bad smells, all related to two issues: \emph{Long Method} and \emph{Duplicate Code}. Often, both issues could be solved using the same solution: \emph{Extract Method}. A long method means a lot is going on in one method. If a lot is going on, the method becomes bloated and more difficult to understand. Splitting it up makes it very clear what happens, and gives the developer a good overview so he can make faster and better changes. Smaller methods are usually very clear about exactly what they do, no matter how small their footprint. The same applies to duplicate code. Often a small operation is used throughout several methods, but it is explicitly coded in each of them. If that operation needs a change, that can create a large task for the developer. Extracting that operation to a separate method takes all of these problems away. For example, in a few places the code was changed to use the method Task.dependsOn(), a method that already existed but wasn't always used.
Most of the refactoring in the model was done on the Task class. In the other model classes, only a little refactoring was necessary. A few small changes were made, most of them related to poorly chosen variable names. In the method Project.remove(), a few lines of obsolete code were removed.
\subsubsection{Misunderstood patterns}
During one of our meetings with Mario, it became clear that we had completely misunderstood where the XML parser and the factories actually belonged. Previously, we were rather confident that these belonged in the controller package. Thanks to Mario, we now understand that this is not the case. Some refactoring had to be done! Sadly, we only discovered this one week after turning in our report about refactoring, so we were not able to include it there. We moved the factories and the parser to the model, where they truly belong. Because we still needed to access them outside of the model package, controllers were also created for them.
\subsection{Software Initialization}
The initialization changed quite a bit since the last report, mainly because of the new requirements. The biggest changes are in the parsing of the XML files: the parser was split into two different parsers, one to parse the theme and one to parse the actual data.
The first step in initiating the program is creating a new repository manager used to save all the objects. Next, the dispatch controller is created. This controller also gets a reference to the manager so it can easily pass it to all the other controllers. The dispatch controller's main task is to take care of the creation of all the other controllers and to pass the manager on to them as well. After that, the dispatch controller is used to call the right controller for the job.
Now that both the manager and the controllers are initiated, it is time to start reading the XML files. This is done by calling the XML controller and asking it to parse the files you pass to it. The controller receives two files: one theme file and one data file. The theme file contains all the information regarding the types of tasks, users, resources and so on. The data file contains the actual data. The controller creates a temporary theme parser and passes the theme file to it. It also passes three maps by reference: one for task types, one for resource types and one for user types. The theme parser then reads all the data from the theme file, saves it in the repository manager and places it in the maps. The maps are needed because we do not save most of the ids in our model. Since the data parser references these ids, we still need to keep them around during the whole parsing process. The maps' keys are the ids and the values are the created objects.
When the theme parser has finished parsing, it is time for the data parser to get to work. The parser first parses all the resources and projects, creates them and saves them in the repository manager. After that, it starts parsing all the users and creates them one by one. At the same time, it starts creating these users' tasks. These tasks get bound to their project and their reservations are made from the start. Now that all tasks are created, all the invitations are parsed and made for each task. Once this is done, we can finally start linking all the tasks' dependencies. Most of the work is done by now. The parser only needs to set all the tasks to the state defined in the XML data file.
Because the TaskState transition rules are in effect when setting the states during parsing, the system time has to be set properly to allow each TaskState to be set, in particular when a state should be set to successful. To do this, Task has a method \emph{getEarliestExecTime()} which, when the Task has the allocations needed to be executed successfully, returns the earliest time the Task can be set to successful, provided its dependencies are successful as well. It does this by first getting the earliestExecTime() of the dependencies, then getting the earliestExecTime from the asset allocations, which returns the earliest time at which enough assets are allocated, and finally taking the latest of all of them. In the parser, the tasks are sorted according to this value. They are then iterated over; whenever the intended state is successful, the system time is set to the time returned by \emph{getEarliestExecTime()} and the task is set to the proper state.
The advantage of this system is that it should prevent invalid XML files from being parsed, making sure the system starts out in a consistent state. The downside is that this method is nested deep in the model, causing strong coupling. The method actually bears a strong resemblance to \emph{canBeExecuted()}, so it would be possible to implement \emph{canBeExecuted()} through this method, increasing code reuse. However, this method only supports the simple constraints and allocations, so in the future it could become impossible to implement, forcing us to redo \emph{canBeExecuted()}. A possible future solution, should this method fail, is to add a fourth state that is only used and accessible from the parser and that removes the requirement checking. This would allow any valid XML file to be parsed, but it would no longer detect invalid XML files.
The only thing left to do now is parse the system time and set it in the system's clock. The data parser then returns the list of parsed users to the controller, which in turn passes it to the GUI. The GUI checks whether the parsing succeeded and starts printing the menu. If something went wrong during the parsing, it prints a message describing the error. The program is now up and running and the user can start using it!
\begin{figure}[H]
\begin{center}
\includegraphics[scale=0.5]{images/System_Initiation.jpg}
\end{center}
\caption{System Sequence Diagram describing the system initialization}
\end{figure}
\subsection{Future TaskState}
While not specified in the use cases, the transition from the Available to the Successful task state is a bit unrealistic. When \emph{setSuccessful()} is called, if the task can be executed, its state immediately changes to successful without any lapse of time. This would allow dependent tasks to be executed immediately afterwards. A more realistic approach would be to introduce an \emph{Executing} TaskState. This state would have all the restrictions of the Successful TaskState, but \emph{isSuccessful()} would still return false. The state would keep the time at which it was started and be notified of time changes. Only after the task duration has passed would the state change to successful.
\part{Testing Approach}
\section{Testing}
\subsection{Technology}
We decided to use JUnit4 for testing. JUnit4 has a few advantages compared to JUnit3: it uses annotations to define a test, which gives the developer an easy way to test for exceptions.
\subsection{Testing Approach}
We decided to go for a defensive and multi-level testing approach. By multi-level we mean that we test both the methods in the model classes (such as User, Task, Resource, etc.)
as well as the controllers that call these methods (TaskController, ResourceController, etc.). This way, we get a good view of where errors occur: model, controller or view.\\
We also tested most methods for both cases. This means that we test both the failure and the success of a method. Testing only for success does not guarantee that a correct exception is thrown, nor success in all cases.
\subsection{Coverage Report}
We had an 88\% test coverage on the source folder of our project. An EclEmma coverage report is included in the zip file. Unfortunately, this coverage report does not distinguish between our model classes and the test classes. As such, the EclEmma report can be a bit confusing, and its figures may differ from the actual test coverage.
\part{Project Management}
\section{Project Management - Iteration 1}
\subsection{Planning}
Our Team Assignment had the following planning in iteration 1:\\
\begin{tabular}{p{200 pt}|c|c}
Activity & From & To\\ \hline
First meeting \& Discussion of our views on the project & 9/10/2009 & 9/10/2009\\ \hline
Creation of a draft class diagram \& working out System Sequence Diagrams & 9/10/2009 & 12/10/2009\\ \hline
First meeting with our advisor & 12/10/2009 & 12/10/2009\\ \hline
Individual rework of Class Diagram & 12/10/2009 & 14/10/2009\\ \hline
Comparing results and creation of definitive Class Diagram & 14/10/2009 & 14/10/2009\\ \hline
Creation of Sequence Diagrams & 14/10/2009 & 16/10/2009\\ \hline
Development of Model classes and Controller classes. Building of GUI structure & 16/10/2009 & 19/10/2009\\ \hline
Code review by team and rewriting certain functionalities & 19/10/2009 & 26/10/2009\\ \hline
Start writing report \& finishing UML diagrams & 24/10/2009 & 27/10/2009\\ \hline
\end{tabular}
\subsection{Teamwork}
We focused on close teamwork. We started the project with a team discussion on how everyone saw the project and interpreted the assignment. This gave the team a general perspective and a good grasp of how we wanted to implement it.\\
We had 3 physical meetings per week, one of which was with our team advisor. In these meetings, we discussed what everyone had done in the past days. Whenever something was unclear, or the group had doubts about which method or pattern would be best to use, we always took the time to come to a solution that seemed best to everyone.\\
We also used several team collaboration tools provided by Google, such as Google Code and Google Groups. This gave us advantages such as a mailing list, Subversion with the option to review code at each revision, issue lists and hosted files.\\
We tried to keep all the information as centralized as possible by using a separate Subversion repository for the Visual Paradigm file. This way, every team member always had the most up-to-date diagrams available.\\
Development of the project was divided into 4 groups of functionality: Controllers, Models, View and Testing. Each member of the team was assigned one of these tasks. Steven Thuriot took on Controllers and the parsing of XML, Kwinten Missiaen Models, Koen Van den dries the View and Bart Vangeneugden Testing. The report was structured and drafted by Bart Vangeneugden; however, every team member wrote about the part of the development he was responsible for.\\
\subsection{Timing}
\textbf{Iteration 1:} We had a total of 18 hours of physical meetings for the project. Besides that, every group member also worked at home.
A short estimate follows:\\
\begin{tabular}{l|l}
Member & Time (+/- hours)\\ \hline
Kwinten Missiaen & 30\\
Steven Thuriot & 25\\
Koen Van den dries & 23\\
Bart Vangeneugden & 25\\
\end{tabular}
\subsection{Self-Evaluation}
It is our opinion that we put in a great team effort. However, we can improve. In this iteration we were too eager to get a first version of the class and other diagrams ready. It would have been a better choice to make a good class-per-class analysis before drawing.\\
To conclude, we had no real problems regarding teamwork, and we learned a lot about project organization.
\section{Project Management - Iteration 2}
\subsection{Planning}
Our Team Assignment had the following planning:\\
\begin{tabular}{p{300 pt}|c}
Activity & Date\\ \hline
First meeting to discuss the new iteration, discussion about the state pattern, observer pattern, the business rules and a new way to implement FocusWork. & 20/11/2009\\ \hline
Adjusting parts of the class diagram \& System Sequence Diagrams to use the state pattern. & 25/11/2009\\ \hline
Discussion about splitting task into two objects because it is currently bloated. Decided on using the strategy pattern for FocusWork. & 27/11/2009\\ \hline
Discussed two options of splitting task. Decided on making a new class that is an attribute of task. & 30/11/2009\\ \hline
Talked about the best way to implement some parts of the state pattern. Discussed making a generics wrapper so the GUI has a describable but can't touch the actual objects. & 07/12/2009\\ \hline
Discussed some code issues and set up some deadlines for the code and the report. & 11/12/2009\\ \hline
Start writing the report \& finish polishing up the UML diagrams. & 16/12/2009\\ \hline
Finish the report and go over it one more time as a team. & 18/12/2009\\ \hline
\end{tabular}
\subsection{Teamwork}
We had a total of 16 hours of physical meetings for the project. Besides that, every group member also worked at home. A short estimate follows:\\
\begin{tabular}{l|l}
Member & Time (+/- hours)\\ \hline
Kwinten Missiaen & 18\\
Steven Thuriot & 18\\
Koen Van den dries & 17\\
Bart Vangeneugden & 15\\
\end{tabular}
\subsection{Self-Evaluation}
Just like last time, we had no real problems regarding teamwork. We also applied the same strategy: meeting often and working hard. We divided the actual code among us, so it would be manageable to work on. Kwinten took care of the time controller, the clock and the task dependency manager. Steven handled the implementation of the state pattern. Koen worked on the observer pattern, the describable wrapper and the GUI. Bart also worked on the GUI and, in addition, implemented the strategy pattern and the repositories. Finally, we all worked hard on writing tests and adjusting the UML diagrams. Everything was discussed thoroughly and everyone put in his best effort to try and make this iteration a success as well.
\section{Project Management - Iteration 3}
\subsection{Planning}
\begin{tabular}{p{200 pt}|c}
Activity & Date\\ \hline
\multicolumn{2}{l}{\emph{Refactoring:}} \\ \hline
First meeting and discussion: & 9/3/2010 \\ \hline
Detecting and documenting bad smells: & 10/3 - 20/3/2010 \\ \hline
Meeting and division of the refactoring work: & 22/3/2010\\ \hline
Refactoring: & 23/3 - 10/4/2010\\ \hline
Refactoring report: & 11/4 - 21/4/2010\\ \hline
\multicolumn{2}{l}{\emph{Iteration 3}} \\ \hline
Weekly meetings discussing the new requirements & 23/4 - 21/5/2010\\ \hline
Development and weekly evaluation & 3/5 - 25/5/2010 \\ \hline
GUI testing and code finalization & 25/5/2010\\ \hline
\end{tabular}
\subsection{Teamwork}
We spent approximately 5 hours meeting to refactor the code, and about 10-15 hours meeting to code the last iteration. The time each team member spent individually is shown in the table below.
\begin{tabular}{l|l}
Member & Time (+/- hours) \\ \hline
Kwinten Missiaen & 23 \\
Steven Thuriot & 26 \\
Koen Van den dries & 25 \\
Bart Vangeneugden & 27 \\
\end{tabular}
\subsection{Self-Evaluation}
Just like last time, we had no serious problems regarding teamwork. As before, we spent a lot of time meeting and discussing the project. Our planning this semester was not great: because we didn't have to spend a lot of time refactoring, we lost our rhythm and started a bit too late. Even though one of our team members was very busy, we still managed to finish the project, thanks also to a one-week extension of the deadline.
\newpage
\appendix
\section{State Pattern}
\begin{figure}[H]
\begin{center}
\includegraphics[scale=0.6]{images/State_Pattern.png}
\end{center}
\caption{State Pattern Diagram}
\end{figure}
\section{FocusWork}
\begin{figure}[H]
\begin{center}
\includegraphics[scale=0.5]{images/focus_class_diagram.jpg}
\end{center}
\caption{Specialized class diagram explaining the Strategy Pattern}
\end{figure}
\section{Controller Class Diagram}
\begin{figure}[H]
\begin{center}
\includegraphics[width=\textwidth]{images/ControllerClassDiagram.jpg}
\end{center}
\caption{Controller Class Diagram}
\end{figure}
\section{Repository Class Diagram}
\begin{figure}[H]
\begin{center}
\includegraphics[scale=0.6]{images/RepositoriesClassDiagram.jpg}
\end{center}
\caption{Repository Class Diagram}
\end{figure}
\section{Model Class Diagram}
\begin{figure}[H]
\begin{center}
\includegraphics[width=\textwidth]{images/ModelClassDiagram.jpg}
\end{center}
\caption{Model Class Diagram}
\end{figure}
\end{document}
\section{Top Publishing Results for Antonacopoulou} The Google Scholar search for Elena Antonacopoulou shows the following papers with at least 200 citations:\\ \begin{itemize} \item "The Relationship between Individual and Organizational Learning: New Evidence from Managerial Learning Practices" (2006) cited 396 times. \item "Emotion, learning and organizational change: Towards an integration of psychoanalytic and other perspectives" (2001) cited 365 times. \item "Absorptive Capacity: A Process Perspective" (2008) cited 282 times. \item "Reframing Competency In Management Development" (1996) cited 238 times. \item "Making the Business School More ‘Critical’: Reflexive Critique Based on Phronesis as a Foundation for Impact" (2010) cited 245 times. \item "The Social Complexity of Organizational Learning: The Dynamics of Learning and Organizing" (2007) cited 222 times. \end{itemize}
\chapter{Nonlinear filtering} \label{ch:RNN} The SWR detectors discussed up to this point -- both the band-pass and the GEVec-based filters -- calculate their output as a linear combination of input samples. This chapter explores whether a nonlinear method to calculate the output signal can improve SWR detection performance. \input{RNNs} \input{GRU-eqs} \input{Optimize} \input{Results}
% !TeX spellcheck = en_US
% !TeX root = DynELA.tex
%
% LaTeX source file of DynELA FEM Code
%
% (c) by Olivier Pantalé 2020
%
\chapter{DynELA programming language}
\startcontents[chapters]
\printmyminitoc[1]\LETTRINE{T}his chapter deals with the \DynELA~programming language. This language is based on Python 3 and all models must be described using this formalism. Therefore, this chapter describes step by step how to build a Finite Element Model for the \DynELA~using the Python 3 language.
\section{Introduction and basic knowledge}
\subsection{Calling the Python interpreter}
After the installation and compilation phase of the code\footnote{See the installation instructions in chapter \ref{Chapter!Installation} of the preamble, page \pageref{Chapter!Installation}}, the \DynELA~can be run using the following command:
\begin{BashListing}
python model.py
\end{BashListing}
where \textsf{model.py} is the Python 3 source file defining the Finite Element Model. The \textsf{model.py} file contains the definition of the Finite Element Model using the Python 3 language and calls to specific DynELA methods written in \Cpp.
\subsection{Formalism of a DynELA python file}
To build a Finite Element Model, it is mandatory to import the \textsf{dnlPython} interpreter from the \textsf{.py} script. Conforming to this formalism, we give hereafter the minimal piece of Python code to set up a Finite Element Model in the \DynELA.
\begin{PythonListing}
#!/usr/bin/env python3
import dnlPython as dnl # Imports the dnlPython library as dnl
model = dnl.DynELA() # Creates the main Object
... # Set of instructions to build the FE model
... # conforming to the DynELA language and Python 3
model.solve() # Runs the solver
... # Set of instructions to postprocess the FE model
\end{PythonListing}
In the preceding piece of code, line 2 is used to load into the namespace \textsf{dnl} the \textsf{dnlPython} module containing the interface to all \Cpp~methods of the \DynELA, based on the use of the SWIG Python interface. Therefore, all public methods of the \DynELA~written in \Cpp~can be called from the Python script to build the Finite Element Model, launch the solver, produce output results,\ldots In the same piece of code, line 3 is used to create an object of type \textsf{DynELA} (the highest-level object type in the \DynELA~library) and instantiate it as the \textsf{model} object\footnote{For the rest of this chapter, we assume that the name of the instantiated \textsf{DynELA} object is \textsf{model.}}, while in line 6, the solver of the \DynELA~library is called to solve the problem and produce the results.
As the interpreter of the \DynELA~is based on the Python 3 language, we can use all instructions valid in Python 3 along with the specific DynELA instructions. In the rest of this documentation, we assume that the notions of programming in Python are mastered, and we will focus only on the functions specific to the \DynELA.
\section{The Kernel library}
\#include "LogFile.h"
\#include "MacAddress.h"
\#include "Settings.h"
\#include "String.h"
\#include "System.h"
\#include "Timer.h"
\#include "Field.h"
\section{The Maths library}
\#include "DiscreteFunction.h"
\#include "DiscreteFunctionSet.h"
\#include "Function.h"
\#include "Matrices.h"
\#include "Matrix.h"
\#include "MatrixDiag.h"
\#include "PolynomialFunction.h"
\#include "RampFunction.h"
\#include "SinusFunction.h"
\#include "SymTensor2.h"
\#include "Tensor2.h"
\#include "Tensor3.h"
\#include "Tensor4.h"
\#include "Vec3D.h"
\#include "Vector.h"
\#include "ColorMap.h"
\section{Model, Nodes and Elements}
All Finite Element Models involve nodes and elements. The very first step in setting up a Finite Element Model is therefore to create the nodes and the elements of the structure. The \DynELA~library does not include any meshing procedure yet; therefore, it is mandatory to create all elements and all nodes by hand, or using Python loops whenever possible. Another way is to use an external meshing program and convert its output into the ad hoc lines of Python describing the elements and the nodes of the model. This has been used many times by the author, and the Abaqus Finite Element code is an efficient way to create the mesh using the \textsf{.inp} text file generated by the Abaqus CAE program.
\subsection{Model}
Definition of a model in the \DynELA~is done by creating an instance of the \textsf{DynELA} object in memory. This is done by calling the \textsf{dnlPython.DynELA()} method that returns an object of type \textsf{DynELA} as presented hereafter.
\begin{PythonListing}
import dnlPython as dnl # Imports the dnlPython library as dnl
model = dnl.DynELA("Taylor") # Creates the main Object model named Taylor
\end{PythonListing}
In line 2 of the preceding piece of code, a reference name\footnote{The reference name is a string used to identify the object. It is completely optional, but useful, for example, for debugging purposes, as it allows one to know the name associated with an object.} \textsf{Taylor} is associated with the model object during creation. Once the model is created, one can then define all nodes, elements, materials, constitutive laws, boundary conditions,\ldots
\subsection{Nodes}
\subsubsection{Definition of the nodes}
In the \DynELA, creation of nodes is done by calling the \textsf{DynELA.createNode()} method, giving the new node number and the $x$, $y$ and $z$ coordinates of the new node as presented just below.
\begin{PythonListing}
model.createNode(1, 0.0, 0.0, 0.0) # Creates node 1, coordinates [0.0, 0.0, 0.0]
model.createNode(2, 1.0, 2.0, -1.0) # Creates node 2, coordinates [1.0, 2.0, -1.0]
\end{PythonListing}
An alternative method can be used if the coordinates of the node are already stored in a Vec3D object, as presented hereafter.
\begin{PythonListing}
vect = dnl.Vec3D(1.0, 2.0, -1.0) # Creates a Vec3D object [1.0, 2.0, -1.0]
model.createNode(1, vect) # Creates node 1 with coordinates vect
\end{PythonListing}
A check of the total number of nodes of the structure can be done using the \textsf{DynELA.getNodesNumber()} method that returns the total number of nodes created.
\subsubsection{Definition of the node sets}
Manipulation of nodes, application of boundary conditions, and so on is done through the definition of node sets. Such node sets are used to group nodes under a \textsf{NodeSet} object for further use.
A \textsf{NodeSet} object contains a reference name and a list of nodes. Creation of a \textsf{NodeSet} is done using the \textsf{DynELA.NodeSet()} method that returns a new \textsf{NodeSet} instance. The \textsf{NodeSet} can be named during the creation by specifying its name as a string.
\begin{PythonListing}
nset = dnl.NodeSet("NS_All")
\end{PythonListing}
When the \textsf{NodeSet} has been created, one can then define the list of nodes constituting the \textsf{NodeSet} with the generic \textsf{DynELA.add()} method with the following formalism:
\textsf{DynELA.add(nodeset, start, end, increment)}
Hereafter are some self-explanatory examples illustrating this process.
\begin{PythonListing}
nset = dnl.NodeSet("NS_All")
model.add(nset, 2) # Add node number 2 to node set
model.add(nset, 1, 4) # Add nodes number 1-4 to node set
model.add(nset, 1, 4, 2) # Add nodes number 1 and 3 to node set
\end{PythonListing}
\subsection{Elements}
\subsubsection{Definition of the elements}
Creation of elements is done by calling the \textsf{DynELA.createElement()} method, giving the new element number and the list of nodes defining the element shape, separated by commas and ordered according to the element definition, as presented just hereafter.
\textsf{DynELA.createElement(elementNumber, node1, node2,\ldots)}
Before creating the very first element of the structure, it is necessary to define the element shape using the \textsf{DynELA.setDefaultElement()} method. An example of element creation combining the two preceding methods is presented hereafter.
\begin{PythonListing}
model.setDefaultElement(dnl.Element.ElQua4N2D) # Defines the default element
model.createElement(1, 1, 2, 3, 4) # Creates element 1 with nodes 1,2,3,4
\end{PythonListing}
The following elements are available in the \DynELA.
\begin{description}
\item [ElQua4N2D]: 4-node bi-linear 2D quadrilateral element.
\item [ElQua4NAx]: 4-node bi-linear axisymmetric quadrilateral element.
\item [ElTri3N2D]: 3-node 2D triangular element.
\item [ElHex8N3D]: 8-node 3D hexahedral element.
\item [ElTet4N3D]: 4-node 3D tetrahedral element.
\item [ElTet10N3D]: 10-node 3D tetrahedral element.
\end{description}
The total number of elements of the structure can be checked using the \textsf{DynELA.getElementsNumber()} method that returns the total number of elements created.
\subsubsection{Definition of the element sets}
Declaration of materials, boundary conditions, and so on is done through the definition of element sets. Such element sets are used to group elements under an \textsf{ElementSet} object for further use. An \textsf{ElementSet} object contains a reference name and a list of elements. Creation of an \textsf{ElementSet} is done using the \textsf{DynELA.ElementSet()} method that returns a new \textsf{ElementSet} instance. The \textsf{ElementSet} can be named during the creation by specifying its name as a string.
\begin{PythonListing}
eset = dnl.ElementSet("ES_All")
\end{PythonListing}
When the \textsf{ElementSet} has been created, one can then define the list of elements constituting the \textsf{ElementSet} with the generic \textsf{DynELA.add()} method according to the following formalism:
\textsf{DynELA.add(elementset, start, end, increment)}
Hereafter are some self-explanatory examples illustrating this process.
\begin{PythonListing}
eset = dnl.ElementSet("ES_All")
model.add(eset, 2) # Add element number 2 to element set
model.add(eset, 1, 4) # Add elements number 1-4 to element set
model.add(eset, 1, 4, 2) # Add elements number 1 and 3 to element set
\end{PythonListing}
\subsection{Coordinates transformations}
When the mesh has been created, it is always possible to modify the geometry of the structure by applying some geometrical operations such as translations, rotations and changes of scale. Those operations apply to a \textsf{NodeSet}.
\subsubsection{Translations}
One can define a translation of the whole model or a part of the model by defining a translation vector (an instance of the \DynELA~\textsf{Vec3D}) and applying this translation to the whole structure (without specifying the \textsf{NodeSet}) or to a \textsf{NodeSet} using the \textsf{DynELA.translate()} method with the following syntax.
\begin{PythonListing}
vector = dnl.Vec3D(1, 0, 0) # Defines the translation vector
model.translate(vector) # Translates the whole model along [1, 0, 0]
model.translate(vector, nset) # Translates the NodeSet nset along [1, 0, 0]
\end{PythonListing}
\subsubsection{Rotations}
One can define a rotation of the whole model or a part of the model by defining a rotation axis (one of the global axes $\overrightarrow{x}$, $\overrightarrow{y}$, $\overrightarrow{z}$ or an instance of the \DynELA~\textsf{Vec3D}) and an angle $\alpha$, and then applying this rotation to the whole structure (without specifying the \textsf{NodeSet}) or to a \textsf{NodeSet} using the \textsf{DynELA.rotate()} method with the following syntax.
\begin{PythonListing}
model.rotate('X', angle) # Rotation of the whole structure around X
model.rotate('X', angle, nset) # Rotation of NodeSet nset around X
axis = dnl.Vec3D(1.0, 1.0, 1.0) # Defines the axis of rotation
model.rotate(axis, angle) # Rotation of the whole structure around axis
model.rotate(axis, angle, nset) # Rotation of NodeSet nset around axis
\end{PythonListing}
\subsubsection{Scaling}
One can define a scaling of the whole model or a part of the model by defining a scale factor or a scale vector (an instance of the \DynELA~\textsf{Vec3D}) and applying this scaling operation to the whole structure (without specifying the \textsf{NodeSet}) or to a \textsf{NodeSet} using the \textsf{DynELA.scale()} method with the following syntax.
\begin{PythonListing}
model.scale(value) # Scales the whole structure by factor value
model.scale(value, nset) # Scales the NodeSet nset by factor value
vec = dnl.Vec3D(2.0, 1.0, 1.0) # Defines the scale vector
model.scale(vec) # Scales the whole structure by a factor of 2.0 on x
model.scale(vec, nset) # Scales the NodeSet nset by a factor of 2.0 on x
\end{PythonListing}
\section{Materials}
\subsection{Declaration of materials}
\subsubsection{Material declaration}
Creation of a Material is done using the \textsf{DynELA.Material()} method. It is possible to give a name to a material during the creation process by specifying it as a string in the declaration. This name can be used later on.
\begin{PythonListing}
# Creates the material
steel = dnl.Material("Steel")
\end{PythonListing}
\subsubsection{General properties of materials}
General properties of materials in \DynELA~concern the general constants such as Young's modulus, Poisson's ratio, density,\ldots The complete list of parameters is reported in Table \ref{tab:Programming!GeneralProperties}.
\begin{table}[h]
\begin{center}\begin{tcolorbox}[width=.75\textwidth,myTab,tabularx={l|c|c|R}]
\multicolumn{1}{c|}{Name} & Symbol & Unit & \multicolumn{1}{c}{Description} \\
\hline\hline
youngModulus & $E$ & $MPa$ & Young's modulus\\
poissonRatio & $\nu$ & & Poisson's ratio\\
density & $\rho$ & $kg/m^3$ & Density\\
heatCapacity & $C_{p}$ & $J/^{\circ}C$ & Heat capacity\\
taylorQuinney & $\eta$ & & Taylor-Quinney coefficient\\
initialTemperature & $T_{0}$ & $^{\circ}C$ & Initial temperature
\end{tcolorbox}\end{center}\caption{General properties of materials\label{tab:Programming!GeneralProperties}}
\end{table}
After creating an instance of the object \textsf{dnl.Material}, one can apply the prescribed values to all those parameters using the following syntax.
\begin{PythonListing}
# Creates the material
steel = dnl.Material("Steel")
# Apply all parameters
steel.youngModulus = 206e9
steel.poissonRatio = 0.3
steel.density = 7830
steel.heatCapacity = 46
steel.taylorQuinney = 0.9
steel.initialTemperature = 25
\end{PythonListing}
\subsubsection{Material affectation to a set of elements}
The material is then assigned to the elements of the model using the \textsf{DynELA.add()} method, as proposed hereafter.
\begin{PythonListing}
# Creates the material
steel = dnl.Material("Steel")
# Apply all parameters
...
# Affect the material to the element set eset
model.add(steel, eset)
\end{PythonListing}
\subsection{Johnson-Cook constitutive law}
The Johnson-Cook constitutive law is a hardening law defining the yield stress $\sigma^{y}(\overline{\varepsilon}^{p},\stackrel{\bullet}{\overline{\varepsilon}^{p}},T)$ by the following equation:
\begin{equation}
\sigma^{y}=\left(A+B\left(\overline{\varepsilon}^{p}\right)^{n}\right)\left[1+C\ln\left(\frac{\stackrel{\bullet}{\overline{\varepsilon}^{p}}}{\stackrel{\bullet}{\overline{\varepsilon}_{0}}}\right)\right]\left[1-\left(\frac{T-T_{0}}{T_{m}-T_{0}}\right)^{m}\right]
\end{equation}
where $\stackrel{\bullet}{\overline{\varepsilon}_{0}}$ is the reference strain rate, $T_{0}$ and $T_{m}$ are the reference temperature and the melting temperature of the material respectively, and $A$, $B$, $C$, $n$ and $m$ are the five constitutive flow law parameters.
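To make the role of each parameter more concrete, the short standalone Python snippet below evaluates the Johnson-Cook yield stress directly from the equation above. It does not use the \DynELA~API at all, and the numerical values are purely illustrative (they merely resemble typical steel parameters); they are not calibrated material data.
\begin{PythonListing}
import math

def johnson_cook_yield(A, B, C, n, m, eps_p, deps_p, deps_0, T, T0, Tm):
    # Strain hardening term (A + B * eps_p^n)
    hardening = A + B * eps_p**n
    # Strain rate sensitivity term [1 + C ln(deps_p / deps_0)]
    rate = 1.0 + C * math.log(deps_p / deps_0)
    # Thermal softening term [1 - ((T - T0) / (Tm - T0))^m]
    thermal = 1.0 - ((T - T0) / (Tm - T0))**m
    return hardening * rate * thermal

# Illustrative evaluation (units: Pa, s^-1, degrees Celsius)
sigma_y = johnson_cook_yield(A=792.0e6, B=510.0e6, C=0.014, n=0.26, m=1.03,
                             eps_p=0.1, deps_p=1.0, deps_0=1.0,
                             T=100.0, T0=25.0, Tm=1520.0)
print(sigma_y)
\end{PythonListing}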
Within \DynELA, this hardening law is declared using the following piece of code:
\begin{PythonListing}
hardLaw = dnl.JohnsonCookLaw() # Hardening law
hardLaw.setParameters(A, B, C, n, m, depsp0, Tm, T0) # Parameters of the law
\end{PythonListing}
Once the hardening law has been created, one has to link this hardening law to an already defined material using the following piece of code:
\begin{PythonListing}
# Creates the material
steel = dnl.Material("Steel")
# Creates the hardening law
hardLaw = dnl.JohnsonCookLaw()
# Attach hardening law to material
steel.setHardeningLaw(hardLaw)
\end{PythonListing}
\section{Boundary conditions}
\subsection{Restrain boundary condition}
\begin{PythonListing}
# Declaration of a boundary condition for top part
topBC = dnl.BoundaryRestrain('BC_top')
topBC.setValue(0, 1, 1)
model.attachConstantBC(topBC, topNS)
\end{PythonListing}
\subsection{Amplitude}
\begin{PythonListing}
# Declaration of a ramp function to apply the load
ramp = dnl.RampFunction("constantFunction")
ramp.set(dnl.RampFunction.Constant, 0, stopTime)
\end{PythonListing}
\subsection{Constant speed}
\begin{PythonListing}
# Declaration of a boundary condition for top part
topSpeed = dnl.BoundarySpeed()
topSpeed.setValue(displacement, 0, 0)
topSpeed.setFunction(ramp)
model.attachConstantBC(topSpeed, topNS)
\end{PythonListing}
\subsection{Initial speed}
\begin{PythonListing}
# Declaration of a ramp function to apply the load
ramp = dnl.RampFunction("constantFunction")
ramp.set(dnl.RampFunction.Constant, 0, stopTime)
\end{PythonListing}
\section{Fields}
\subsection{Nodal fields}
Nodal fields are defined at nodes and cover the types listed in Table \ref{tab:Programming!NodalFields}. Some of these fields are directly defined at nodes, while others are extrapolated from integration points and transferred to nodes, as reported in the column \textsf{Loc} of Table \ref{tab:Programming!NodalFields}. Concerning types, \textsf{scalars}, \textsf{vec3D} and \textsf{tensors} are available. Depending on the type of data, different methods can be used to access those data:
\begin{description}
\item [{scalar}] : Direct access to the value as it is unique.
\item [{vec3D}] : Access to all $3$ components of a vec3D using \textsf{nameX}, \textsf{nameY}, \textsf{nameZ} or the norm of the vec3D using \textsf{name}.
\item [{tensor}] : Access to all $9$ components of a tensor using \textsf{nameXX}, \textsf{nameXY},\ldots, \textsf{nameZZ} or the norm of the tensor using \textsf{name}.
\end{description}
\begin{table}[h]
\begin{center}\begin{tcolorbox}[width=.75\textwidth,myTab,tabularx={l|c|c|c|R}]
\multicolumn{1}{c|}{Name} & Type & Label & Loc & \multicolumn{1}{c}{Description} \\
\hline\hline
density & scalar & & IntPt & \\ \hline
displacementIncrement & vec3D & & node & \\ \hline
displacement & vec3D & & node & \\ \hline
energyIncrement & scalar & & IntPt & \\ \hline
energy & scalar & & IntPt & \\ \hline
gammaCumulate & scalar & & IntPt & \\ \hline
gamma & scalar & & IntPt & \\ \hline
internalEnergy & scalar & & IntPt & \\ \hline
mass & scalar & & node & \\ \hline
nodeCoordinate & vec3D & & node & \\ \hline
normal & vec3D & & node & \\ \hline
PlasticStrainInc & tensor & & IntPt & \\ \hline
plasticStrainRate & scalar & & IntPt & \\ \hline
plasticStrain & scalar & & IntPt & \\ \hline
PlasticStrain & tensor & & IntPt & \\ \hline
pressure & scalar & & IntPt & \\ \hline
speedIncrement & vec3D & & node & \\ \hline
speed & vec3D & & node & \\ \hline
StrainInc & tensor & & IntPt & \\ \hline
Strain & tensor & & IntPt & \\ \hline
Stress & tensor & & IntPt & \\ \hline
temperature & scalar & & IntPt & \\ \hline
vonMises & scalar & & IntPt & \\ \hline
yieldStress & scalar & & IntPt &
\end{tcolorbox}\end{center}\caption{Nodal fields\label{tab:Programming!NodalFields}}
\end{table}
\subsection{Element fields}
Element fields are defined at integration points and cover the types listed in Table \ref{tab:Programming!ElementlFields}. Concerning types, \textsf{scalars}, \textsf{vec3D} and \textsf{tensors} are available. Depending on the type of data, different methods can be used to access those data:
\begin{description}
\item [{scalar}] : Direct access to the value as it is unique.
\item [{vec3D}] : Access to all $3$ components of a vec3D using \textsf{nameX}, \textsf{nameY}, \textsf{nameZ} or the norm of the vec3D using \textsf{name}.
\item [{tensor}] : Access to all $9$ components of a tensor using \textsf{nameXX}, \textsf{nameXY},\ldots, \textsf{nameZZ} or the norm of the tensor using \textsf{name}.
\end{description}
\begin{table}[h]
\begin{center}\begin{tcolorbox}[width=.75\textwidth,myTab,tabularx={l|c|c|R}]
\multicolumn{1}{c|}{Name} & Type & Label & \multicolumn{1}{c}{Description} \\
\hline\hline
density & scalar & & \\ \hline
gammaCumulate & scalar & & \\ \hline
gamma & scalar & & \\ \hline
internalEnergy & scalar & & \\ \hline
plasticStrainRate & scalar & & \\ \hline
plasticStrain & scalar & & \\ \hline
PlasticStrain & tensor & & \\ \hline
PlasticStrainInc & tensor & & \\ \hline
pressure & scalar & & \\ \hline
StrainInc & tensor & & \\ \hline
Strain & tensor & & \\ \hline
Stress & tensor & & \\ \hline
temperature & scalar & & \\ \hline
vonMises & scalar & & \\ \hline
yieldStress & scalar & &
\end{tcolorbox}\end{center}\caption{Element fields\label{tab:Programming!ElementlFields}}
\end{table}
\subsection{Global fields}
\begin{table}[h]
\begin{center}\begin{tcolorbox}[width=.75\textwidth,myTab,tabularx={l|c|c|R}]
\multicolumn{1}{c|}{Name} & Type & Label & \multicolumn{1}{c}{Description} \\
\hline\hline
kineticEnergy & scalar & & \\ \hline
realTimeStep & scalar & & \\ \hline
timeStep & scalar & &
\end{tcolorbox}\end{center}\caption{Global fields\label{tab:Programming!GlobalFields}}
\end{table}
\section{Data Output during computation}
\subsection{VTK Data files}
\begin{PythonListing}
model.setSaveTimes(0, stopTime, stopTime/nbreSaves)
\end{PythonListing}
\subsection{History files}
\begin{PythonListing}
dtHist = dnl.HistoryFile("dtHistory")
dtHist.setFileName(dnl.String("dt.plot"))
dtHist.add(dnl.Field.timeStep)
dtHist.setSaveTime(stopTime/nbrePoints)
model.add(dtHist)
\end{PythonListing}
\section{Solvers}
\begin{PythonListing}
# Declaration of the explicit solver
solver = dnl.Explicit("Solver")
solver.setTimes(0, stopTime)
solver.setTimeStepSafetyFactor(1.0)
model.add(solver)
\end{PythonListing}
\subsection{Parallel solver}
\begin{PythonListing}
# Parallel solver with two cores
model.parallel.setCores(2)
\end{PythonListing}
\subsection{Solving procedure}
\begin{PythonListing}
# Run the main solver
model.solve()
\end{PythonListing}
\section{Vectorial SVG contourplots}
%!TEX root = ../report.tex
%
% Introduction
%
\section{Introduction}
\label{sec:introduction}
% General description of the problem and its context, current
% solutions, and road map of the project.
% 2 - 3 pages
% TODO: Mobile proximity based apps (Special kind of context aware), Explain context-aware, Examples
% Technologies to know if the user is nearby some POI
% Development of proximity-based apps
% Smart spaces
% TODO: What are native and web mobile apps
% TODO: What makes more sense to proximity-based apps
% (Native vs Web)
% This paper is about...
% Present next sections
% Problem
The use of context-aware mobile applications (apps) has been increasing. These apps use context acquired from one or more sensors and from other user data, such as the calendar. For instance, it is possible to build an app that puts the phone in silent mode if the user is in a meeting. Proximity-based apps are context-aware apps that allow the user to interact with the app if he is nearby some point of interest (POI). Table \ref{tab:app_comparison} shows some statistics about the most popular apps in the Google Play Store, including examples of proximity-based apps such as Foursquare\footnote{http://foursquare.com}, Skout\footnote{http://www.skout.com/} and Tinder\footnote{http://www.gotinder.com/}.
\input{tables/app_comparison.tex}
Facebook is the app with the greatest number of downloads: it has at least \num{1e9}. Twitter and Instagram have at least \num{1e8} downloads. One of the proximity-based apps has at least \num{1e7} downloads, which means that it represents 1/100 of Facebook's downloads and 1/10 of Twitter's and Instagram's downloads. In terms of rating, all apps have similar values.
% Possible solutions
To know if the user is nearby some point of interest, we need to know the user's location. We can use the Global Positioning System (GPS)\cite{masumoto1993global} to get this information. Most apps use this technology because most smartphones have a GPS receiver.
%Show some statistics about smartphones with GPS
Unfortunately, GPS cannot be used indoors because the signal can be very weak inside a building. To make proximity-based apps work properly indoors, we need to use other technologies.
%-----------------------------
% BLE
%-----------------------------
\subsection{Bluetooth Low Energy}
\label{sub:bluetooth_low_energy}
Bluetooth is a wireless, short-range communication technology. Bluetooth Low Energy (BLE)\footnote{http://www.bluetooth.com/Pages/Bluetooth-Smart.aspx} is an improvement over classic Bluetooth since it consumes very little power, allowing it to be used by smaller devices. That is the main reason why this technology started to be used for proximity-based mobile apps. BLE beacons are devices that use BLE to broadcast a Universally Unique Identifier (UUID). These devices use the iBeacon\footnote{http://developer.apple.com/ibeacon/} protocol, which was created by Apple\texttrademark. In this protocol, the beacons advertise the following sets of bytes (20 bytes, 160 bits, in total):
\begin{description}
\item[UUID] has 16 bytes and it is used to differentiate a large group of related beacons
\item[Major] has 2 bytes and it is used to distinguish a smaller subset of beacons within the larger group
\item[Minor] has 2 bytes and it is used to distinguish individual beacons within the smaller subset
\end{description}
A smartphone with BLE support can receive this signal containing the bytes mentioned above. However, only smartphones with Bluetooth version 4 or later have BLE.
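To make the byte layout above more concrete, the following short Python sketch splits such an identifier block into its three fields. It is only an illustration of the 16 + 2 + 2 byte structure described in this section: it assumes the 20 identifier bytes have already been extracted from a BLE advertisement by some scanning library, and it is not part of the system proposed in this work.
\begin{verbatim}
import struct
import uuid

def parse_beacon_identifier(payload: bytes):
    # payload: 16-byte UUID + 2-byte Major + 2-byte Minor (big-endian)
    if len(payload) != 20:
        raise ValueError("expected 16 + 2 + 2 = 20 bytes")
    raw_uuid, major, minor = struct.unpack(">16sHH", payload)
    return uuid.UUID(bytes=raw_uuid), major, minor

# Example with made-up identifier bytes
beacon_uuid, major, minor = parse_beacon_identifier(
    bytes(range(16)) + b"\x00\x01\x00\x2a")
print(beacon_uuid, major, minor)  # -> ..., 1, 42
\end{verbatim}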
For instance, a store owner wants to advertise a promotion to potential customers that are nearby his store. To achieve this goal, the store owner would need to put a beacon inside his store, and his customers would need an app that would get the beacon's signal and show a notification on the customers' smartphones, as shown in figure \ref{fig:store_example}.
\begin{figure}[!ht]
\centering
\includegraphics[width=0.9\textwidth]{img/store_example}
\caption{Interaction between the user's smartphone and a beacon inside a store}
\label{fig:store_example}
\end{figure}
To develop apps that use beacons, developers can use the BLE API provided by the mobile platform, or simply the Software Development Kit (SDK) from the beacon vendor. The example of the store owner is a simple one: the app just gets the signal and shows a notification. However, if the store owner has more than one store and wants to show a different promotion depending on the store, the app would need to do more than just get the beacon's signal. After getting the signal, the app would get the promotion data for that beacon from a backend. The developer of this app would need to write the code that gets the beacon's signal and also the code that gets the information from the backend.
Another example could be an app for a museum. The owner of a museum wants to show, on the visitors' smartphones, information about the exhibition pieces that are in a given room. He would need one beacon for each piece and an app that would get the beacon's signal and use a backend to get the right information about a given piece. The museum needs to advertise to its visitors that such an app exists, and also tell them to download the app before they enter the museum. For both scenarios, developers would have to write similar code to get the beacon's signal and to get data from a backend.
In the scenarios described above, each scanned beacon triggers a notification that shows some content to the user. If developers want to offer more complex interactions than just showing content, their job gets even harder. For instance, a restaurant owner may want to offer his customers the possibility to place their orders from their smartphones as soon as they sit down. Each beacon can represent a table, but the user interface will be the same for all tables. Besides writing the code to scan for nearby beacons, developers need to write the code to place the orders using some service. For users, this scenario means that they need to install one app for each restaurant.
%-----------------------------
% Smart Places
%-----------------------------
\subsection{Smart Places}
\label{sub:smart_places}
Before going further, we will introduce the concept of Smart Place: a place that can somehow interact with nearby users and then allows those users to interact with it. In this context, a Smart Place has beacons, and the users have apps, installed on their smartphones, that can interact with them and show relevant information to the user. With this definition, we can say, for instance, that the store owner wants to turn his store into a Smart Place. The same applies to the museum owner. The user would need to install two apps, one for the store and another one for the museum. If there is another Smart Place, the user would need to install yet another app.
Any mobile app can be a native or a web app. Native apps are apps that we have to install and that can access the device's features, such as BLE.
Web apps are web applications, designed for mobile devices, that run in the device's browser. Table \ref{tab:native_vs_web} compares some characteristics of both kinds of apps.
% Table Native vs web
\input{tables/native_vs_web.tex}
Mobile web apps do not need to be installed, but they do not have access to the same device features that native apps have. Mobile apps for Smart Places, according to our definition, need to be native, because we need to get signals from nearby beacons. For that, we need to use Bluetooth, which is not available in web apps, since they run in the browser. A solution that allows developers to get the best of both worlds is proposed here: on the one hand, native apps can detect nearby beacons; on the other hand, web apps do not need to be installed.
%-----------------------------
% Framework
%-----------------------------
\subsection{Framework}
\label{sub:framework}
% Solution being proposed here aims to
% Users: Interact with any Smart Place
% withou downloading one app for each one
% Developers: Develop apps based on POIs
% using web technologies
% Android app for users, owners
% Backend for...
% Alternative to QR Code without need of scanning
The solution being proposed here aims to allow users to interact with any Smart Place without the need to download and install a native mobile app for each Smart Place on their mobile devices. Developers will be able to develop their POI-based applications using web technologies such as HTML\footnote{http://www.w3.org/TR/html}, CSS\footnote{http://www.w3.org/TR/css-2010} and Javascript\footnote{http://www.w3.org/standards/webdesign/script.html}, and they will not need to write code related to the BLE beacons or to the backend where the data for each POI is stored. To achieve this goal, an Android\footnote{http://www.android.com/} app will be part of the solution. This mobile app will be developed to be used both by users and by people who own a space and want to install beacons in their spaces to offer some kind of interaction.
QR Codes already provide a similar solution to deliver content to mobile devices. However, the user needs to see the code printed somewhere and scan it using an app that reads this kind of code. Also, QR Codes only allow the user to receive content, such as a web page or a link to a video. This project can be considered an alternative to QR Codes, but without requiring the user to scan any code. The only requirement is that the user needs to turn on the Bluetooth receiver of his mobile device.
In the next section, we describe the main objectives of the project being proposed. Section \ref{sec:related_work} describes related work about some examples of apps that use BLE Beacons and about the development and deployment of context-aware mobile apps. In section \ref{sec:architecture}, the architecture of our solution is explained, along with its main components and the role of each one. Section \ref{sec:evaluation} is about how the solution will be evaluated. Finally, section \ref{sec:conclusions} presents a summary of the main ideas of the entire document.
\renewcommand{\thechapter}{2}
\chapter{Comparing Whole-Genome Assemblies}

\section{Introduction}

Here we propose an objective and holistic approach for evaluating and comparing the quality of assemblies derived from a same dataset. Our approach defines the quality of an assembly as the likelihood that the observed reads are generated from the given assembly, a value which can be accurately estimated by appropriately modeling the sequencing process. This basic idea was formulated in the 1990s in the pioneering work of Gene Myers~\cite{myers1995}, who suggested that the correct assembly of a set of reads must be consistent (in terms of the Kolmogorov-Smirnov test statistic) with the statistical characteristics of the data generation process. The same basic idea was further used in the arrival-rate statistic (A-statistic) of the Celera assembler~\cite{CeleraAssembler} to identify collapsed repeats, and as an objective function in quasi-species (ShoRAH~\cite{SHORAH}, ViSpA~\cite{VISPA}), metagenomic (Genovo~\cite{genovo2011}), and general-purpose assemblers~\cite{medvedev2009maximum}, as well as in the recent assembly evaluation frameworks ALE~\cite{clark2013ale} and CGAL~\cite{rahman2013cgal}.

In this chapter, we will describe in detail a mathematical model of the sequencing process that takes into account sequencing errors and mate-pair information, and show how this model can be computed in practice. We will also show that this \emph{de novo} probabilistic framework is able to automatically and accurately reproduce the reference-based ranking of assembly tools produced by the Assemblathon~\cite{earl2011assemblathon} and GAGE~\cite{salzberg2011gage} competitions. Our work is similar in spirit to the recently published ALE~\cite{clark2013ale} and CGAL~\cite{rahman2013cgal}; however, we provide here several extensions of practical importance. First, we propose and evaluate a sampling-based protocol for computing the assembly score, which allows the rapid approximation of assembly quality and enables the application of our methods to large datasets. Second, we evaluate the effect of unassembled reads and contaminant DNA on the relative ranking of assemblies according to the likelihood score. Finally, we demonstrate the use of our probabilistic quality measure as an objective function in optimizing the parameters of assembly programs. The software implementing our approach is made available, open-source and free of charge, at \url{http://assembly-eval.sourceforge.net/}.

%%%%%%%%%%%%%%%%
%% Theory %%
%%%%%%%%%%%%%%%%

\section{Methods}
\label{theory}

\subsection{Theoretical foundation for probabilistic evaluation}

In this section, we formalize the probabilistic formulation of assembly quality and the model of the sequencing process that allows us to compute the likelihood of any particular assembly of a set of reads. We will show that the proposed probabilistic score is correct in the sense that the score is maximized by the true genome sequence.

\subsubsection{Likelihood of an assembly}

Let $A$ denote the event that a given assembly is the true genome sequence, and let $R$ denote the event of observing a given set of reads.
In the following, we will use the same symbols to denote the assembly sequence and the set of reads as well as the corresponding events. According to Bayes' rule, given the observed set of reads, the probability of the assembly can be written as:
\begin{equation}
\Pr[A \vert R] = \frac{\Pr[R \vert A] \Pr[A]}{\Pr[R]}
\end{equation}
\noindent where $\Pr[A]$ is the \emph{prior probability} of observing the genome sequence $A$. Any prior knowledge about the genome being assembled (e.g., approximate length, presence of certain genes, etc.) can be included in $\Pr[A]$; however, for the purpose of this paper, we will assume that this prior probability is constant across the set of ``reasonable'' assemblies of a same set of reads. Formulating a precise mathematical framework for defining $\Pr[A]$ from the information commonly available about a genome is an extensive endeavor beyond the scope of this paper. Similarly, $\Pr[R]$ is the prior probability of observing the set of reads $R$. Since our primary goal is to compare multiple assemblies of a same set of reads, rather than to obtain a universally accurate measure of assembly quality, we can assume $\Pr[R]$ is a constant as well. Thus, for the purpose of comparing assemblies, the values $\Pr[A \vert R]$ and $\Pr[R \vert A]$ are equivalent. The latter, the posterior probability of a set of reads given a particular assembly of the data, can be easily computed on the basis of an appropriately defined model of the sequencing process and will be used in our paper as a proxy for assembly quality.

Under the assumption that individual reads are independent of each other (violations of this assumption in the case of mate-pair experiments will be discussed later in this section), $\Pr[R \vert A] = \prod_{r \in R} \Pr[r \vert A]$. If the set of reads is unordered, we need to account for the different permutations that generate the same set of reads; as this factor is a constant for any given set of reads, we ignore it in the rest of our paper. $\Pr[r \vert A]$, hereafter referred to as $p_r$, can be computed using an appropriate model of the sequencing process. Throughout the remainder of the paper, we will discuss increasingly complex models and their impact on the accuracy of the likelihood score.

\subsubsection{True genome obtains the maximum likelihood}
\label{maximizing_likelihood}

Any useful assembly quality metric must achieve its maximum value when evaluating the true genome sequence; otherwise, incorrect assemblies of the data would be preferred. We prove below that the likelihood measure proposed in our paper satisfies this property.

\edit{Assume that we have a set of reads $R$ from the true genome, produced by generating exactly one single-end read, without errors and with a fixed length, from each location in the genome. Given the set of reads $R$, the probability that a particular read is generated from the true genome is precisely the number of times the read occurs in $R$ divided by the size of $R$ (note that multiple reads can have the same sequence, e.g., when generated from repeats). Let $N_s$ denote the number of times that the sequence $s$ occurs in $R$, and $q_s = N_s/|R|$ denote the probability that sequence $s$ is generated from the true genome. To show that the true genome maximizes the likelihood score, let us assume that we have some assembly $A$ and that $p_s$ is the probability that the sequence $s$ is generated from the assembly $A$.}

\edit{Given assembly $A$, our likelihood score is then the product of ${p_s}^{N_s}$ over all sequences $s$ in $S$, which can be rewritten as $\prod_{s \in S} {p_s}^{q_s|R|} = (\prod_{s \in S} {p_s}^{q_s})^{|R|}$. Now, note that since $|R|$ is a fixed constant, maximizing the likelihood score is equivalent to maximizing}
\begin{align*}
\prod_{s \in S} {p_s}^{q_s}
\end{align*}
The logarithm of this expression can be re-written as
\begin{align*}
\log (\prod_{s \in S} {p_s}^{q_s}) & = \sum_{s \in S} q_s \log p_s \\
& = \sum_{s \in S} q_s \log (\frac{p_s}{q_s}) + \sum_{s \in S} q_s \log q_s \\
& = - D_{KL}(Q||P) - H(Q),
\end{align*}
\noindent where $D_{KL}(Q||P)$ is the KL-divergence for the distributions $Q = \{q_s \mid s \in S \}$ and $P = \{p_s \mid s \in S\}$, and $H(Q)$ is the Shannon entropy of $Q$. Since the KL-divergence is always non-negative and equal to 0 if and only if $Q = P$ (i.e., $q_s = p_s$ for all $s \in S$), the likelihood score is maximized when $q_s = p_s$ for all $s \in S$. To complete the proof, note that if the assembly $A$ is the true genome, then the probabilities $p_s$ associated with $A$ are precisely equal to $q_s$ for all sequences $s \in S$, and thus the likelihood score is maximized by the true genome.
Even though the true genome does maximize the likelihood in this model, there may be other assemblies that achieve the same optimal score as long as these assemblies yield probabilities $p_s$ which are equal to the probabilities $q_s$ for every sequence $s$. This can happen, for example, in the case of a misassembly that is nonetheless consistent with the generated reads. This situation highlights the loss of information inherent in modern sequencing experiments -- without additional long-range information, the information provided by the reads themselves is insufficient to distinguish between multiple possible reconstructions of a genome~\cite{kingsford2010assembly}. \subsubsection{Error-free model for fragment sequencing} \label{error_free_model} The most basic model for the sequencing process is the \emph{error-free model}. In this model, we assume reads of a given fixed length (a more general read length distribution can be included in the model but would not impact comparative analyses of assemblies derived from a same set of reads). We further assume that reads are uniformly sampled across the genome, i.e., that every position of the genome is equally likely to be a starting point for a read. This simplifying assumption is made by virtually all other theoretical models of genome assembly, despite the biases inherent to all modern sequencing technologies. A more accurate, technology-dependent, model can be obtained by including additional factors that account, for example, for DNA composition biases. For the purpose of generality, we restrict our discussion to the uniform sampling model. Furthermore, for the sake of simplicity, we assume (1) that the true genome consists of a single circular contiguous sequence, (2) that our assembly is also a single contig, and (3) that every read can be mapped to the assembly. We will later discuss extensions of our model that relax these assumptions. Under these assumptions, we can compute the probability of a read $r$ given the assembled sequence as: \begin{equation} \label{error_free_probability} p_r = \frac{n_r}{2L} \end{equation} where $n_r$ represents the number of places where the read occurs in the assembled sequence of length $L$. The factor $2$ is due to the fact that reads are sampled with equal likelihood from both the forward and reverse strands of a DNA molecule. This formulation was previously used by Medvedev \emph{et al.}~\cite{medvedev2009maximum} to define an objective function for genome assembly. \subsection{A realistic model of the sequencing process} The error-free model outlined above makes many simplifying assumptions that are not representative of real datasets. Here we demonstrate how the model can be extended to account for artifacts such as sequencing errors, mate-pair information, assemblies consisting of multiple contigs, and the presence of un-mappable reads. \subsubsection{Sequencing errors} \label{methods_errors} All current technologies for sequencing DNA have a small but significant probability of error. Here we focus on three common types of errors: the insertion, deletion, and substitution of a nucleotide. In the error-free model, the probability of a read having been generated from a position $j$ in the sequence is one if the read exactly matches the reference at that position and zero otherwise. 
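To make the error-free model concrete, the following short Python sketch (our own illustration, not part of the original implementation; all function names are ours) computes $p_r = n_r/(2L)$ by counting the exact occurrences of a read on both strands of a circular assembly:

\begin{verbatim}
def reverse_complement(seq):
    complement = {"A": "T", "C": "G", "G": "C", "T": "A"}
    return "".join(complement[base] for base in reversed(seq))

def count_occurrences(pattern, text):
    """Count possibly overlapping exact occurrences of pattern in text."""
    count, start = 0, 0
    while True:
        idx = text.find(pattern, start)
        if idx == -1:
            return count
        count += 1
        start = idx + 1

def error_free_read_probability(read, assembly):
    """p_r = n_r / (2L) for a circular assembly of length L."""
    L = len(assembly)
    # Extend the sequence so that matches wrapping around the origin are counted.
    circular = assembly + assembly[:len(read) - 1]
    n_r = (count_occurrences(read, circular)
           + count_occurrences(reverse_complement(read), circular))
    return n_r / (2.0 * L)
\end{verbatim}

Under this model, the likelihood of the entire assembly is simply the product of these per-read values, which in practice is accumulated as a sum of logarithms.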
We now extend this model such that the probability of each read having been generated from any position $j$ of the reference is a real value between zero and one, representing the likelihood that a sequencing instrument would have generated that specific read from that specific position of the reference. This value clearly depends on the number of differences between the sequence of the read and the sequence of the reference at position $j$. Given the assembled sequence, the probability of a particular read will be the cumulative probability of the read across all possible locations in the genome. Specifically, let us denote by $p_{r,j}$ the probability that read $r$ is observed by sequencing the reference, \emph{ending} at position $j$. Then, the total probability of the read $r$ is
\begin{align}
\label{prob_error_sum}
p_r = \frac{{\sum_{j=1}^{L} p^{\text{forward}}_{r,j}} + {\sum_{j = 1}^{L} p^{\text{reverse}}_{r,j}}}{2L}
\end{align}
The individual probabilities $p_{r,j}$ have a simple closed form if we do not model insertion and deletion errors and only allow substitution errors, which occur with probability $\epsilon$. The per-base probability of a substitution error can also be set individually for each base, based on the quality value produced by the sequencing instrument. In this case, $p_{r, j} = \epsilon^{s}(1 - \epsilon)^{l - s}$, where $s$ is the number of substitutions needed to match read $r$ to position $j$ of the reference sequence. In the more general case, the $p_{r,j}$ values can be computed using dynamic programming.

\subsubsection{Exact probability calculation via dynamic programming}
\label{methods_dynamic}

For a model of the sequencing process that allows insertions, deletions, and substitutions with specific probabilities, we can exactly compute the probability, $p_r = \Pr[r \vert A]$, of observing a read $r$ given an assembly $A$ using a dynamic programming algorithm. In general, we want to find the sum of the probabilities of all possible alignments of a read to a position of the assembly.

\begin{figure}[h!]
\begin{center}
\includegraphics[]{figures/fig_1_optimal_alignments}
\end{center}
\renewcommand{\baselinestretch}{1}
\small\normalsize
\begin{quote}
\caption[Multiple optimal read alignments.]{Two different optimal alignments of the read \textbf{ACG} to the assembly \textbf{ACCG}. Our dynamic programming algorithm finds the sum of the probabilities of all possible alignments.}
\label{different_optimal_alignments}
\end{quote}
\end{figure}
\renewcommand{\baselinestretch}{2}
\small\normalsize

The number of such possible alignments grows exponentially as a function of read length. Most of those alignments have a very small probability; however, several alignments may have probabilities that are equal or close to the optimum. For example, the two alignments of the same pair of sequences in Figure~\ref{different_optimal_alignments} have the same probability and are both optimal alignments. We use a dynamic programming algorithm (similar to the ``forward'' algorithm in Hidden Markov Models) to efficiently calculate the sum of the probabilities of all alignments of a read to the assembly as follows. In formula~\eqref{prob_error_sum}, $p^{\text{forward}}_{r,j}$ and $p^{\text{reverse}}_{r,j}$ are the sums of the probabilities of \emph{all} possible alignments of the read $r$ to, respectively, the reference and its reverse complement, ending at position $j$. We define $T[x,y]$ as the probability of observing prefix $[1 \ldots y]$ of the read $r$ if $y$ bases are sequenced from the reference, ending at position $x$. Therefore, $p_{r, j} = T[j, l]$. $T[x, 0]$ represents the probability of observing an empty sequence if we sequence zero bases and is set to 1. $T[0, y]$ represents the probability of observing prefix $[1 \ldots y]$ of the read if $y$ bases are sequenced from the reference, ending at position $0$ (before the beginning), and is set to 0. For $x \geq 1$ and $y \geq 1$, $T[x, y]$ is recursively defined:
\begin{align}
\label{dp_single}
T[x, y] = & \quad T[x - 1, y - 1] \Pr[\operatorname{Substitute}(A[x], r[y])] \\
& + T[x, y - 1] \Pr[\operatorname{Insert}(r[y])] \notag\\
& + T[x - 1, y] \Pr[\operatorname{Delete}(A[x])], \notag
\end{align}
\noindent where $r[y]$ and $A[x]$ represent the nucleotides at positions $y$ and $x$ of the read $r$ and the assembly $A$, respectively. $\Pr[\operatorname{Substitute}(A[x], r[y])]$ is the probability of observing the nucleotide $r[y]$ by sequencing the nucleotide $A[x]$. In our experiments, we did not distinguish between different types of errors and considered their probability to be $\epsilon$ and the probability of observing the correct nucleotide to be $1 - \epsilon$. A direct transcription of recurrence~\eqref{dp_single} into code is sketched below.

The dynamic programming algorithm outlined above has a running time of $O(lL)$ per read. Even though the running time is polynomial, it is slow in practice. However, we can speed it up by using alignment seeds. The seeds give us the regions of the assembly where a read may align with high probability; we apply the dynamic programming only to those regions and obtain a very good approximation of the total probability. We use exact seeds ($k$-mers) of a given length to build a hash index of the assembly sequence. Then, each read is compared only to the regions with which it shares a $k$-mer.

\subsubsection{Mate pairs}

Many of the current sequencing technologies produce paired reads -- reads generated from the opposite ends of the same DNA fragment. This information is extremely valuable in resolving genomic repeats and in ordering the contigs along long-range scaffolds; however, paired reads violate the assumption, made in the discussion above, that reads are sampled independently. To address this issue, we can use the pairs rather than the individual reads as the underlying objects from which the assembly likelihood is computed. To account for the possibility that assembly errors may result in violations of the constraints imposed by the paired reads, we only consider pairs for which both ends align to a same contig or scaffold within the constraints imposed by the parameters of the sequencing experiment.
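Returning for a moment to the single-read computation, recurrence~\eqref{dp_single} can be transcribed almost literally into code. The minimal Python sketch below is our own illustration (it fills the full $O(lL)$ table and omits the $k$-mer seeding heuristic), with a uniform error probability $\epsilon$ as described above:

\begin{verbatim}
def read_probability_forward(read, assembly, eps=0.01):
    """Sum of the probabilities of all alignments of `read` ending at each
    position of `assembly` (forward strand only), following the recurrence
    for T[x, y] above, with all error types assigned probability eps."""
    L, l = len(assembly), len(read)
    # T[x][y]: probability of observing read[:y] from sequencing ending at x.
    T = [[0.0] * (l + 1) for _ in range(L + 1)]
    for x in range(L + 1):
        T[x][0] = 1.0                                   # empty prefix
    for x in range(1, L + 1):
        for y in range(1, l + 1):
            match = 1.0 - eps if assembly[x - 1] == read[y - 1] else eps
            T[x][y] = (T[x - 1][y - 1] * match          # substitution / match
                       + T[x][y - 1] * eps              # read base is an insertion
                       + T[x - 1][y] * eps)             # reference base deleted
    # p_{r,j} = T[j][l]; sum over all possible end positions j.
    return sum(T[x][l] for x in range(1, L + 1))
\end{verbatim}

The probability $p_r$ of formula~\eqref{prob_error_sum} is then obtained by repeating the computation on the reverse complement and dividing the combined sum by $2L$.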
Any pairs that violate these constraints are classified as unassembled. Note that in addition to sequencing errors, we now also handle fragment sizing errors -- deviations of the estimated distance between the paired reads from the distance implied by the sequencing experiment. We model the distribution of fragment sizes within a same library by a normal distribution, using user-supplied parameters, and use this information to appropriately scale the likelihood estimate for each possible placement of a mate pair along the genome.

We modify the dynamic programming recurrence from formula~\eqref{dp_single} to handle the probability calculation for the paired reads as follows. The probability of the first read in the pair is calculated in the same way as in formula~\eqref{dp_single}. For the second read, we adjust the dynamic programming to ensure that it is aligned within a certain distance downstream of the alignment of the first read: we modify the first column of the dynamic programming table of the \emph{second} read in the pair to take into account the distance from the first read. Formally, given a paired read, we define $T_2[x,y]$ as the probability of observing prefix $[1 \ldots y]$ of the second read in the pair if $y$ bases are sequenced from the reference, ending at position $x$. Assume that the second read occurs after the first read and is separated from it by a normally-distributed distance with mean $\mu$ and standard deviation $\sigma$. Therefore,
\begin{align}
T_2[x, 0] = \sum_{i=1}^{x}{\Pr[\operatorname{insert}(i) \mid N(\mu,\sigma)] \cdot T_1[x-i,l]},
\end{align}
\noindent where $\Pr[\operatorname{insert}(n) \mid N(\mu,\sigma)]$ is the probability of observing an insert size of length $n$ under a normal distribution with parameters $\mu$ and $\sigma$, $T_1$ is the dynamic programming table of the first read, and $l$ is the length of the first read in the pair. Instead of using two tables, we can concatenate the read pair together with a special character ($M$), which signals when the insert size should be taken into account. For $x \geq 1$ and $y \geq 1$, $T[x, y]$ is then recursively defined as follows:
\begin{equation}
\begin{aligned}
T[x, y] = \quad \text{if }r[y] == M&\begin{cases} \sum_{i=1}^{x}{\Pr[\operatorname{insert}(i) \mid N(\mu,\sigma)] \cdot T[x-i,y-1]} \end{cases} \\
\text{else }&\begin{cases} T[x - 1, y - 1] \Pr[\operatorname{Substitute}(A[x], r[y])] \\ \quad + T[x, y - 1]\Pr[\operatorname{Insert}(r[y])] \\ \quad + T[x - 1, y]\Pr[\operatorname{Delete}(A[x])] \end{cases}
\end{aligned}
\end{equation}

\subsubsection{Assemblies containing more than one contig}

As we mentioned in the introduction, the output of an assembler usually consists of a (large) set of contigs rather than one single contig representing the genome being assembled. In the extreme case, an ``assembler'' may return the set of unassembled input reads (\edit{or the set of all k-mers in De Bruijn-based assemblers}) as its output. Our likelihood score must be modified to account for such fragmented assemblies. In practice, most assemblers join contigs only if they overlap by more than a certain number of bases; here, however, we only consider the case where contigs are non-overlapping substrings of the true genome. In this case, the length of the original genome must be \emph{at least} the sum of the lengths of the contigs, that is, $\sum L_j$, where $L_j$ is the length of the $j$th contig. Therefore, the probability of every read is at most
\begin{equation}
\frac{n_r}{2\sum L_j}
\end{equation}
Overlapping contigs can be handled by reducing the length of the contigs by a value representing the minimum overlap required by the assembler, as performed, for example, in Genovo~\cite{genovo2011}.

\subsubsection{Reads that do not align well}
\label{methods_practical_unassembled}

In practice, popular assemblers do not incorporate every read in the assembly. Possible reasons include assembly errors (such as collapsed tandem repeats), reads with high error rates, or contamination in the DNA sample. These ``singleton'' or ``chaff'' reads cannot be handled by the error-free likelihood model, as the likelihood of any assembly that does not incorporate every read would be zero. When sequencing errors are modeled, every read obtains a non-zero likelihood, even if it does not align to the assembly; however, since, in general, a non-trivial fraction of the total set of reads cannot be mapped to the assembly, the singleton reads would, by their sheer number, dominate the probability calculation. To account for this factor, we argue that for any read that does not align well, the overall probability of the assembly should not be lower than the probability of the same assembly when the missing read is appended to its sequence as a separate contig. The effect of such an addition on the overall probability can be calculated as follows. First, the probability of observing this read exactly, $\left(\frac{\Pr[\text{exact match}]}{2L}\right)$, is multiplied into the product of the probabilities of all mapped reads. Second, the probabilities of the mapped reads decrease slightly due to the increase in the length of the assembled sequence. For simplicity, let us assume an error-free model where each read maps to exactly one position on the assembled sequence, and let $k$ denote the number of the original reads. The ratio of the new probability of all original reads to their probability before adding the new read is:
\[
\frac{\frac{1}{(L + l)^k}}{\frac{1}{L^k}} = \left(\frac{L}{L + l}\right)^k = \left(1-\frac{l}{L + l}\right)^k \approx e^{-\frac{lk}{L}}
\]
Therefore, if the probability of observing a read is less than
\begin{equation}
\label{probability_threshold}
\frac{\Pr[\text{exact match}]}{2L}e^{-\frac{l\left\vert R\right\vert}{L}},
\end{equation}
we consider this read as ``unmapped'' and use formula~\eqref{probability_threshold} as its probability. The probability of an exact match, $\Pr[\text{exact match}]$, is approximated by $\left(1 - \epsilon\right)^{l}$, where $\epsilon$ is the probability of an error (a mismatch, an insertion, or a deletion).

\subsection{Performance considerations}

\subsubsection{Estimating the average read likelihood by sampling}
\label{sampling_reads}

Depending on the specific characteristics of the chosen sequencing model, the computation of the probability $\Pr[R \vert A]$ can be expensive for the dataset sizes commonly encountered in current projects (tens to hundreds of millions of reads). In such cases, we can approximate the likelihood of an assembly by using a random subset of the reads $R^\prime \subseteq R$. To counteract the effect of the size of the sample on the computed probability, we define the assembly quality as the geometric mean of the individual read probabilities:
\begin{equation}
\label{average_probability}
\operatorname{AP}(R^\prime) = \left(\prod_{r \in R^\prime} p_r\right)^{\frac{1}{\left|R^\prime\right|}}
\end{equation}
The logarithm of this value, the Log Average Probability (LAP), is reported in the remainder of the paper as the assembly quality ``score'':
\begin{equation}
\label{average_log_probability}
\operatorname{LAP}(R^\prime) = \log_{10} \left( \operatorname{AP}(R^\prime) \right) = \frac{\sum_{r \in R^\prime} \log_{10} p_r}{\left|R^\prime\right|}
\end{equation}
In other words, we define the assembly quality as the average log likelihood of the reads given an assembly. This formulation also allows us to estimate the accuracy of the approximate likelihood value produced by sub-sampling the set of reads. According to sampling theory, the distribution of the scores across multiple samples has a mean equal to the true likelihood of the assembly (computed from all the reads) and a standard error proportional to $\frac{1}{\sqrt{\left|R^\prime\right|}}$, i.e., the larger the sample, the more accurate our estimate of the true likelihood of the assembly. Since the probability of a read is bounded from below by formula~\eqref{probability_threshold}, the variance of the sample can also be bounded using this value. In practice, we increase the sample size until the assemblies can be unambiguously distinguished by the LAP value. Specifically, we increase the sample size, by binary search, until the LAP values are separated by at least a single standard deviation. The level of subsampling required will, thus, depend on the extent of the differences between the assemblies -- for very different assemblies, low levels of subsampling are sufficient.

\subsubsection{Approximating the likelihood value using an aligner}
\label{methods_aligner}

Alternatively, when it is impractical to calculate exact probabilities for large sets of reads, we can approximate these probabilities using fast and memory-efficient alignment search programs, which internally model the sequencing process.
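Whichever method is used to obtain the per-read probabilities, the LAP of formula~\eqref{average_log_probability} is simply their average log-probability, with poorly-aligning reads clamped to the threshold of formula~\eqref{probability_threshold}. The following Python sketch is our own illustration; the \texttt{read\_probability} argument is an assumed callback (e.g., the dynamic-programming or aligner-based estimate):

\begin{verbatim}
import math

def lap_score(reads, read_probability, assembly_length, read_length, eps=0.01):
    """Log Average Probability (LAP): mean log10 read probability, with reads
    that do not align well clamped to the threshold described above."""
    exact_match = (1.0 - eps) ** read_length
    threshold = (exact_match / (2.0 * assembly_length)
                 * math.exp(-read_length * len(reads) / assembly_length))
    logs = [math.log10(max(read_probability(r), threshold)) for r in reads]
    return sum(logs) / len(logs)
\end{verbatim}

Reads whose probability falls below the threshold are thereby treated as ``unmapped'', exactly as described in the previous subsection.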
We use Bowtie 2~\cite{langmead2012fast} to align the reads to the assembly; however, our programs are easy to adapt to any read alignment tool that reports its results in the SAM~\cite{li2009sequence} format. For each reported alignment, we use the number of substitutions $s$ to compute the probability $p_{r}$. The probability of an alignment of read $r$ ending at position $j$, $p_{r,j}$, can be approximated by $\epsilon^{s}(1 - \epsilon)^{l - s}$, and
\begin{equation}
p_{r} = \frac{\sum_{j \in S_r} p_{r,j}}{2L},
\end{equation}
where $S_r$ is the set of alignments in the SAM file for the read $r$.

We can further extend this equation to mated reads. A pair of mated reads aligns if the distance and orientation of the alignments of the two mates are consistent with the parameters of the experimental design. Given read $i_1$ and its mate $i_2$, we compute $p_{(i_1,i_2)}$ by multiplying the probabilities of individually aligning each mate at their respective positions with the probability that they are separated by the observed distance from each other. That is,
\begin{equation}
\label{mate_pair_prob}
p_{(i_1,i_2)} = \frac{\sum_{(j_1,j_2) \in S_{(i_1,i_2)}} p_{i_1,j_1} p_{i_2,j_2} \Pr[\textrm{insert}(j_2 - (j_1 + l_1))]}{2(L - l)},
\end{equation}
where $p_{i_1, j_1} = \epsilon^{s_1}(1 - \epsilon)^{l_1 - s_1}$. Mate-pair insert sizes are assumed to follow a normal distribution whose mean and standard deviation are estimated from the parameters of the sequencing process; unless otherwise stated, the standard deviation is 10\% of the insert size. If only one of the mates, $i_1$ or $i_2$, maps, the probability $p_{(i_1,i_2)}$ would be $0$; in this case, we instead use formula~\eqref{probability_threshold} to set the probability. In our experiments, Bowtie 2 was used to approximate the read probabilities for the larger datasets; however, it could be substituted with any other aligner.

%%%%%%%%%%%%%%%%
%% Methods %%
%%%%%%%%%%%%%%%%

\subsection{Datasets}

The read data for \emph{Rhodobacter sphaeroides} 2.4.1 was downloaded from \url{http://gage.cbcb.umd.edu/data/Rhodobacter_sphaeroides}, and the corresponding reference sequence was obtained from the NCBI RefSeq database (NC\_007493.1, NC\_007494.1, NC\_009007.1, NC\_007488.1, NC\_007489.1, NC\_007490.1, NC\_009008.1). In addition, two more \emph{Rhodobacter} genomes were selected as reference genomes, specifically \emph{R. sphaeroides} ATCC 17025 (NCBI IDs NC\_009428.1, NC\_009429.1, NC\_009430.1, NC\_009431.1, NC\_009432.1) and \emph{R. capsulatus} SB1003 (NC\_014034.1, NC\_014035.1).

The read data for \emph{Staphylococcus aureus} USA300 was downloaded from \url{http://gage.cbcb.umd.edu/data/Staphylococcus_aureus}, and the corresponding reference sequence was obtained from the NCBI RefSeq database (NC\_010063.1, NC\_010079.1, NC\_012417.1). In addition, two more \emph{Staphylococcus} genomes were selected as reference genomes, specifically \emph{S. aureus} 04-02981 (CP001844) and \emph{S. epidermidis} ATCC 12228 (AE015929, AE015930, AE015931, AE015932, AE015933, AE015934, AE015935).

The read data for human chromosome 14 was downloaded from \url{http://gage.cbcb.umd.edu/data/Hg_chr14/}, and the corresponding reference sequence was obtained from the NCBI RefSeq database (NC\_000014.8).

The Assemblathon~1 competition evaluates assemblies on a simulated short-read dataset generated from a simulated 110 Mbp diploid genome. The competition provides sequence libraries with varying insert sizes (200-10,000 bp) and coverage (20-40x). Assemblathon~1 allowed teams to submit multiple entries, but for our analyses we only examine the top-ranking assemblies from each team. The raw reads and the consensus sequences of the top-ranking assemblies were downloaded from \url{http://korflab.ucdavis.edu/Datasets/Assemblathon/Assemblathon1/}.

Also used in our analyses is the \emph{E. coli} K12 MG1655 dataset, generated using Illumina MiSeq technology (300 bp insert size, 370x coverage) (\url{http://www.illumina.com/systems/miseq/scientific_data.ilmn}).

%%%%%%%%%%%%%%%%
%% Results %%
%%%%%%%%%%%%%%%%

\section{Results}

\subsection{Performance-related approximations do not significantly affect the likelihood score}

The full and exact computation of the assembly likelihood score is computationally intensive and ultimately impractical for the analysis of large genomes sequenced with next-generation technologies. We have highlighted in the Methods section several approaches that can be used to reduce the computational requirements and allow the application of our methods in practical settings, including the computation of the likelihood score on subsets of the original set of reads and the approximation of the score from the output of an alignment program. As we will show below, these approximations do not affect the comparative ranking of multiple assemblies derived from a same dataset.

\subsubsection{The likelihood score is robust under sampling.}

To assess the effect of subsampling, we relied on the collection of assemblies of the human chromosome 14 made available by the GAGE assembly `bake-off'. We sampled random subsets of increasing size (one trial per size) from the over 60 million reads and computed the likelihood score based only on the sampled reads.

\begin{figure}[h!]
\begin{center}
\includegraphics[width=4.86in]{figures/fig_2_hg_mean_samples}
\end{center}
\renewcommand{\baselinestretch}{1}
\small\normalsize
\begin{quote}
\caption[LAP-based evaluation of the assemblies for the Human chromosome 14 via sampling.]{LAP-based evaluation of the assemblies for the Human chromosome 14 via sampling. The $x$-axis represents the number of sampled reads. For each assembly, we plot the corresponding LAP on a chosen subsample along with the standard deviation. The relative ranking of assemblies becomes fixed with 10,000 reads, which is less than 0.02\% of the original reads.}
\label{hg_mean_samples}
\end{quote}
\end{figure}
\renewcommand{\baselinestretch}{2}
\small\normalsize

As seen in Figure~\ref{hg_mean_samples}, the overall ranking of the different assemblies stabilizes after sampling just 10,000 reads, i.e., less than 0.02\% of the entire dataset. After this point, the scores of individual assemblies differ by more than the standard deviation of the sub-sampled scores, indicating that the relative ranking of the assemblies can be determined with high statistical confidence. This result suggests a practical strategy for computing the assembly likelihood wherein datasets of increasing size are repeatedly sampled from the set of reads until the likelihood scores of the compared assemblies can be distinguished from each other.
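One possible realization of this strategy is sketched below; this is our own illustration, in which \texttt{lap\_sample(name, n)} is an assumed helper returning the LAP score and its standard error for a random sample of $n$ reads scored against the named assembly:

\begin{verbatim}
def compare_by_subsampling(assemblies, lap_sample, total_reads,
                           start_fraction=0.0005, k=1.0):
    """Increase the sample size until every pair of assemblies is separated
    by at least k standard errors of their sampled LAP scores."""
    n = max(1, int(start_fraction * total_reads))
    while True:
        scores = {name: lap_sample(name, n) for name in assemblies}  # (LAP, stderr)
        separated = all(
            abs(scores[a][0] - scores[b][0]) >= k * max(scores[a][1], scores[b][1])
            for i, a in enumerate(assemblies)
            for b in assemblies[i + 1:])
        if separated or n >= total_reads:
            return n, scores
        n = min(2 * n, total_reads)
\end{verbatim}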
The search for the appropriate sample size can start from a reasonable `guess' (e.g., 0.05\% of the total set of reads), which is then iteratively doubled until the likelihood scores are separated from each other by a given multiple of the sampling-induced standard deviation. \subsubsection{Aligner-based approximation correlates with the dynamic-programming computation of the likelihood score.} As outlined in the Methods section, we relied on an alignment program (in our case, Bowtie 2~\cite{langmead2012fast}) to estimate the likelihood of individual reads based on their alignment along the assembly. This approach is substantially faster than the more accurate dynamic programming algorithm that computes the cumulative likelihood of all possible alignments of a read against the assembly. \begin{figure}[h!] \begin{center} \includegraphics[width=4.86in]{figures/fig_3_aligner_vs_dynamic_sa} \end{center} \renewcommand{\baselinestretch}{1} \small\normalsize \begin{quote} \caption[Comparison of the read probability calculation methods]{Comparison of the read probability calculation methods for \emph{S. aureus} with 4,788,174 reads. Each mark on the plot represents a single read. The read's position is determined by the probability calculated from our dynamic programming method (y-axis) and Bowtie 2 (x-axis). Points on the line $y=x$ denote reads that were given the same probability by both methods. Since Bowtie 2 only finds the best alignment, it usually reports a slightly lower probability. A probability threshold of 1e-30 is shown for the dynamic programming method. The read probabilities that fall below this threshold would be rounded up to 1e-30 during LAP computation.} \label{fig:aligner_vs_dynamic} \end{quote} \end{figure} \renewcommand{\baselinestretch}{2} \small\normalsize Figure~\ref{fig:aligner_vs_dynamic} compares the per-read likelihood values with respect to the complete genome sequence of \emph{Staphylococcus aureus}, using data provided by the GAGE competition. In this plot, each read is represented by a point whose coordinates represent the corresponding likelihood scores computed through full dynamic programming (y axis) and from Bowtie 2 alignments (x axis). As the full dynamic programming approach sums over all possible alignments, the corresponding likelihood values are higher (points occur above the diagonal) than those estimated by Bowtie 2. The difference between the two methods becomes less noticeable as the likelihood increases as more of the probability mass is concentrated around the best alignment of a read to the reference. \subsubsection{The likelihood scores correlate with reference-based validation} The recent assembly competitions GAGE~\cite{salzberg2011gage} and Assemblathon~1 \cite{earl2011assemblathon} relied on a combination of \emph{de novo} and reference-based metrics to compare and rank different assemblies. For the majority of these datasets, a complete or high-quality draft sequence was available, allowing the authors to objectively determine all the errors in the assemblies by aligning them to the reference sequences. Based on this information, the GAGE and Assemblathon 1 teams proposed several assembly quality metrics that simultaneously capture some aspects of the contiguity and correctness of an assembly. Here we compare our \emph{de novo} likelihood score to these reference-based metrics. 
Generally, the \emph{de novo} LAP scores agree with the reference-corrected contiguity values (see Tables~\ref{tab:rhodobacter},~\ref{tab:staph}, and~\ref{tab:hg14}). Furthermore, the reference genome assembly (assumed to be the most correct reconstruction of the genome being analyzed) achieves the highest LAP score, while the references derived from closely-related organisms score considerably worse than all the other assemblies. In other words, the \emph{de novo} LAP scores accurately capture the relative quality of the different assemblies.

It is important to note that there are several exceptions to these general observations. In the case of \emph{S. aureus} USA300 (Table~\ref{tab:staph}), the read-based LAP scores for the ABySS assembly (computed on both contigs and scaffolds) are better than those obtained for the reference genome, contradicting our intuition, since ABySS's reference-corrected contiguity is worse. This result highlights the importance of accurately modeling the sequencing experiment when computing the LAP scores: once mate-pair information is taken into account, the LAP scores correctly identify the best assembly. This phenomenon is due to the fact that the ABySS assembly is able to incorporate more of the reads; however, their placement in the assembly is inconsistent with the mate-pair linkage information.

\begin{landscape}
\renewcommand{\baselinestretch}{1}
\small\normalsize
\begin{table}[tb!]
\centering
\tiny
\begin{tabular}{|c|c|c|c|c|c|c|c|c|c|c|}
\hline
& \multicolumn{4}{c|}{Contigs} & \multicolumn{4}{c|}{Scaffolds} & & \\
\hline
Assembler & LAP reads & LAP mates & N50 (kb) & CN50 (kb) & LAP reads & LAP mates & N50 (kb) & CN50 (kb) & Unaligned reads (frac) & Unaligned mates (frac) \\
\hline
ABySS & -20.924 & -27.365 & 5.9 & 4.2 & -20.929 & -27.320 & 9 & 5 & 0.228 & 0.524\\
Allpaths-LG & {\bf -20.795} & {\bf -27.141} & 42.5 & {\bf 34.4} & {\bf -20.796} & {\bf -27.099} & {\bf 3,192} & {\bf 3,192} & {\bf 0.212} & {\bf 0.441} \\
Bambus2 & -21.528 & -27.439 & 93.2 & 12.8 & -21.531 & -27.424 & 2,439 & 2,419 & 0.270 & 0.501\\
CABOG & -22.550 & -27.749 & 20.2 & 17.9 & -22.550 & -27.714 & 66 & 55 & 0.345 & 0.540\\
MSR-CA & -21.496 & -27.407 & 22.1 & 19.1 & -21.497 & -27.324 & 2,976 & 2,966 & 0.268 & 0.478\\
SGA & -20.896 & -27.575 & 4.5 & 2.9 & -21.030 & -27.416 & 51 & 51 & 0.237 & 0.541 \\
SOAPdenovo & -20.816 & -27.160 & {\bf 131.7} & 14.3 & -20.816 & -27.152 & 660 & 660 & 0.214 & 0.453\\
Velvet & -20.903 & -27.314 & 15.7 & 14.5 & -20.907 & -27.246 & 353 & 270 & 0.219 & 0.471\\
\emph{R. sphaeroides} ATCC 17025 & -29.391 & -29.973 & 3,218 & 3,218 & -29.391 & -29.973 & 3,218 & 3,218 & 0.813 & 0.904\\
\emph{R. capsulatus} & -29.953 & -29.997 & 3,739 & 3,739 & -29.953 & -29.997 & 3,739 & 3,739 & 0.978 & 0.995\\
\emph{truth} & -20.769 & -27.071 & 3,189 & 3,189 & -20.769 & -27.071 & 3,189 & 3,189 & 0.209 & 0.432\\
\hline
\end{tabular}
\caption[\emph{Rhodobacter sphaeroides} 2.4.1 assembly evaluation]{Assembly likelihood scores for \emph{Rhodobacter sphaeroides} 2.4.1 from the GAGE project~\cite{salzberg2011gage}. The results are presented separately for the contigs and scaffolds and include the fraction of unassembled reads (singletons), the LAP scores computed on unmated reads (LAP reads) or mate-pairs (LAP mates), the N50 contig/scaffold sizes (N50), and the reference-corrected N50 contig/scaffold sizes (CN50). The best (maximum) value for each genome-measure combination is highlighted in bold. The results for the reference assembly (either the complete genome or a high-quality draft) are given in the row marked \emph{truth}. In addition, we provide the results for a closely related strain and species. All values, except the LAP scores, were taken from the GAGE publication. A threshold probability of 1e-30 was used for calculating the LAP scores. The standard deviations of the LAP reads and LAP mates scores are 0.00685 and 0.00969, respectively.}
\label{tab:rhodobacter}
\end{table}
\renewcommand{\baselinestretch}{2}
\small\normalsize
\end{landscape}

\begin{landscape}
\renewcommand{\baselinestretch}{1}
\small\normalsize
\begin{table*}[tb!]
\centering
\tiny
\begin{tabular}{|c|c|c|c|c|c|c|c|c|c|c|}
\hline
& \multicolumn{4}{c|}{Contigs} & \multicolumn{4}{c|}{Scaffolds} & &\\
\hline
Assembler & LAP reads & LAP mates & N50 (kb) & CN50 (kb) & LAP reads & LAP mates & N50 (kb) & CN50 (kb) & Unaligned reads (frac) & Unaligned mates (frac)\\
\hline
ABySS & {\bf -16.608} & -24.692 & 29.2 & 24.8 & {\bf -16.611} & -24.584 & 34 & 28 & {\bf 0.318} & 0.522\\
Allpaths-LG & -18.018 & -23.974 & 96.7 & {\bf 66.2} & -18.018 & {\bf -23.760} & 1,092 & {\bf 1,092} & 0.374 & {\bf 0.494}\\
Bambus2 & -18.083 & -24.256 & 50.2 & 16.7 & -18.085 & -23.899 & 1,084 & 1,084 & 0.375 & 0.503\\
MSR-CA & -18.282 & -24.258 & 59.2 & 48.2 & -18.282 & -23.926 & {\bf 2,412} & 1,022 & 0.389 & 0.508\\
SGA & -17.937 & -27.019 & 4 & 4 & -18.250 & -24.906 & 208 & 208 & 0.384 & 0.578\\
SOAPdenovo & -17.830 & {\bf -23.892} & {\bf 288.2} & 62.7 & -17.830 & -23.862 & 332 & 288 & 0.362 & 0.499\\
Velvet & -17.867 & -24.258 & 48.4 & 41.5 & -17.867 & -23.925 & 762 & 126 & 0.363 & 0.503\\
\emph{S. aureus} 04-02981 & -19.960 & -25.314 & 2,821 & 2,821 & -19.960 & -25.314 & 2,821 & 2,821 & 0.456 & 0.572\\
\emph{S. epidermidis} & -29.635 & -29.951 & 2,499 & 2,499 & -29.635 & -29.951 & 2,499 & 2,499 & 0.972 & 0.988\\
\emph{truth} & -17.741 & -23.509 & 2,873 & 2,873 & -17.741 & -23.509 & 2,873 & 2,873 & 0.358 & 0.473\\
\hline
\end{tabular}
\caption[\emph{Staphylococcus aureus} USA300 assembly evaluation.]{Assembly likelihood scores for \emph{Staphylococcus aureus} USA300 from the GAGE project~\cite{salzberg2011gage}. The results are presented separately for the contigs and scaffolds and include the fraction of unassembled reads (singletons), the LAP scores computed on unmated reads (LAP reads) or mate-pairs (LAP mates), the N50 contig/scaffold sizes (N50), and the reference-corrected N50 contig/scaffold sizes (CN50). The best (maximum) value for each genome-measure combination is highlighted in bold. The results for the reference assembly (either the complete genome or a high-quality draft) are given in the row marked \emph{truth}. In addition, we provide the results for a closely related strain and species. All values, except the LAP scores, were taken from the GAGE publication. A threshold probability of 1e-30 was used for calculating the LAP scores. The standard deviations of the LAP reads and LAP mates scores are 0.00740 and 0.0105, respectively.}
\label{tab:staph}
\end{table*}
\renewcommand{\baselinestretch}{2}
\small\normalsize
\end{landscape}

\begin{landscape}
\renewcommand{\baselinestretch}{1}
\small\normalsize
\begin{table}[tb!]
\centering
\tiny
\begin{tabular}{|c|c|c|c|c|c|c|c|c|c|c|c|}
\hline
& \multicolumn{4}{c|}{Contigs} & \multicolumn{4}{c|}{Scaffolds} & & & \\
\hline
Assembler & LAP reads & LAP mates & N50 (kb) & CN50 (kb) & LAP reads & LAP mates & N50 (kb) & CN50 (kb) & CGAL Score & Unaligned reads (frac) & Unaligned mates (frac)\\
\hline
ABySS & -18.473 & -23.801 & 2 & 2 & -18.474 & -23.787 & 2.1 & 2 & -15.21 x $10^8$ & 0.257 & 0.504\\
Allpaths-LG & -15.813 & -21.413 & 36.5 & 21 & -15.824 & -21.314 & {\bf 81,647} & {\bf 4,702} & -13.11 x $10^8$ & 0.115 & 0.239\\
Bambus2 & -18.606 & -23.474 & 5.9 & 4.3 & -18.642 & -23.343 & 324 & 161 & - & 0.258 & 0.422\\
CABOG & {\bf -15.625} & {\bf -21.128} & {\bf 45.3} & {\bf 23.7} & {\bf -15.626} & {\bf -21.041} & 393 & 26 & {\bf -12.25 x} $\mathbf{10^8}$ & 0.109 & {\bf 0.229}\\
MSR-CA & -16.421 & -22.428 & 4.9 & 4.3 & -16.436 & -21.861 & 893 & 94 & - & 0.122 & 0.276\\
SGA & -15.712 & -22.990 & 2.7 & 2.7 & -16.909 & -22.326 & 83 & 79 & - & 0.134 & 0.328\\
SOAPdenovo & -15.702 & -21.705 & 14.7 & 7.4 & -15.734 & -21.594 & 455 & 214 & * & {\bf 0.101} & 0.269 \\
Velvet & -18.000 & -23.468 & 2.3 & 2.1 & -18.140 & -23.375 & 1,190 & 27 & - & 0.214 & 0.442\\
\emph{truth} & -15.466 & -21.001 & 107,349.50 & 107,349.50 & -15.466 & -21.002 & 107,349.50 & 107,349.50 & -11.25 x $10^8$ & 0.093 & 0.211 \\
\hline
\end{tabular}
\caption[\emph{Homo sapiens} chr 14 assembly evaluation]{Assembly likelihood scores for human chromosome 14 from the GAGE project~\cite{salzberg2011gage} using a 10,000 read sample. The results are presented separately for the contigs and scaffolds and include the fraction of unassembled reads (singletons), the LAP scores computed on unmated reads (LAP reads) or mate-pairs (LAP mates), the N50 contig/scaffold sizes (N50), and the reference-corrected N50 contig/scaffold sizes (CN50). The best (maximum) value for each genome-measure combination is highlighted in bold. The results for the reference assembly (either the complete genome or a high-quality draft) are given in the row marked \emph{truth}. CGAL scores calculated from the long insert library were taken from the CGAL publication; the authors only provided scores for the top three assemblies (Bowtie 2 could not successfully map reads to the SOAPdenovo assembly). All values, except the LAP and CGAL scores, were taken from the GAGE publication. A threshold probability of 1e-30 was used for calculating the LAP scores. The standard deviation for both the LAP reads and LAP mates scores is 0.15.}
\label{tab:hg14}
\end{table}
\renewcommand{\baselinestretch}{2}
\small\normalsize
\end{landscape}

In the case of the human chromosome 14 assembly (Table~\ref{tab:hg14}), the scaffold-based results do not agree with the reference-corrected contiguity values: the CABOG assembler outperforms Allpaths-LG in all but the corrected scaffold N50 measure. This result highlights the inherent difficulty of assessing assembly quality even when a reference sequence is available. In this case, the Allpaths-LG scaffolds cover a larger stretch of the genome, albeit at the cost of errors both within the contigs and in their relative placement. Furthermore, the CABOG assembler is able to align nearly 1\% more mate-pairs than Allpaths-LG, despite having a far smaller scaffold size.

\begin{figure}[ht!]
\begin{center} \includegraphics[width=4.86in]{figures/fig_4_asm1_ranks_reorder} \end{center} \renewcommand{\baselinestretch}{1} \small\normalsize \begin{quote} \caption[Comparison between LAP scores and the rankings of the top assemblies generated in the Assemblathon~1 competition.] {Comparison between LAP scores and the rankings of the top assemblies generated in the Assemblathon~1 competition. The colors represent the relative ranking provided by the individual metrics (best - green, worst - red): log average probability (LAP), overall coverage (Cov tot), contig path NG50 (CPNG50), sum of all rankings from Assemblathon~1 (Overall), weighted median contig size based on estimated genome size (NG50), coverage within coding sequences (Cov genic), scaffold path NG50 (SPNG50), length for which half of any two valid columns in the assembly are correct in order and orientation (CC50), weighted median contig size based on total assembly size (N50), proportion of columns with a copy number error (Copy num), total substitution errors per correct bit (Subs), and sum of structural errors (Struct). Column descriptions and underlying data obtained from Table 3 in Earl et al.~\cite{earl2011assemblathon}. Columns are sorted according to the level of concordance with the LAP ranking. \emph{De novo} measures are highlighted in bold.} \label{fig:asm1_ranks} \end{quote} \end{figure} \renewcommand{\baselinestretch}{2} \small\normalsize The Assemblathon~1 competition~\cite{earl2011assemblathon} further demonstrated the difficulty of accurately assessing the relative quality of genome assemblies even when a correct reference sequence is available. The authors developed a collection of quality metrics that measure the stretch of a correctly assembled sequence (for example, contig path NG50 and scaffold path NG50), the number of structural errors (such as insertions, deletions, and translocations), the long-range contiguity (for example, the average distance between correctly paired genomic loci), the number of copy number errors, and the coverage within the assembly or only within coding regions. All these metrics were computed with respect to two reference haplotypes, from which the read data were simulated. The authors ranked the different assemblies by each of the metrics and used the combined information to rank the quality of the assemblies. In Figure~\ref{fig:asm1_ranks}, we compare the rankings provided by our LAP score to the rankings generated by the Assemblathon~1 competition. In addition to LAP, the figure also includes two variants of the most commonly used \emph{de novo} measure of assembly size, N50 -- the weighted median contig size, that is, the length of the largest contig $c$ such that the total size of the contigs larger than $c$ exceeds half of the genome size. N50 uses the total assembly size as a proxy for the genome size, while the NG50 value uses an estimate of the actual genome size to compute the N50 value. The more accurate estimate of the genome size results in a better ranking for NG50, as confirmed by its concordance with our LAP score. The overall coverage measure (percentage of the reference haplotypes covered by a particular assembly) correlates better with the LAP score than the other metrics. This result is not surprising as the LAP score is strongly affected by the number of reads that can be correctly mapped to an assembly, which is ultimately correlated with the concordance between the assembly and the correct reference sequence.
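To make the two contiguity measures concrete, the short Python sketch below computes both N50 and NG50 from a list of contig lengths; the only difference between the two is whether the half-size threshold comes from the total assembly size or from an estimate of the genome size. This is an illustrative sketch using one common convention (a cumulative sum over contigs sorted by decreasing length); edge cases such as ties are handled differently by different tools.
\begin{verbatim}
def n50(contig_lengths, genome_size=None):
    """Return the N50 of an assembly, or the NG50 if genome_size is given."""
    # Threshold: half the assembly size (N50) or half the genome estimate (NG50).
    threshold = (genome_size if genome_size is not None
                 else sum(contig_lengths)) / 2.0
    running_total = 0
    for length in sorted(contig_lengths, reverse=True):
        running_total += length
        if running_total >= threshold:
            return length
    return 0  # assembly covers less than half of the estimated genome size

# Toy example: total assembly size is 290 kbp.
contigs = [100000, 80000, 50000, 30000, 20000, 10000]
print(n50(contigs))                      # N50  -> 80000
print(n50(contigs, genome_size=400000))  # NG50 -> 50000
\end{verbatim}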
Interestingly, the overall rankings differ between LAP and the conclusions of the Assemblathon~1 study. Our analysis suggests that the BGI assembly is the best, while the Assemblathon~1 study picked the Broad assembly as the winner. This discrepancy can be explained in part by the Broad assembly's high performance within the genic regions (LAP does not distinguish between genic and inter-genic segments) and by the large weight placed on the BGI assembly's poor performance in terms of substitution errors, which have a relatively small effect on the LAP score. It is important to note that while LAP and the Assemblathon~1 results disagree in the exact total ranking of the assemblies, the top 11 assemblies are the same, meaning they are of fundamentally better quality than the remaining 9 assemblies presented in the Assemblathon~1 paper. In fact, the Assemblathon overall score jumps from 74 for the 11th (WTSI-P) assembly to 99 for the 12th (DCSISU) assembly, indicating a substantial qualitative difference. This is also reflected in the corresponding jump in the LAP score from -37.326 to -39.441 for the 11th (DOEJGI) and 12th (NABySS) assemblies, respectively. \subsubsection{The effect of contaminant DNA on the assessment of assembly quality} The Assemblathon~1 dataset provides an interesting challenge to assembly assessment. The simulated libraries, generated in this project from human chromosome 13, also included approximately 5\% contaminant DNA from an \emph{Escherichia coli} genome to simulate commonly encountered laboratory contamination, which can occur when fragments of the cloning vector are sequenced along with the genome of interest. The participants in the Assemblathon~1 competition were given the option to either remove the contaminant DNA prior to assembly or retain the corresponding sequences in their assembly. This decision has little effect on the comparison between the resulting assembly and the correct reference genome in Assemblathon~1; however, the ability of an assembler to correctly reconstruct the contaminant genome significantly affects the corresponding LAP score. \begin{figure}[ht!] \begin{center} \includegraphics[width=4.86in]{figures/fig_5_asm1_probs} \end{center} \renewcommand{\baselinestretch}{1} \small\normalsize \begin{quote} \caption[Effect of contaminant DNA on the computation of the LAP scores.]{Effect of contaminant DNA on the computation of the LAP scores. Red crosses are the LAP scores computed on the entire read set (including contamination). Blue crosses are the LAP scores computed only on the ``true'' reads that map to the genome of interest. The corresponding LAP scores are quite similar (those obtained from a smaller set of reads are correspondingly smaller) except for those of assemblies that removed the contaminant DNA prior to assembly, which receive a boost in the LAP scores obtained on the ``true'' data.} \label{fig:asm1_probs} \end{quote} \end{figure} \renewcommand{\baselinestretch}{2} \small\normalsize Indeed, the LAP scores (Figure~\ref{fig:asm1_probs}) computed from the entire set of reads (the red crosses) and those computed after the contaminant reads were removed (the blue crosses) are strongly correlated; the latter scores are slightly lower since they were computed on the smaller dataset. In several cases, the assembly was performed after removal of the contaminant DNA (see ``jumps'' in Figure~\ref{fig:asm1_probs}).
These assemblies are penalized by our framework for not assembling the contaminant DNA, a penalty that is removed once the same set of reads is used for both assembly and quality assessment. It is important to stress that the LAP scores can only be meaningfully compared across the assemblies generated from the same read set. If a contaminant is known, it should either be removed from or retained within the dataset for all assemblers being compared; otherwise, the corresponding scores cannot be directly compared. Note that this property is not unique to our measure: ignoring or assembling contaminant DNA also affects other traditional measures of quality, such as the N50 value or any reference-based measures, for example, when the contaminant DNA shares significant similarity with the genome being assembled. In practice, a `contaminant' is not known \emph{a priori}, and its definition depends on the specifics of an experiment. In general, it is difficult, if not impossible, to distinguish between environmental contaminants and true artifacts in the data, both in the context of metagenomic projects and in the case of isolate genomes. For example, the \emph{Bacillus anthracis} samples from the bioterror attack in 2001, which were originally presumed to be uniform, contained a mixture of very closely related strains, and the characteristics of this mixture formed an important forensic marker in the investigation~\cite{rasko2011bacillus}. \begin{figure}[ht!] \begin{center} \includegraphics[width=4.26in]{figures/fig_6_e_coli_ec10} \end{center} \renewcommand{\baselinestretch}{1} \small\normalsize \begin{quote} \caption[Tuning the SOAPdenovo k-mer parameter using LAP scores.]{Tuning the SOAPdenovo k-mer parameter using LAP. LAP, N50, and corrected N50 are plotted for various SOAPdenovo assemblies of the \emph{E. coli} K12 MG1655 dataset for different $k$-mer sizes (k=23-123). ALE~\cite{clark2013ale} scores are plotted alongside the LAP to show the differences between their underlying likelihood models. Also included is a breakdown of the errors along with the percentage of unaligned reads for the various SOAPdenovo assemblies. Two vertical lines (at k=79 and k=87) correspond to the maximum ALE and LAP score, respectively.} \label{fig:e_coli} \end{quote} \end{figure} \renewcommand{\baselinestretch}{2} \small\normalsize \subsubsection{A useful application: tuning assembly parameters} Our discussion so far has focused on comparing the output of different assembly software with the goal of choosing the best assembler for a particular dataset. The probabilistic framework we developed can also be used to choose the combination of parameters that allows a particular assembler to achieve better results. To demonstrate this use case, we target the task of selecting the ``best'' (in terms of final assembly quality) $k$-mer length for a de~Bruijn graph-based assembler. We focus here on SOAPdenovo assemblies of the \emph{Escherichia coli} K12 MG1655 genome (Figure~\ref{fig:e_coli}). Without a reference sequence, users of assembly software usually rely on the N50 value as a proxy for assembly quality. In this case, there is a clearly defined peak in N50 at k=79 (114,112 bp). After adjusting for assembly errors, there is a collection of assemblies (k=47-51, 55-75) with nearly identical corrected N50s ($\sim$64,000 bp). These assemblies range in N50 from $\sim$80-115 kbp.
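Operationally, such a parameter sweep is simply a loop over candidate $k$ values that scores each resulting assembly and keeps the best one. The Python sketch below illustrates the idea; \texttt{run\_soapdenovo} and \texttt{compute\_lap} are hypothetical wrapper functions standing in for the assembler invocation and the LAP calculation, and are not part of any released tool.
\begin{verbatim}
# Illustrative parameter sweep: assemble with each candidate k-mer size and
# keep the assembly that maximizes a de novo score such as LAP.

def run_soapdenovo(reads, k):
    """Hypothetical wrapper: assemble `reads` with k-mer size k, return FASTA path."""
    raise NotImplementedError

def compute_lap(assembly_fasta, reads):
    """Hypothetical wrapper: return the LAP score of the assembly given the reads."""
    raise NotImplementedError

def tune_kmer(reads, k_values=range(23, 124, 2)):
    # candidate k-mer sizes (here: odd values from 23 to 123)
    scores = {}
    for k in k_values:
        assembly = run_soapdenovo(reads, k)
        scores[k] = compute_lap(assembly, reads)
    # LAP is a log probability (negative), so higher is better
    best_k = max(scores, key=scores.get)
    return best_k, scores
\end{verbatim}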
Our \emph{de novo} measure LAP shows a clear peak at k=87, which corresponds to a corrected N50 of 59,352 bp. It is important to note that despite a roughly 7\% difference from the peak in corrected N50 (k=63), the best LAP assembly contains 4 fewer indels larger than 5 bp, while also aligning roughly 54,000 more reads. Alongside our LAP, we plot the likelihoods calculated from another assembly evaluation framework, ALE~\cite{clark2013ale}. The assembly with the highest ALE score (k=79) corresponds to the N50 peak. Compared to the LAP-selected assembly, the ALE-selected assembly contains 10 more indels larger than 5 bp and shows a 49\% drop from N50 to corrected N50, compared to the 35\% drop between those values for the LAP-selected assembly. %%%%%%%%%%%%%%%% %% Discussion %% %% \section{Discussion} In this chapter, we have proposed a paradigm for the \emph{de novo} evaluation of genome assemblies. While the general paradigm could, in principle, be used to provide an objective score of assembly quality, our practical implementation of this paradigm, called the Log Average Probability (LAP), is dataset-specific and should only be used to provide relative rankings of different assemblies of the same dataset. Unlike traditional measures of assembly contiguity (such as the N50 value), our reference-independent LAP scores correlate with reference-based measures of assembly quality. We would like to stress that \emph{de novo} measures of assembly quality, such as ours, are critically needed by researchers targeting the assembly of yet-unknown genomes. The specific characteristics of the data being assembled have a significant impact on the performance of genome assemblers (in the Assemblathon~1~\cite{earl2011assemblathon} and GAGE~\cite{salzberg2011gage} competitions, for example, different assemblers `won' the competition depending on the analyzed dataset); thus, reference-based quality assessments cannot be reliably generalized to new genome projects. In this chapter, we have made a number of simplifying assumptions for modeling the sequencing process; specifically, that the sequencing process is uniform (both in the coverage and in the error profile), and that the reads are independently sampled from the genome (with the exception of the dependence imposed by mate-pair experiments). While our approach can detect copy number differences (unless the entire genome is exactly duplicated), it comes with the caveat that sequencing biases within repetitive regions can mask mis-assemblies. More precise models of the sequencing process that relax these assumptions can be easily incorporated into our framework (e.g., effects of G/C content on sequencing depth, or technology-specific error profiles). We plan to create technology-specific variants of our score to keep up with the rapid changes in the characteristics of the sequencing data as new instruments and/or chemistries become available. Furthermore, the probabilistic framework presented here can be used to incorporate other types of information on assembly quality, for example, optical mapping data~\cite{zhou2007validation}. \edit{In our assembler parameter-tuning experiment, we generated assemblies of \emph{Escherichia coli} K12 MG1655 using every allowed k-mer value. While this approach may be computationally feasible for smaller genomes, it is inefficient for very large, complex genomes. One solution would be to use an optimization strategy for selecting potential k-mer values, e.g., with simulated annealing.
} \edit{While there are differences between the LAP score and recent likelihood-based metrics, ALE and CGAL, these differences are quite small (Table~\ref{tab:hg14} and Figure~\ref{fig:e_coli}). Thus, it is important to discuss our technical improvements over ALE and CGAL.} ALE's score did not perform quite as well as our LAP score on the parameter-tuning experiment, and CGAL was unable to evaluate all of the GAGE assemblies due to the technical limitations of Bowtie 2. %Since CGAL relies on Bowtie 2 to align the sequences to the assembly, technical limitations of Bowtie 2 prevent CGAL from evaluating a few of the GAGE assemblies. Bowtie 2 was not designed for reporting \emph{all} read alignments, which makes it very slow on large genomes. This problem will become more prevalent as sequencing costs continue to decrease, allowing for more complex genomes to be sequenced and assembled. \edit{Our framework overcomes CGAL's limitations by allowing users to calculate the LAP score via the dynamic programming method on a subset of the reads or by using the SAM file produced from a read alignment tool designed for finding all alignments (e.g., mrsFAST~\cite{hach2010mrsfast}).} Our original goal was not to detect assembly errors, but to provide a global measure of how good an assembly may be. We plan to extend our framework to detect assembly errors by adopting a similar approach to that demonstrated by ALE. It is important to note that we have focused on a very specific use case for assembly -- the complete reconstruction of a given genome. Assembly algorithms are used in a number of other biological applications, whose specific characteristics affect the validation of the resulting assembly. For example, studies targeting the genic regions of an organism may tolerate large-scale rearrangements as long as the individual genes are correctly reconstructed. In this context, the validation framework would penalize substitution errors and small insertions or deletions (which potentially affect gene structure) more than mis-joins within intergenic regions. Such application-specific tuning is possible within the proposed overall framework, and we envisage the creation of a collection of community-supported modules that compute application-specific LAP scores. Our discussion has focused on the assembly of single genomes; however, the LAP score, as described, can also be directly used in the context of diploid genomes or metagenomic mixtures. In this case, our score implicitly assumes that the goal of the assembler is to correctly reassemble both the sequence and the relative abundances of the individual haplotypes. Assume, for example, a simple metagenomic sample that contains two organisms, one of which is twice as abundant as the other. An assembler that produces three sequences, corresponding to the three `haplotypes' in the sample (whether explicitly outputting two, perhaps identical, versions of the abundant organism or reporting the copy-number difference in some other way) would obtain a better LAP score than an assembler that only reported two sequences without any indication of their relative abundance. As a result, the majority of metagenomic assemblers available today, which only output the consensus sequence and not the relative abundance of the contigs, would score poorly under our measure.
We hope that our work will inspire the developers of future metagenomic assemblers to also output information on the relative abundance of the reconstructed sequences, information that is critical to the analysis of the data, yet rarely reported by existing tools. Finally, we propose that measures such as ours, which objectively capture the fit between the data being assembled and the output produced by the assembler without relying on curated reference data sets, become a standard tool in evaluating and comparing assembly tools, allowing the community to move beyond simplistic measures of contiguity such as the ubiquitous N50 measure. % \bigskip % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % \section{Competing Interests} % The authors declare that they have no competing interests. % % \section{Author's contributions} % MG and CH developed the LAP software. CH carried out the GAGE and Assemblathon1 experiments. CH and DS performed the assembler parameter tuning experiments. MG, CH, IA, and HL developed the underlying statistical theory. MP conceived of the study, and participated in its design and coordination and helped to draft the manuscript. All authors read and approved the final manuscript. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%% % \section{Acknowledgements} % We would like to thank H\'{e}ctor Corrada Bravo and Bo Liu for their advice on the sampling procedure and associated statistics, Todd Treangen for advice on accessing the GAGE data, and the other members of the Pop lab for valuable discussions on all aspects of our work. % % This work was supported in part by the National Science Foundation % (grants IIS-0812111, IIS-1117247 to MP), and by the National % Institutes of Health (grant R01-HG-004885 to MP). %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %% The Bibliography %% %% %% %% Bmc_article.bst will be used to %% %% create a .BBL file for submission, which includes %% %% XML structured for BMC. %% %% After submission of the .TEX file, %% %% you will be prompted to submit your .BBL file. %% %% %% %% %% %% Note that the displayed Bibliography will not %% %% necessarily be rendered by Latex exactly as specified %% %% in the online Instructions for Authors. %% %% %% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %% %% %% Figures %% %% %% %% NB: this is for captions and %% %% Titles. All graphics must be %% %% submitted separately and NOT %% %% included in the Tex document %% %% %% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %% %% Do not use \listoffigures as most will included as separate files %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %% %% %% Tables %% %% %% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %% Use of \listoftables is discouraged. %% %\section{Tables} % \begin{landscape} % \renewcommand{\baselinestretch}{1} % \small\normalsize % \renewcommand{\baselinestretch}{2} % \small\normalsize % \end{landscape}
{ "alphanum_fraction": 0.7485126689, "avg_line_length": 59.273199703, "ext": "tex", "hexsha": "2d71aa8c901f7af39de71261b83a210eea323b4f", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "eddb7f6aa5de14aacce98d6172eec30a8bca379e", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "cmhill/dissertation", "max_forks_repo_path": "ChapterLAP.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "eddb7f6aa5de14aacce98d6172eec30a8bca379e", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "cmhill/dissertation", "max_issues_repo_path": "ChapterLAP.tex", "max_line_length": 909, "max_stars_count": 2, "max_stars_repo_head_hexsha": "eddb7f6aa5de14aacce98d6172eec30a8bca379e", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "cmhill/dissertation", "max_stars_repo_path": "ChapterLAP.tex", "max_stars_repo_stars_event_max_datetime": "2016-10-07T23:12:04.000Z", "max_stars_repo_stars_event_min_datetime": "2015-10-05T12:13:13.000Z", "num_tokens": 20175, "size": 79841 }
\subsection{Subsection} \label{sec:subsection1} \lipsum[2-4] \cite{latexcompanion} \begin{equation}\label{eqn:test} E = m \cdot c^{2} \end{equation}
{ "alphanum_fraction": 0.7124183007, "avg_line_length": 17, "ext": "tex", "hexsha": "5dfda0cbeff174471ea51ed0f6e7213a2c74ecf4", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "c7878810532dc8f5107bf164f30dab7eddc56823", "max_forks_repo_licenses": [ "CC0-1.0" ], "max_forks_repo_name": "TUW-GEO/latex_report", "max_forks_repo_path": "latex_report/chapters/1_1_0_equation.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "c7878810532dc8f5107bf164f30dab7eddc56823", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "CC0-1.0" ], "max_issues_repo_name": "TUW-GEO/latex_report", "max_issues_repo_path": "latex_report/chapters/1_1_0_equation.tex", "max_line_length": 47, "max_stars_count": 1, "max_stars_repo_head_hexsha": "c7878810532dc8f5107bf164f30dab7eddc56823", "max_stars_repo_licenses": [ "CC0-1.0" ], "max_stars_repo_name": "TUW-GEO/latex_report", "max_stars_repo_path": "latex_report/chapters/1_1_0_equation.tex", "max_stars_repo_stars_event_max_datetime": "2021-03-14T21:44:22.000Z", "max_stars_repo_stars_event_min_datetime": "2021-03-14T21:44:22.000Z", "num_tokens": 57, "size": 153 }
% -*- Mode:LaTeX -*- % CafeOBJ 1.4.0 syntax reference card. % $Id: syntax.tex,v 1.3 2010-06-17 08:23:17 sawada Exp $ % comment out the following 2 lines if you use old latex209 % \documentclass[a4paper]{article} \documentclass[a4paper]{article} \usepackage[scale=0.76]{geometry} % \usepackage[dvipdfm]{hyperref} \usepackage{hyperref} %%%% box \usepackage{fancybox} \newenvironment{fminipage}% {\begin{Sbox}\begin{minipage}}% {\end{minipage}\end{Sbox}\fbox{\TheSbox}} \newcommand{\vsep}{\vskip\fboxsep} \newcommand{\nvsep}{\vskip\fboxsep\noindent} %%%%% xlatex specific \usepackage{fontspec,xltxtra,xunicode} \defaultfontfeatures{Mapping=tex-text} \setromanfont[Mapping=tex-text]{Times New Roman} \setsansfont[Scale=MatchLowercase,Mapping=tex-text]{Gill Sans} \setmonofont[Scale=MatchLowercase]{Andale Mono} %** begin header \makeatletter \def\cafeobj{\textsf{CafeOBJ}} % kbd - argument is characters typed literally. \def\kbd#1{{\texttt{#1}\null}} % beginexample...endexample - surrounds literal text, such a code example. % typeset in a typewriter font with line breaks preserved. \def\example{\leavevmode\begingroup \obeylines\obeyspaces\parskip0pt\texttt} {\obeyspaces\global\let =\ } \def\endexample{\endgroup} % terminal - used for terminal symbols, argument is characters appear in sf. \def\sym#1{\textsf{#1}\null} % nonterm - used for non terminal symbols, argument is characters typed with % italic face. \def\nonterm#1{\textit{#1}\null} % syntax ... endsyntax \def\xstrut{\relax\ifmmode\copy\strutbox\else\unhcopy\strutbox\fi} \def\syntax{\syntaxoutnonbox\halign\bgroup \xstrut$\@lign##$\hfil &\hfil$\@lign{}##{}$\hfil &$\@lign##$\hfil &\qquad\@lign-- ##\hfil\cr} \def\endsyntax{\crcr\egroup$$ \global\@ignoretrue } \def\syntaxoutnonbox{\xleavevmode$$ \parskip=0pt\lineskip=0pt \def\\{\crcr}% Must have \def and not \let for nested alignments. \everycr={\noalign{\penalty10000}} \tabskip=0pt} \def\xleavevmode{\ifvmode\if@inlabel\indent\else\if@noskipsec\indent\else \if@nobreak\global\@nobreakfalse\everypar={}\fi {\parskip=0pt\noindent}\fi\fi\fi} \def\@but{\noalign{\nointerlineskip}} \def\alt{{\;|\;}} \def\seqof#1{\mbox{\textbf{\{}}\;{#1}\;\mbox{\textbf{\}}}} \def\optn#1{\textbf{[}\;{#1}\;\textbf{]}} \def\synindent{\;\;\;} \makeatother %** end of header \title{\cafeobj\ Syntax Quick Reference \\ {\small for Interpreter version 1.4.8 or later}} \date{\today} %\author{} \author{Toshimi Sawada\thanks{\texttt{[email protected]}} \\ Kosakusha Inc. } \begin{document} \maketitle \tableofcontents % \setlength{\parindent}{0pt} \setlength{\parskip}{1.4ex} \section{Syntax} \label{sec:cafeobj-syntax} We use an extended BNF grammar to define the syntax. The general form of a production is \begin{syntax} \synindent\synindent nonterminal &::=& alternative \alt alternative \alt \cdots \alt alternative \end{syntax} The following extensions are used: \begin{center} \begin{fminipage}{0.7\textwidth} \begin{tabular}{ll} a $\cdots$ & a list of one or more \textit{a}s. \\ a, $\cdots$ & a list of one or more \textit{a}s separated by commas: \\ & ``a'' or ``a, a'' or ``a, a, a'', etc. \\ $\seqof{\textrm{a}}$ & \textbf{\{} and \textbf{\}} are meta-syntactical brackets \\ & treating \textit{a} as one syntactic category. \\ $\optn{\textrm{a}}$ & an optional \textit{a}: `` '' or ``a''. \end{tabular} \end{fminipage} \end{center} Nonterminal symbols appear in \textit{italic face}. 
Terminal symbols appear in the face like this: ``\sym{terminal}'', and may be surrounded by `` and '' for emphasis or to avoid confusion with meta characters used in the extended BNF. We will refer to terminal symbols other than self-terminating characters (see section ~\ref{sec:selfterminating}) as \textit{keyword}s in this document. %\subsection{CafeOBJ program} \subsection{CafeOBJ Spec} \label{sec:cafeobjprogram} \begin{syntax} \synindent\synindent spec &::=& \seqof{module \alt view \alt eval} \cdots \end{syntax} A \cafeobj\ spec is a sequence of \nonterm{module} (module declaration -- section ~\ref{sec:module-decl}), \nonterm{view} (view declaration -- section ~\ref{sec:view-decl}) or \nonterm{eval} (\textit{reduce} or \textit{execute} term -- section ~\ref{sec:eval}). \subsection{Module Declaration} \label{sec:module-decl} \begin{syntax} module &::=& module\_type\; module\_name \; \optn{parameters} \; \optn{principal\_sort} \\ && \sym{``\{''}\; module\_elt\cdots \; \sym{``\}''} \\ \synindent module\_type &::=& \sym{module} \alt \sym{module!} \alt \sym{module*}\\ \synindent module\_name &::=& ident & \footnote{The nonterminal \textit{ident} is for identifiers and will be defined in the section ~\ref{sec:identifier}.} \\ parameters &::=& \sym{``(''}\; parameter, \cdots \sym{``)''}\\ \synindent parameter &::=& \optn{\sym{protecting}\alt\sym{extending}\alt\sym{including}}\; parameter\_name \; \sym{::}\; module\_expr &\footnote{\textit{module\_expr} is defined in the section ~\ref{sec:modexpr}.}\footnote{If optional $\optn{\sym{protecting}\alt\sym{extending}\alt\sym{including}}$ is omitted, it is defaulted to \sym{protecting}.} \\ \synindent parameter\_name &::=& ident \\ principal\_sort &::=& \sym{principal-sort}\; sort\_name \\ % module\_elt &::=& import \alt sort \alt record \alt operator \alt module\_elt &::=& import \alt sort \alt operator \alt variable \alt axiom \alt macro \alt comment &\footnote{\nonterm{comment} is discussed in section ~\ref{sec:comments}.}\\ import &::=& \seqof{\sym{protecting}\alt\sym{extending}\alt\sym{including}\alt\sym{using}} \sym{``(''}\;module\_expr\;\sym{``)''}\\ sort & ::= & visible\_sort \alt hidden\_sort \\ \synindent visible\_sort & ::=& \sym{``[''}\; sort\_decl, \cdots \; \sym{``]''} \\ \synindent hidden\_sort & ::=& \sym{``*[''}\; sort\_decl, \cdots \; \sym{``]*''} \\ \synindent sort\_decl &::=& sort\_name\; \cdots\; \optn{supersorts\; \cdots} \\ \synindent supersorts &::=& <\; sort\_name\; \cdots \\ \synindent sort\_name &::=& sort\_symbol\optn{qualifier} &\footnote{There must not be any separators (see section ~\ref{sec:lex}) between \nonterm{ident} and \nonterm{qualifier}.} \\ \synindent sort\_symbol &::=& ident \\ \synindent qualifier &::=& \sym{``.''}module\_name\\ % record &::=& \sym{record}\; sort\_name\; \optn{super\;\cdots}\; % \sym{``\{''}\; \seqof{slot \alt comment}\cdots \; \sym{``\}''} \\ % \synindent super &::=& % \sym{``[''}\;sort\_name\;\optn{\sym{``(''}\; slot\_rename,\cdots % \sym{``)''}}\;\sym{``]''} \\ % \synindent slot &::=& slot\_name : sort\_name \alt % slot\_name\;\sym{=}\;\sym{``(''}term\sym{``)''}\;:\; sort\_name \\ % \synindent slot\_name &::=& ident \\ % \synindent slot\_rename &::=& slot\_name\; \verb|->|\; slot\_name\\ operator &::=& \seqof{\sym{op}\alt\sym{bop}} \; operator\_symbol\; : \; \optn{arity}\; \verb|->|\; coarity \; \optn{op\_attrs} &\footnote{\nonterm{operator\_symbol} is defined in section ~\ref{sec:opsymbol}.}\\ \synindent arity &::=& sort\_name \cdots \\ \synindent coarity &::=& sort\_name \\ \synindent
op\_attrs &::=& \sym{``\{''}\;op\_attr\cdots\;\sym{``\}''} \\ \synindent op\_attr &::=& \sym{constr} \alt \sym{associative} \alt \sym{commutative} \alt\sym{idempotent} \alt \seqof{\sym{id:} \alt \sym{idr:}} \sym{``(''} \; term\; \sym{``)''} %&\footnote{The general syntax of \nonterm{term} is defined in section % ~\ref{sec:term}.} \\ &\alt & \sym{strat: ``(''}\; natural \cdots\; \sym{``)''} \alt\sym{prec:}\; natural \alt \sym{l-assoc} \alt \sym{r-assoc} \alt \sym{coherent} \alt \sym{demod} &\footnote{\nonterm{natural} is a natural number written in ordinary Arabic notation.} \\ variable &::=& \sym{var}\; var\_name\; :\; sort\_name \alt \sym{vars} \; var\_name\cdots\; :\; sort\_name \\ \synindent var\_name &::=& ident \\ axiom &::=& equation \alt cequation\alt transition \alt ctransition \alt fol\\ \synindent equation &::=& \seqof{\sym{eq} \alt \sym{beq}}\; \optn{label}\; term \; \sym{=} \; term\; \sym{``.''}\\ \synindent cequation &::=& \seqof{\sym{ceq} \alt \sym{bceq}}\; \optn{label}\; term \; \sym{=} \; term\; \sym{if}\; term\;\sym{``.''} \\ \synindent transition &::=& \seqof{\sym{trans} \alt \sym{btrans}}\; \optn{label}\; term \; \verb|=>| \; term \;\sym{``.''}\\ \synindent ctransition &::=& \seqof{\sym{ctrans} \alt \sym{bctrans}}\; \optn{label}\; term \; \verb|=>| \; term\; \sym{if}\; term\;\sym{``.''}\\ \synindent fol & ::= & \sym{ax} \optn{label}\; term \; \sym{``.''}\\ \synindent label &::=& \sym{``[''}\; ident\cdots\; \sym{``]:''}\\ macro &::=& \sym{\#define}\; term\; \sym{::=}\; term\; \sym{``.''} \end{syntax} \subsection{Module Expression} \label{sec:modexpr} \begin{syntax} module\_expr &::=& module\_name \alt sum \alt rename \alt instantiation \alt \sym{``(''} module\_expr \sym{``)''} \\ sum &::=& module\_expr\; \seqof{+\;\; module\_expr} \cdots \\ rename &::=& module\_expr\;\sym{*}\;\sym{``\{''} rename\_map,\cdots \sym{``\}''} \\ % instantiation &::=& module\_expr\;\sym{``(''} % \seqof{ident \optn{qualifier}\;\verb|<=|\;aview},\cdots % \sym{``)''} instantiation &::=& module\_expr\;\sym{``(''}\textbf{\{}\;ident [ qualifier ]\; \verb|<=|\; aview\textbf{\}},\;\cdots\;\sym{``)''} \\ rename\_map &::=& sort\_map \alt op\_map \\ sort\_map &::=& \seqof{\sym{sort} \alt \sym{hsort}}\; sort\_name \; \verb|->|\; ident \\ op\_map &::=& \seqof{\sym{op} \alt \sym{bop}}\; op\_name\;\verb|->|\;operator\_symbol \\ op\_name &::=& operator\_symbol\alt \sym{``(''}operator\_symbol\sym{``)''}qualifier\\ aview &::=& view\_name\alt module\_expr\\ & \alt & \sym{view to}\; module\_expr\;\sym{``\{''} view\_elt,\cdots \sym{``\}''} \\ view\_name &::=& ident \\ view\_elt &::=& sort\_map \alt op\_view \alt variable \\ op\_view &::=& op\_map \alt term\; \verb|->|\; term \end{syntax} When a module expression is not fully parenthesized, the proper nesting of subexpressions may be ambiguous.
The following precedence rule is used to resolve such ambiguity: \[ \nonterm{sum} < \nonterm{rename} < \nonterm{instantiation} \] \subsection{View Declaration} \label{sec:view-decl} \begin{syntax} view &::=& \sym{view}\; view\_name\; \sym{from}\; module\_expr\; \sym{to} \; module\_expr \\ && \sym{``\{''}\; view\_elt, \cdots \; \sym{``\}''} \\ \end{syntax} %\subsection{Term} %\label{sec:term} \subsection{Evaluation} \label{sec:eval} \begin{syntax} eval & ::= & \seqof{\sym{reduce}\alt\sym{behavioural-reduce} \alt\sym{execute}}\; context\; term\; \sym{``.''} \\ context & ::=& \sym{in}\; module\_expr\;\sym{:} \end{syntax} The interpreter has a notion of \textit{current module}, which is specified by a \nonterm{module\_expr} and establishes a context. If it is set, \nonterm{context} can be omitted. \subsection{Sugars and Abbreviations} \label{sec:sugar} \paragraph{Module type} There are the following abbreviations for \nonterm{module\_type}. \begin{center} \begin{tabular}{ll}\hline Keyword & Abbreviation \\\hline \sym{module} & \sym{mod}\\ \sym{module!} & \sym{mod!} \\ \sym{module*} & \sym{mod*}\\\hline \end{tabular} \end{center} \paragraph{Module Declaration} \begin{syntax} make &::=& \sym{make}\;module\_name\;\sym{``(''}\; module\_expr\;\sym{``)''} \end{syntax} \nonterm{make} is a shorthand for declaring a module named \nonterm{module\_name} which imports \nonterm{module\_expr} in protecting mode. \begin{example} make FOO (BAR * \{sort Bar -> Foo\}) \end{example} is equivalent to \begin{example} module FOO \{ protecting (BAR * \{sort Bar -> Foo\}) \} \end{example} \paragraph{Principal Sort} \sym{principal-sort} can be abbreviated to \sym{psort}. \paragraph{Import Mode} For import modes, the following abbreviations can be used: \begin{center} \begin{tabular}{ll}\hline Keyword & Abbreviation \\\hline \sym{protecting} & \sym{pr} \\ \sym{extending} & \sym{ex} \\ \sym{including} & \sym{inc} \\ \sym{using} & \sym{us} \\\hline \end{tabular} \end{center} \paragraph{Simultaneous Operator Declaration} Several operators with the same arity, coarity and operator attributes can be declared at once by \sym{ops}. The form \begin{syntax} \sym{ops}\; operator\_symbol_1 \cdots operator\_symbol_n\; :\; arity\; \verb|->|\; coarity\; op\_attrs \end{syntax} is just equivalent to the following multiple operator declarations: \begin{syntax} \sym{op}\; & operator\_symbol_1\;:\;arity\;\verb|->|\;coarity\;op\_attrs \\ &\vdots\\ \sym{op}\; & operator\_symbol_n\;:\;arity\;\verb|->|\;coarity\;op\_attrs \end{syntax} \sym{bops} is the counterpart of \sym{ops} for behavioural operators. \begin{syntax} \sym{bops}\; operator\_symbol\;\cdots\; : \; arity\;\verb|->|\; coarity \; op\_attrs \end{syntax} In simultaneous declarations, parentheses are sometimes necessary to separate operator symbols. This is always required if an operator symbol contains dots, blank characters or underscores. \paragraph{Predicate} A predicate declaration (\nonterm{predicate}) is syntactic sugar for declaring \sym{Bool}-valued operators, and has the syntax: \begin{syntax} predicate &::=& \sym{pred}\;operator\_symbol\;:\;arity\;\optn{op\_attrs} &\footnote{You cannot use a \nonterm{sort\_name} with the same character sequence as any of the keywords, e.g., \sym{module}, \sym{op}, \sym{vars}, etc.
in \nonterm{arity}.} \end{syntax} The form \begin{syntax} \sym{pred}\;operator\_symbol\;:\;arity\;op\_attrs \end{syntax} is equivalent to: \begin{syntax} \sym{op}\;operator\_symbol\;:\;arity\;\verb|->|\; \sym{Bool} \;op\_attrs \end{syntax} \paragraph{Operator Attributes} The following abbreviations are available: \begin{center} \begin{tabular}{ll}\hline Keyword & Abbreviation \\\hline \sym{associative} & \sym{assoc} \\ \sym{commutative} & \sym{comm} \\ \sym{idempotent} & \sym{idem} \\\hline \end{tabular} \end{center} \paragraph{Axioms} For the keywords introducing axioms, the following abbreviations can be used: \begin{center} \begin{tabular}{ll|ll}\hline Keyword & Abbreviation & Keyword & Abbreviation \\\hline \sym{ceq} & \sym{cq} & \sym{bceq} & \sym{bcq} \\ \sym{trans} & \sym{trns} & \sym{ctrans} & \sym{ctrns} \\ \sym{btrans} & \sym{btrns} & \sym{bctrans} & \sym{bctrns} \\\hline \end{tabular} \end{center} \paragraph{Blocks of Declarations} References to (importations of) other modules, signature definitions and axioms can be clustered in blocked declarations: \begin{syntax} imports &::=& \sym{imports}\;\sym{``\{''}\\ &&\synindent\seqof{import \alt comment}\cdots\\ &&\sym{``\}''}\\ signature &::=& \sym{signature}\;\sym{``\{''}\\ &&\synindent\seqof{sort \alt record \alt operator \alt comment}\cdots\\ &&\sym{``\}''}\\ axioms &::=& \sym{axioms}\;\sym{``\{''}\\ &&\synindent\seqof{variable \alt axiom\alt comment}\cdots\\ &&\sym{``\}''} \end{syntax} \paragraph{Views} To reduce the complexity of views appearing in module instantiation, some sugars are provided. First, it is possible to identify parameters by positions, not by names. For example, if a parameterized module is declared like \begin{example} \sym{module!} FOO (A1 :: TH1, A2 :: TH2) \sym{\{} ... \sym{\}} \end{example} the form \begin{example} FOO(V1, V2) \end{example} is equivalent to \begin{example} FOO(A1 <= V1, A2 <= V2) \end{example} Secondly, the \sym{view to} construct in arguments of module instantiations can always be omitted. That is, \begin{example} FOO(A1 <= \sym{view to} \nonterm{module\_expr}\{...\}) \end{example} can be written as \begin{example} FOO(A1 <= \nonterm{module\_expr}\{...\}) \end{example} \paragraph{Evaluation} \begin{center} \begin{tabular}{ll}\hline Keyword & Abbreviation \\\hline \sym{reduce} & \sym{red} \\ \sym{bereduce} & \sym{bred} \\ \sym{execute} & \sym{exec} \\\hline \end{tabular} \end{center} \section{Lexical Considerations} \label{sec:lex} A \cafeobj\ spec is written as a sequence of tokens and separators. A \textit{token} is a sequence of ``printing'' ASCII characters (octal 40 through 176).\footnote{The current interpreter accepts Unicode characters also, but this is beyond the definition of the CafeOBJ language.} A \textit{separator} is a ``blank'' character (space, vertical tab, horizontal tab, carriage return, newline, form feed). In general, any number of separators may appear between tokens. \subsection{Reserved Word} \label{sec:keywords} There are \textit{no\/} reserved words in \cafeobj. One can use keywords such as \sym{module}, \sym{op}, \sym{var}, or \sym{signature}, etc. for identifiers or operator symbols. \subsection{Self-terminating Characters} \label{sec:selfterminating} The following seven characters are always treated as \textit{self-terminating}, i.e., the character itself constructs a token.
\begin{center} \begin{tabular}{lllllll} \sym{(} & \sym{)} & \sym{,} & \sym{[} & \sym{]} & \sym{\{} & \sym{\}} \end{tabular} \end{center} \subsection{Identifier} \label{sec:identifier} The nonterminal \nonterm{ident} is for \emph{identifier}, which is a sequence of any printing ASCII characters except the following: \begin{center} \begin{tabular}{l} self-terminating characters (see section ~\ref{sec:selfterminating})\\ \sym{.} (dot)\\ \sym{"} (double quote)\\ \end{tabular} \end{center} Upper- and lowercase are distinguished in identifiers. \nonterm{ident}s are used for module names (\nonterm{module\_name}), view names (\nonterm{view\_name}), parameter names (\nonterm{parameter\_name}), sort symbols (\nonterm{sort\_symbol}), variables (\nonterm{var\_name}), slot names (\nonterm{slot\_name}) and labels (\nonterm{label}). \subsection{Operator Symbol} \label{sec:opsymbol} The nonterminal \nonterm{operator\_symbol} is used for naming operators (\nonterm{operator}) and is a sequence of any ASCII characters (self-terminating characters or non-printing characters can be an element of operator names).\footnote{The current implementation does not allow the EOT character (control-D) to be an element of an operator symbol.} Underscores are specially treated when they appear as part of operator names; they reserve the places where arguments of the operator are inserted. Thus the single underscore cannot be the name of an operator. \subsection{Comments and Separators} \label{sec:comments} A \nonterm{comment} is a sequence of characters that begins with one of the following four character sequences \begin{center} \begin{tabular}{ll} \verb|--| & \verb|-->|\\ \verb|**| & \verb|**>| \\ \end{tabular} \end{center} which ends with a newline character, and contains only printing ASCII characters and horizontal tabs in between. A \nonterm{separator} is a blank character (space, vertical tab, horizontal tab, carriage return, newline, form feed). One or more separators must appear between any two adjacent non-self-terminating tokens.\footnote{The same rule is applied to \nonterm{term}. Further, if an \nonterm{operator\_symbol} contains blanks or self-terminating characters, it is sometimes necessary to enclose a term having such an operator at its top in parentheses for disambiguation.} Comments also act as separators, but their appearance is limited to some specific places (see section ~\ref{sec:cafeobj-syntax}). \end{document} % Local variables: % compile-command: "latex qref" % TeX-master: t % End:
{ "alphanum_fraction": 0.6670413899, "avg_line_length": 37.3473282443, "ext": "tex", "hexsha": "bf8856865b5c2661edf07f5e2519f30d55b9f602", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "a1fe0ae4bfcdb409511d1f642dbce205ff25205d", "max_forks_repo_licenses": [ "MIT-CMU" ], "max_forks_repo_name": "tswd/CafeOBJ", "max_forks_repo_path": "RefCard/syntax.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "a1fe0ae4bfcdb409511d1f642dbce205ff25205d", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT-CMU" ], "max_issues_repo_name": "tswd/CafeOBJ", "max_issues_repo_path": "RefCard/syntax.tex", "max_line_length": 119, "max_stars_count": 1, "max_stars_repo_head_hexsha": "a1fe0ae4bfcdb409511d1f642dbce205ff25205d", "max_stars_repo_licenses": [ "MIT-CMU" ], "max_stars_repo_name": "tswd/CafeOBJ", "max_stars_repo_path": "RefCard/syntax.tex", "max_stars_repo_stars_event_max_datetime": "2015-06-30T19:16:51.000Z", "max_stars_repo_stars_event_min_datetime": "2015-06-30T19:16:51.000Z", "num_tokens": 6589, "size": 19570 }
\section{stem} \index{stem} \begin{shaded} \begin{alltt} /** stem Call stemr at the start of a pipe segment \end{alltt} \end{shaded}
{ "alphanum_fraction": 0.6934306569, "avg_line_length": 12.4545454545, "ext": "tex", "hexsha": "1acea89c6f28f916da40306d60607e9568c26b02", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "ec27b6e3f908fbc50cb6dc54696daea68ae59103", "max_forks_repo_licenses": [ "ICU" ], "max_forks_repo_name": "RexxLA/NetRexx", "max_forks_repo_path": "documentation/njpipes/stem.tex", "max_issues_count": 25, "max_issues_repo_head_hexsha": "ec27b6e3f908fbc50cb6dc54696daea68ae59103", "max_issues_repo_issues_event_max_datetime": "2022-02-01T16:14:50.000Z", "max_issues_repo_issues_event_min_datetime": "2022-01-24T12:13:53.000Z", "max_issues_repo_licenses": [ "ICU" ], "max_issues_repo_name": "RexxLA/NetRexx", "max_issues_repo_path": "documentation/njpipes/stem.tex", "max_line_length": 43, "max_stars_count": null, "max_stars_repo_head_hexsha": "ec27b6e3f908fbc50cb6dc54696daea68ae59103", "max_stars_repo_licenses": [ "ICU" ], "max_stars_repo_name": "RexxLA/NetRexx", "max_stars_repo_path": "documentation/njpipes/stem.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 49, "size": 137 }
% Golden Rules of Bioinformatics Conclusions \subsection{Conclusion} \begin{frame} \frametitle{In Conclusion} \begin{itemize} \item \emph{Always communicate!} \begin{itemize} \item worst errors are silent \end{itemize} \item Don't trust the data \begin{itemize} \item formatting/validation/category errors - check! \item suitability for scientific question \end{itemize} \item Don't trust the software \begin{itemize} \item software is not an authority \item always benchmark, always validate \end{itemize} \item Don't trust yourself \begin{itemize} \item beware cognitive errors \item think statistically \item biological ``stories'' can be constructed from nonsense \end{itemize} \end{itemize} \end{frame}
{ "alphanum_fraction": 0.6780072904, "avg_line_length": 28.3793103448, "ext": "tex", "hexsha": "62802060ab0bf5b74e1d0942a094f22b7b65249d", "lang": "TeX", "max_forks_count": 14, "max_forks_repo_forks_event_max_datetime": "2021-06-08T18:24:04.000Z", "max_forks_repo_forks_event_min_datetime": "2015-02-05T20:54:37.000Z", "max_forks_repo_head_hexsha": "d642804aa73e80546e2326d2c2537c5727ac3ee8", "max_forks_repo_licenses": [ "CC-BY-4.0" ], "max_forks_repo_name": "peterjc/Teaching-Intro-to-Bioinf", "max_forks_repo_path": "presentation/sections/subsection_goldenruleconclusions.tex", "max_issues_count": 3, "max_issues_repo_head_hexsha": "d642804aa73e80546e2326d2c2537c5727ac3ee8", "max_issues_repo_issues_event_max_datetime": "2018-10-05T10:53:49.000Z", "max_issues_repo_issues_event_min_datetime": "2016-11-25T11:55:43.000Z", "max_issues_repo_licenses": [ "CC-BY-4.0" ], "max_issues_repo_name": "peterjc/Teaching-Intro-to-Bioinf", "max_issues_repo_path": "presentation/sections/subsection_goldenruleconclusions.tex", "max_line_length": 67, "max_stars_count": 20, "max_stars_repo_head_hexsha": "d642804aa73e80546e2326d2c2537c5727ac3ee8", "max_stars_repo_licenses": [ "CC-BY-4.0" ], "max_stars_repo_name": "peterjc/Teaching-Intro-to-Bioinf", "max_stars_repo_path": "presentation/sections/subsection_goldenruleconclusions.tex", "max_stars_repo_stars_event_max_datetime": "2021-05-20T13:38:44.000Z", "max_stars_repo_stars_event_min_datetime": "2015-05-28T18:29:42.000Z", "num_tokens": 212, "size": 823 }
\documentclass[11pt,a4paper]{article} \usepackage[hyperref]{naaclhlt2019} \usepackage{times} \usepackage{latexsym} \usepackage{enumerate} \usepackage{graphicx} \usepackage{subfigure} \usepackage{amsmath} \usepackage{amsfonts} \usepackage{multirow} \usepackage{bm} \usepackage{array} \usepackage{verbatim} \usepackage{xcolor} \usepackage{url} \usepackage{color} \usepackage{verbatim} \newcommand\BibTeX{B{\sc ib}\TeX} \title{CAN: Constrained Attention Networks for Multi-Aspect Sentiment Analysis} % \author{ % Mengting Hu \\ % {\tt [email protected]} \\ % {Nankai University, Tianjin, China} \\ % \And % Shiwan Zhao \\ % {\tt [email protected] } \\ % {IBM Research - China, Beijing , China} % } \author{Mengting Hu\textsuperscript{1}\thanks{\quad This work was done when Mengting Hu was a research intern at IBM Research - China.}, Shiwan Zhao\textsuperscript{2}, Li Zhang\textsuperscript{2}, Keke Cai\textsuperscript{2}, Zhong Su\textsuperscript{2}, Renhong Cheng\textsuperscript{1}, Xiaowei Shen\textsuperscript{2} \\ \textsuperscript{1} Nankai University, \textsuperscript{2} IBM Research - China \\ [email protected], \{zhaosw, lizhang, caikeke, suzhong\}@cn.ibm.com, \\ [email protected], [email protected] } \begin{document} \maketitle \begin{abstract} Aspect level sentiment classification is a fine-grained sentiment analysis task compared to sentence level classification. A sentence usually contains one or more aspects. To detect the sentiment towards a particular aspect in a sentence, previous studies have developed various methods for generating aspect-specific sentence representations. However, these studies handle each aspect of a sentence separately. In this paper, we argue that multiple aspects of a sentence are usually orthogonal, based on the observation that different aspects concentrate on different parts of the sentence. To force the orthogonality among aspects, we propose constrained attention networks (CAN) for multi-aspect sentiment analysis, which handles multiple aspects of a sentence simultaneously. Experimental results on two public datasets demonstrate the effectiveness of our approach. We also extend our approach to multi-task settings, significantly outperforming the state of the art. \end{abstract} \section{Introduction} Sentiment analysis \cite{Nasukawa2003Sentiment,liu2012sentiment}, an important task in natural language understanding, has received much attention in recent years. Aspect level sentiment classification is a fine-grained sentiment analysis task, which aims at detecting the sentiment towards a particular aspect in a sentence. A multi-aspect sentence (\emph{i.e.}, a sentence that contains more than one aspect) can be categorized as {\bf overlapping} or {\bf non-overlapping}. A sentence is annotated as non-overlapping only if any two of its aspects have no overlap. One key observation is that around $85\%$ of the multi-aspect sentences are non-overlapping in the two public datasets. Figure \ref{sentence} shows a simple example. The non-overlapping sentence contains two aspects. The aspect \emph{food} is in the left part of the sentence while \emph{service} is in the right part. Their distributions over words are {\bf orthogonal} to each other. Another observation is that only a few words relate to the opinion expression in each aspect. As shown in Figure \ref{sentence}, only the word \emph{``great''} is relevant to the aspect \emph{food} and \emph{``dreadful''} to \emph{service}. The distribution of the opinion expression of each aspect is {\bf sparse}.
To detect the sentiment towards a particular aspect, previous studies \cite{Wang2016Attention,Ma2017Interactive,cheng2017aspect,ma2018targeted,huang2018aspect,wang2018learning} have developed various attention-based methods for generating aspect-specific sentence representations. To name a few, \cite{Wang2016Attention} proposes attention-based LSTMs for aspect level sentiment classification. The attention mechanism can concentrate on different parts of a sentence when different aspects are taken as input. %By learning an aspect-specific representation of a sentence, they are more competitive for aspect level classification. \cite{wang2018learning} proposes a segmentation attention based LSTM model, which can effectively capture the structural dependencies between the target and the sentiment expressions with a linear-chain conditional random field (CRF) layer. However, all these works perform single-aspect sentiment analysis, dealing with the aspects in a sentence one at a time and ignoring the orthogonality among multiple aspects. \begin{figure} \setlength{\abovecaptionskip}{0.2cm} % adjust spacing between figure and caption \setlength{\belowcaptionskip}{-0.2cm} % adjust spacing between caption and following text \centering \includegraphics[width=0.45\textwidth]{example.pdf} \caption{Example of a non-overlapping sentence.} \label{sentence} \end{figure} Therefore, we propose a model for multi-aspect sentiment analysis, which handles multiple aspects of a sentence simultaneously. Specifically, we introduce orthogonal regularization for attention weights among multiple non-overlapping aspects. The orthogonal regularization tends to make the attention weights of multiple aspects concentrate on different parts of the sentence with less overlap. We also introduce the sparse regularization, which tends to make the attention weights of each aspect concentrate only on a few words. We call our networks with such regularizations {\bf constrained attention networks} (CAN). The implementation of adding regularization terms to attention weights of multiple aspects is similar to adding the penalization term in self-attention in \cite{lin2017structured}. The details will be introduced in the model section. In addition to aspect level sentiment classification ({\bf ALSC}), aspect category detection ({\bf ACD}) is another task of aspect-based sentiment analysis. We introduce ACD as an auxiliary task to assist the ALSC task, benefiting from the shared context of the two tasks. Aspect category detection~\cite{Zhou2015Representation,Schouten2018Supervised} is a task which aims to identify the aspect categories discussed in a given sentence from a predefined set of aspect categories (e.g., price, food, service). Take Figure~\ref{sentence} as an example: the aspect categories \emph{food} and \emph{service} are mentioned. We also apply our attention constraints to the ACD task. By applying attention weight constraints to both ALSC and ACD tasks in an end-to-end network, we further evaluate the effectiveness of CAN in multi-task settings. In summary, the main contributions of our work are as follows: \begin{itemize} \vspace{-0.2cm} \item We propose CAN for multi-aspect sentiment analysis. Specifically, we introduce {\bf orthogonal} and {\bf sparse} regularizations to constrain the attention weight allocation, helping to learn better aspect-specific sentence representations. To the best of our knowledge, this is the first work for multi-aspect sentiment analysis.
%\vspace{-0.2cm} %\item \textcolor{red}{For accurately constraining the sentences, we annotate two public datasets about whether multiple aspects in the sentence are overlap, and we will publish the datasets.} \vspace{-0.2cm} \item We extend CAN to multi-task settings by introducing ACD as an auxiliary task, and applying CAN on both ALSC and ACD tasks. \vspace{-0.2cm} \item Extensive experiments are conducted on public datasets. Results demonstrate the effectiveness of our approach for aspect level sentiment classification. \end{itemize} \section{Related Work} %\subsection{Aspect Level Sentiment Classification} {\bf Aspect level sentiment classification} is a fine-grained sentiment analysis task. Earlier methods are usually based on explicit features \cite{liu2010improving,Vo2015Target}. \cite{liu2010improving} uses different linguistic features for sentiment classification. \cite{Vo2015Target} studies aspect-based Twitter sentiment classification by applying automatic features, which are obtained from unsupervised learning methods. With the rapid development of deep learning technologies, many end-to-end neural networks have been implemented to solve this fine-grained task. \cite{Wang2016Attention} proposes an attention-based LSTM network for aspect-level sentiment classification. \cite{Tay2017Learning} introduces a word aspect fusion attention layer to learn attentive representations. \cite{Ma2017Interactive} proposes interactive attention networks to generate representations for targets and contexts separately. \cite{tay2017dyadic} proposes dyadic memory networks for aspect-based sentiment analysis. \cite{cheng2017aspect,ruder2016hierarchical} both propose hierarchical neural network models for aspect level sentiment classification. \cite{ma2018targeted} proposes a two-step attention model for targeted aspect-based sentiment analysis. \cite{wang2018learning} proposes a segmentation attention based LSTM model for aspect level sentiment classification. However, all these works can be categorized as single-aspect sentiment analysis, which deals with aspects in a sentence separately, ignoring the orthogonality among multiple aspects. %\subsection{Multi-task Learning} {\bf Multi-task learning} \cite{Caruana1997Multitask} solves multiple learning tasks at the same time, achieving improved performance by exploiting commonalities and differences across tasks. Multi-task learning has been used successfully in many machine learning applications. \cite{Huang2018Multitask} learns both the main task and an auxiliary task jointly with shared representations, achieving improved performance in question answering. \cite{Toshniwal2017Multitask} uses low-level auxiliary tasks for encoder-decoder based speech recognition, which suggests that the addition of auxiliary tasks can help in either optimization or generalization. \cite{yu2016learning} uses two auxiliary tasks to help induce a sentence embedding that works well across domains for sentiment classification. In this paper, we adopt the multi-task learning approach by using ACD as the auxiliary task to help the ALSC task. %In our implementation, the ACD task shares the same sentence representation and use the same attention mechanism with the ALSC task, so that we can easily extend our attention constraints to the ACD task. The multi-task learning can benefit from shared sentence representations, as well as attention constraints.
\begin{figure*}
\setlength{\abovecaptionskip}{0.2cm} % adjust spacing between caption and figure
\setlength{\belowcaptionskip}{-0.3cm} % adjust spacing between caption and following text
\centering
\includegraphics[width=1.0\textwidth]{Big_network_new.pdf}
\caption{Network Architecture. The aspect categories are embedded as vectors. The model encodes the sentence using an LSTM. Based on its hidden states, aspect-specific sentence representations for the ALSC and ACD tasks are learned via constrained attention. Then aspect level sentiment prediction and aspect category detection are performed.}
\label{network}
\end{figure*}

\section{Model}
We first formulate the problem. In total, there are $N$ predefined aspect categories in the dataset, $A=\{A_1,...,A_N\}$. Given a sentence $S=\{w_1, w_2, ..., w_L\}$, which contains $K$ aspects $A^s=\{A_1^s,...,A_K^s\}, A^s\subseteq A$, the multi-task learning problem is to simultaneously solve the ALSC and ACD tasks: the ALSC task predicts the sentiment polarity of each aspect $A_k^s \in A^s$, and the auxiliary ACD task checks each aspect $A_n \in A$ to see whether the sentence $S$ mentions it.

We propose CAN for multi-aspect sentiment analysis, supporting both the ALSC and ACD tasks within a multi-task learning framework. The network architecture is shown in Figure \ref{network}. We will introduce all components sequentially from left to right.

\subsection{Input and Embedding Layers}
Traditionally, aspect based sentiment analysis handles each aspect separately, one at a time. In such settings, a sentence $S$ with $K$ aspects will be copied to form $K$ instances, each of which is associated with a single aspect. For example, suppose a sentence $S$ contains two aspects, $A_1^s$ with polarity $p_1$, and $A_2^s$ with polarity $p_2$. Two instances, $\langle{S, A_1^s, p_1}\rangle$ and $\langle{S, A_2^s, p_2}\rangle$, will be constructed. In this paper, our model is for multi-aspect sentiment analysis, handling multiple aspects of a sentence together. For the sentence $S$ with two aspects $A_1^s$ and $A_2^s$, the input to our model is $\langle{S, [A_1^s, A_2^s], [p_1, p_2]}\rangle$, as a single instance.

With embedding matrices, the input sentence $\{w_1, w_2, ..., w_L\}$ is converted to a sequence of vectors $\{v_1,v_2,...,v_L\}$, and the $K$ aspects of the sentence are transformed to vectors $\{u_1^s,...,u_K^s\}$, which is a subset of $\{u_1,...,u_N\}$, the vectors of all aspect categories. The embedding dimension is $d$.

\subsection{LSTM Layer}
The word embeddings of the sentence are then fed into an LSTM network \cite{Hochreiter1997Long}, which outputs hidden states $H=\{h_1,h_2,...,h_L\}$. At each time step $l$, the hidden state $h_l$ of the LSTM is computed by:
\begin{equation}
\setlength{\abovedisplayskip}{4pt} % spacing between equation and preceding text
\setlength{\belowdisplayskip}{4pt} % spacing between equation and following text
h_l = LSTM(h_{l-1},v_l)
\end{equation}
%where $v_l$ is the input vector at time step $l$ and $h_{l-1}$ is the hidden state of the LSTM in previous time step $l-1$.
The size of the hidden state is also set to $d$.

\subsection{Task-Specific Attention Layer}
Our multi-task learning framework supports both the ALSC and ACD tasks. The two tasks share the hidden states from the LSTM layer, while computing their own attention weights separately. The attention weights are then used to compute aspect-specific sentence representations.

{\bf ALSC Attention Layer} The key idea of aspect level sentiment classification is to learn different attention weights for different aspects, so that different aspects can concentrate on different parts of the sentence.
%Multi-aspect sentiment analysis simultaneously handles multiple aspects by adding constraints to their attention weights.
We follow the approach in \cite{Bahdanau2015iclr} to compute the attention. Particularly, given the sentence $S$ with $K$ aspects, $A^s=\{A_1^s,...,A_K^s\}$, for each aspect $A_k^s$, its attention weights are calculated by:
\begin{equation}
\setlength{\abovedisplayskip}{4pt} % spacing between equation and preceding text
\setlength{\belowdisplayskip}{4pt} % spacing between equation and following text
\alpha_k = softmax({z^a}^\mathrm{T}tanh(W_{1}^{a}{H} + W_{2}^{a}(u_k^{s}\otimes{e_L})))
\label{equation_absa_att}
\end{equation}
where $u_k^{s}$ is the embedding of the aspect $A_k^s$, $e_L\in\mathbb{R}^{L}$ is a vector of $1$s, and ${u_k^{s}}\otimes{e_L}$ denotes the operation of concatenating $L$ copies of $u_k^{s}$. $W_{1}^a\in\mathbb{R}^{{d}\times{d}}$ and $W_{2}^a\in\mathbb{R}^{{d}\times{d}}$ are weight matrices and $z^a\in\mathbb{R}^{d}$ is a weight vector.

{\bf ACD Attention Layer} We treat the ACD task as a multi-label classification problem over the set of $N$ aspect categories. For each aspect $A_n\in A$, its attention weights are calculated by:
\begin{equation}
\setlength{\abovedisplayskip}{4pt} % spacing between equation and preceding text
\setlength{\belowdisplayskip}{4pt} % spacing between equation and following text
\beta_n = softmax({z^b}^\mathrm{T}tanh(W_{1}^{b}{H} + W_{2}^{b}(u_n\otimes{e_L})))
\label{equation_acd_att}
\end{equation}
where $u_n$ is the embedding of the aspect $A_n$. $W_{1}^b\in\mathbb{R}^{{d}\times{d}}$ and $W_{2}^b\in\mathbb{R}^{{d}\times{d}}$ are weight matrices and $z^b\in\mathbb{R}^{d}$ is a weight vector.

The ALSC and ACD tasks use the same attention mechanism, but they do not share parameters. The reason for using separate parameters is that, for the same aspect, the attention of ALSC concentrates more on opinion words, while ACD focuses more on aspect target terms (see the attention visualizations in Section~\ref{sec:att_vis}).
%Take Figure \ref{sentence} as an example, for the aspect \emph{food}, ALSC concentrates more on the word \emph{``great''}, while ACD puts more attention on the word \emph{``taste''}.

\subsection{Regularization Layer}
Multi-aspect sentiment analysis simultaneously handles multiple aspects by adding constraints to their attention weights. {\bf Note that this layer is only used in the training stage}, in which the ground-truth aspects are known, so the regularization loss can be calculated and influence parameter updates through back propagation. In the testing/inference stage, the true aspects are unknown, the regularization loss is not calculated, and this layer is omitted from the architecture. In this paper, we introduce two types of regularization: a sparse regularization on each single aspect, and an orthogonal regularization across multiple non-overlapping aspects.

{\bf Sparse Regularization} For each aspect, the sparse regularization constrains the distribution of the attention weights ($\alpha_k$ or $\beta_n$) to concentrate on fewer words. For simplicity, we use $\alpha_k$ as an example, $\alpha_k=\{\alpha_{k1}, \alpha_{k2}, ..., \alpha_{kL}\}$. To make $\alpha_k$ sparse, the sparse regularization term is defined as:
\begin{equation}
\setlength{\abovedisplayskip}{4pt} % spacing between equation and preceding text
\setlength{\belowdisplayskip}{4pt} % spacing between equation and following text
R_s = \mid\sum\limits_{l=1}^L{\alpha_{kl}^{2}} - 1\mid
\label{equation:sparse_reg}
\end{equation}
where $\sum\limits_{l=1}^L{\alpha_{kl}}=1$ and $\alpha_{kl}>0$. Since $\alpha_k$ is normalized as a probability distribution, its $L_1$ norm is always equal to $1$ (the sum of the probabilities) and therefore cannot serve as the usual sparsity regularizer. Minimizing Equation \ref{equation:sparse_reg} forces the sparsity of $\alpha_k$. It has a similar effect to minimizing the entropy of $\alpha_k$, which leads to placing more probability on fewer words.
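To see why minimizing $R_s$ encourages sparsity, consider two extreme attention distributions over a sentence of length $L$ (chosen purely for illustration). For a uniform distribution with $\alpha_{kl}=1/L$ for every word, $\sum_{l=1}^L{\alpha_{kl}^{2}}=1/L$, so $R_s=1-1/L$, which approaches $1$ for long sentences. For a one-hot distribution that places all of its weight on a single word, $\sum_{l=1}^L{\alpha_{kl}^{2}}=1$ and $R_s=0$. Minimizing $R_s$ therefore pushes $\alpha_k$ towards the concentrated case.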
{\bf Orthogonal Regularization} This regularization term forces orthogonality between the attention weight vectors of different aspects, so that different aspects attend to different parts of the sentence with less overlap. Note that we only apply this regularization to non-overlapping multi-aspect sentences. Assume that the sentence $S$ contains $K$ non-overlapping aspects $\{A_1^s,...,A_K^s\}$ and their attention weight vectors are $\{\alpha_1,...,\alpha_K\}$. We pack them together as a two-dimensional attention matrix $M\in\mathbb{R}^{{K}\times{L}}$ and calculate the orthogonal regularization term as
\begin{equation}
\setlength{\abovedisplayskip}{4pt} % spacing between equation and preceding text
\setlength{\belowdisplayskip}{4pt} % spacing between equation and following text
R_o = \left\|MM^{\mathrm{T}} - I\right\|_{2}
\label{equation:orthogonal_reg}
\end{equation}
where $I$ is an identity matrix. In the resulting matrix $MM^{\mathrm{T}}$, each non-diagonal element is the dot product between two attention weight vectors; minimizing the non-diagonal elements forces orthogonality between the corresponding attention weight vectors. Subtracting $1$ from the diagonal elements of $MM^{\mathrm{T}}$ gives terms of the same form as $R_s$ defined in Equation \ref{equation:sparse_reg}. As a whole, $R_o$ includes both sparse and orthogonal regularization terms.

Note that in the ACD task, we do not pack all the $N$ attention vectors $\{\beta_1, ..., \beta_N\}$ into a matrix. The sentence $S$ contains $K$ aspects. For simplicity, let $\{\beta_1, ..., \beta_K\}$ be the attention vectors of the $K$ aspects mentioned, and $\{\beta_{K+1}, ..., \beta_N\}$ be the attention vectors of the $N-K$ aspects not mentioned. We compute the average of these $N-K$ attention vectors, denoted by $\beta_{avg}$. We then construct the attention matrix $G=\{\beta_{1}, ..., \beta_{K},\beta_{avg}\}$, $G\in\mathbb{R}^{{(K+1)}\times{L}}$. The reason why we calculate $\beta_{avg}$ is that if an aspect is not mentioned in the sentence, its attention weights often attend to meaningless stop words, such as \emph{``to''}, \emph{``the''}, \emph{``was''}, etc. We do not need to distinguish among the $N-K$ aspects not mentioned, so they can share the stop words in the sentence by being averaged as a whole, which keeps the $K$ mentioned aspects away from such stop words.

\section{Experiments}
\subsection{Datasets}
We conduct experiments on two public datasets from SemEval 2014 task 4 \cite{Pontiki2014SemEval} and SemEval 2015 task 12 (denoted by Rest14 and Rest15, respectively). These two datasets consist of restaurant customer reviews with annotations identifying the mentioned aspects and the sentiment polarity of each aspect. To apply the orthogonal regularization, we manually annotate the multi-aspect sentences as overlapping or non-overlapping. We randomly split the original training set into training and validation sets in a 5:1 ratio, where the validation set is used to select the best model. We count single-aspect and multi-aspect sentences separately. Detailed statistics are summarized in Table \ref{table-dataset}. In particular, $85.23\%$ and $83.73\%$ of the multi-aspect sentences are non-overlapping in Rest14 and Rest15, respectively.
%%%%%%%Comment table
\begin{comment}
\begin{table}[t!]
\begin{center}
\setlength{\tabcolsep}{0.3mm}{
\begin{tabular} {|c|ccc|ccc|}
\hline
\multirow{2}{*}{Dataset} & \multicolumn{3}{c|}{\#sentences} & \multicolumn{3}{c|}{\#aspects} \\
\cline{2-7}
& \emph{Single} & \emph{Multi} & Total & \emph{Single} & \emph{Multi} & Total \\
\hline
Rest14\_Train & 2053 & 482 & 2535 & 2053 & 1047 & 3100\\
Rest14\_Val & 412 & 94 & 506 & 412 & 201 & 613 \\
Rest14\_Test & 611 & 189 & 800 & 611 & 414 & 1025 \\
\hline
Rest15\_Train & 622 & 309 & 931 & 622 & 766 & 1388 \\
Rest15\_Val & 137 & 52 & 189 & 137 & 129 & 266 \\
Rest15\_Test & 390 & 192 & 582 & 390 & 455 & 845 \\
\hline
\end{tabular}}
\end{center}
\caption{\label{table-dataset} The numbers of single-aspect and multi-aspect sentences, and the numbers of aspects in single-aspect and multi-aspect sentences.}
\end{table}
\end{comment}
\begin{table}[t!]
\setlength{\abovecaptionskip}{0.1cm} % adjust spacing between caption and table
\setlength{\belowcaptionskip}{-0.4cm} % adjust spacing between caption and following text
\begin{center}
\setlength{\tabcolsep}{1mm}{
\begin{tabular} {|c|c|ccc|c|}
\hline
\multirow{2}{*}{Dataset} & \multirow{2}{*}{\#Single} & \multicolumn{3}{c|}{\#Multi} & \multirow{2}{*}{\#Total} \\
\cline{3-5}
&& \emph{OL} & \emph{NOL} & \emph{Total} & \\
\hline
Rest14\_Train & 2053 & 67 & 415 & 482 & 2535\\
Rest14\_Val & 412 & 19 & 75 & 94 & 506 \\
Rest14\_Test & 611 & 27 & 162 & 189 & 800 \\
\hline
Rest15\_Train & 622 & 47 & 262 & 309 & 931 \\
Rest15\_Val & 137 & 13 & 39 & 52 & 189 \\
Rest15\_Test & 390 & 30 & 162 & 192 & 582 \\
\hline
\end{tabular}}
\end{center}
\caption{\label{table-dataset} The numbers of single- and multi-aspect sentences. \emph{OL} and \emph{NOL} denote the overlapping and non-overlapping multi-aspect sentences, respectively.}
\end{table}

\subsection{Comparison Methods}
\begin{itemize}
\item {\bf LSTM}: We implement a vanilla LSTM network to model the sentence and use the average of all hidden states as the sentence representation. In this model, aspect information is not used.
\vspace{-6pt}
\item {\bf AT-LSTM} \cite{Wang2016Attention}: It adopts the attention mechanism in LSTM to generate a weighted representation of a sentence. The aspect embedding is used to compute the attention weights as in Equation \ref{equation_absa_att}. Unlike \cite{Wang2016Attention}, we do not concatenate the aspect embedding to the hidden state, which yields a small performance improvement. We use this modified version of AT-LSTM in all experiments.
\vspace{-6pt}
\item {\bf ATAE-LSTM} \cite{Wang2016Attention}: This method is an extension of AT-LSTM. In this model, the aspect embedding is concatenated to each word embedding of the sentence as the input to the LSTM layer.
%\vspace{-5pt}
%\item {\bf AF-LSTM(CONV)} \cite{Tay2017Learning}: It utilizes circular convolution to compute deeper fusion relationships between each word in sentence and aspect.
\end{itemize}
\begin{table}[t!]
\setlength{\abovecaptionskip}{0.0cm} % adjust spacing between caption and table
\setlength{\belowcaptionskip}{-0.2cm} % adjust spacing between caption and following text
\begin{center}
\setlength{\tabcolsep}{1.5mm}{
\begin{tabular} {|c|cc|cc|}
\hline
\multirow{2}{*}{Model} & \multicolumn{2}{c|}{Rest14} & \multicolumn{2}{c|}{Rest15} \\
\cline{2-5}
& 3-way & Binary & 3-way & Binary \\
\hline
LSTM & 80.61 & 86.66 & 73.14 & 73.27 \\
AT-LSTM & 81.66 & 87.13 & 75.15 & 76.40 \\
ATAE-LSTM & 82.08 & 87.72 & 74.32 & 76.79 \\
%AF-LSTM(CONV) & 81.29 & 87.26 & - & - \\
\hline
AT-CAN-$R_s$ & 81.97 & 88.08 & 75.74 & 80.05 \\
AT-CAN-$R_o$ & 82.60 & 88.67 & 75.03 & 81.10 \\
ATAE-CAN-$R_s$ & 82.29 & 87.37 & 76.09 & 80.83 \\
ATAE-CAN-$R_o$ & {\bf 82.91} & {\bf 89.02} & {\bf 77.28} & {\bf 82.66} \\
\hline
\end{tabular}}
\end{center}
\caption{\label{table-st} Results of the ALSC task in terms of accuracy ($\%$). All methods are run in single-task settings.}
\end{table}

\subsection{Results}
Tables \ref{table-st} and \ref{table-mt} show our experimental results on the two public datasets for single-task and multi-task settings, respectively. In both tables, ``3-way'' stands for 3-class classification (positive, neutral, and negative), and ``Binary'' for binary classification (positive and negative). The best scores are marked in bold.
\begin{figure}
\setlength{\abovecaptionskip}{0.1cm} % adjust spacing between caption and figure
\setlength{\belowcaptionskip}{-0.2cm} % adjust spacing between caption and following text
\centering
\subfigure[AT-LSTM]{
\includegraphics[width=0.45\textwidth]{at_.pdf}}
\vspace{-5pt}
\subfigure[M-AT-LSTM]{
\includegraphics[width=0.45\textwidth]{at_r2_.pdf}}
\vspace{-5pt}
\subfigure[M-CAN-2$R_o$]{
\includegraphics[width=0.45\textwidth]{multitask_at_2r2_.pdf}}
\caption{Visualization of attention weights of different aspects in the ALSC task. Three different models are compared.}
\label{compare-att}
\end{figure}
\begin{figure}
\setlength{\abovecaptionskip}{0.1cm} % adjust spacing between caption and figure
\setlength{\belowcaptionskip}{-0.4cm} % adjust spacing between caption and following text
\centering
\includegraphics[width=0.45\textwidth]{f3_.pdf}
\caption{Visualization of attention weights of different aspects in the ACD task from M-CAN-2$R_o$. The a/m is short for anecdotes/miscellaneous.}
\label{ACD-att}
\end{figure}
\begin{figure}
\setlength{\abovecaptionskip}{0.1cm} % adjust spacing between caption and figure
\setlength{\belowcaptionskip}{-0.5cm} % adjust spacing between caption and following text
\centering
\includegraphics[width=0.45\textwidth]{loss.pdf}
\caption{The regularization loss curves of $R_s$ and $R_o$ during the training of AT-CAN-$R_o$.}
\label{figure:reg-loss}
\end{figure}

{\bf Single-task Settings} Table \ref{table-st} shows our experimental results of aspect level sentiment classification in single-task settings. Firstly, we observe that by introducing attention regularizations (either $R_s$ or $R_o$), most of our proposed methods outperform their counterparts. Specifically, AT-CAN-$R_s$ and AT-CAN-$R_o$ outperform AT-LSTM in $7$ of $8$ results; ATAE-CAN-$R_s$ and ATAE-CAN-$R_o$ also outperform ATAE-LSTM in $7$ of $8$ results. For example, in the Rest15 dataset, ATAE-CAN-$R_o$ outperforms ATAE-LSTM by up to $7.64\%$ in the Binary classification. Secondly, the regularization $R_o$ achieves a larger performance improvement than $R_s$ in all results. This is because $R_o$ includes both orthogonal and sparse regularizations for non-overlapping multi-aspect sentences. Finally, the LSTM method produces the worst results in all cases, because it cannot distinguish between different aspects. We do not add regularization terms to the LSTM method since no attention weights are computed in this method.
% place before the ``Finally'' sentence
It is worth noting that if a dataset contains only a small portion of multi-aspect sentences, the regularization $R_o$ may not outperform $R_s$ by much.

\section{Conclusion}
We propose constrained attention networks for multi-aspect sentiment analysis, which handle multiple aspects of a sentence simultaneously. Specifically, we introduce orthogonal and sparse regularizations on attention weights. Furthermore, we introduce ACD as an auxiliary task to assist the ALSC task, and apply CAN to both tasks. Experimental results demonstrate that our approach outperforms state-of-the-art methods significantly.

\bibliographystyle{acl_natbib}
\bibliography{naaclhlt2019}
\end{document}
{ "alphanum_fraction": 0.7631419725, "avg_line_length": 88.078369906, "ext": "tex", "hexsha": "c466e789731f0626c6e3466c4a4f311c4063c15c", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "6935d9f44d276020393ac4384236b08ef218280c", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "Xhattam/PDF_summary_helper", "max_forks_repo_path": "arxiv-pdf-summary-helper/test/resources/ex1_original.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "6935d9f44d276020393ac4384236b08ef218280c", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "Xhattam/PDF_summary_helper", "max_issues_repo_path": "arxiv-pdf-summary-helper/test/resources/ex1_original.tex", "max_line_length": 1257, "max_stars_count": null, "max_stars_repo_head_hexsha": "6935d9f44d276020393ac4384236b08ef218280c", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "Xhattam/PDF_summary_helper", "max_stars_repo_path": "arxiv-pdf-summary-helper/test/resources/ex1_original.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 7647, "size": 28097 }
% Copyright (c) 2014 Thanumalayan Sankaranarayana Pillai. All Rights Reserved.
%
% Permission is hereby granted, free of charge, to any person obtaining a copy
% of this software and associated documentation files (the "Software"), to deal
% in the Software without restriction, including without limitation the rights
% to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
% copies of the Software, and to permit persons to whom the Software is
% furnished to do so, subject to the following conditions:
%
% The above copyright notice and this permission notice shall be included in
% all copies or substantial portions of the Software.
%
% THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
% IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
% FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
% AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
% LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
% OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
% SOFTWARE.

\documentclass[oneside]{memoir}
\usepackage[breaklinks]{hyperref}
\usepackage{enumitem}
\begin{document}
\title{ALICE: Application-Level Intelligent Crash Explorer}
\date{}
\maketitle

This is the user documentation for the ALICE tool, which can be used to discover ``crash vulnerabilities'' in applications. Crash vulnerabilities are problems that get exposed when a sudden power loss or system crash occurs while the application is running, and the application cannot recover correctly after the machine reboots. ALICE focuses on single-node applications that run atop file systems. ALICE is different from similar tools in that it aims to find vulnerabilities that might occur across all file systems, including future ones. ALICE is also unique in targeting vulnerabilities associated with different source-code lines of the application, instead of checking the application's correctness atop arbitrarily (or systematically, but less usefully) simulated crash scenarios. ALICE is designed to be extensible: both how it checks different source lines, and the combined behavior it assumes of underlying file systems, can be customized. The ALICE tool is a by-product of a research project (\url{http://research.cs.wisc.edu/adsl/Publications/alice-osdi14.html}) at the University of Wisconsin-Madison.

\chapter{Installation}

ALICE was tested on Ubuntu-12.04, and should be expected to work on similar (i.e., Linux-like) operating systems. The following are specific requirements:
\begin{enumerate}
\item Python-2.7, as the default version of python invoked via \verb;/usr/bin/env python2;.
\item Standard software build tools, such as \verb;gcc; and \verb;GNU Make;.
\item The libunwind libraries, installable in Ubuntu-12.04 using \verb;apt-get install libunwind7;.
\end{enumerate}

The following are the steps to install ALICE:
\begin{enumerate}
\item Download the most recent source-code tarball of ALICE, and untar it. This should produce a directory named \verb;alice;.
\item Set the environment variable \verb;ALICE_HOME; to point to the \verb;alice; directory (i.e., the untarred directory). For example, this can be done by adding the line \verb;export ALICE_HOME=/wherever-untarred/alice; to your \verb;.bashrc; file.
\item Set the \verb;PATH; environment variable to include the \verb;alice/bin; directory.
For example, this can be done by adding the line \verb;export PATH=$PATH:;\discretionary{}{}{}\verb;/wherever-untarred/alice/bin; to your \verb;.bashrc; file.
\item Install the \verb;alice-strace; tracing framework by moving into the \verb;alice/alice-strace; directory, and running \verb+./configure; make; make install;+
\end{enumerate}

\chapter{Basic Usage}

The typical workflow for using ALICE has two steps. First, an application workload is run, and a trace of its activities is recorded. Second, ALICE is given this trace and a \textit{checker} script (explained later); ALICE explores the trace and displays discovered vulnerabilities. This documentation explains the typical usage of ALICE by using a toy application.

\subsection{Toy application}

The toy application can be found in \verb;alice/example/toy/toy.c;; the reader is encouraged to go through it. The application does the following:
\begin{enumerate}
\item It updates a file called \verb;file1;, changing the contents of the file from \textit{``hello''} to \textit{``world''}. The update is done using a typical ``write to temporary file and rename'' sequence, so that the contents are updated atomically. Immediately after updating, the application prints a message to the user's terminal (the user can then supposedly assume that the file has been updated, and that the file will contain \textit{``world''} even if a power loss happens).
\item It creates two links to the file, \verb;link1; and \verb;link2;. The (imaginary) semantics of the toy application require both these links to be created atomically (i.e., if a power loss happens, either both links exist or neither does).
\end{enumerate}

\subsection{Step 1: Running the application and recording a trace}

A script that runs the application and records a trace, along with all initialization setup, can be found in \verb;alice/example/toy/toy_workload.sh;; the reader is encouraged to go through it.

To perform \textmd{Step 1}, two directories are needed. The first, the \textit{workload directory}, is where the files of the application will be stored. The application, as it runs, will modify the workload directory and its contents. For the toy application, this is the place where \verb;file1;, \verb;link1;, and \verb;link2; are placed. The \verb;toy_workload.sh; script first creates the workload directory, \verb;workload_dir;, and then initializes it with the file \verb;file1; containing \textit{``hello''}. The other needed directory, the \textit{traces directory}, is for storing the (multiple) traces that are recorded as the application runs. The \verb;toy_workload.sh; script next creates this directory, \verb;traces_dir;.

After setting up the workload directory and the traces directory, the \verb;toy_workload.sh; script performs a few more initialization steps: compiling the \verb;toy.c; application, and \verb;cd;ing into \verb;workload_dir; so that the toy application can be run from there. The \verb;toy_workload.sh; script finally runs the application and records traces, by issuing the following command:
\begin{verbatim}
alice-record --workload_dir . \
             --traces_dir ../traces_dir \
             ../a.out
\end{verbatim}
If the reader is familiar with the \textit{strace} utility, the above command is similar to an invocation of strace: \verb;alice-record; is a script that records traces, while \verb;../a.out; is the actual application to be run (the process and all subprocesses of \verb;../a.out; are traced, similar to strace with the \verb;-ff; option).
The \verb;alice-record; script requires two mandatory arguments: the workload directory and the traces directory (\verb;alice-record; takes one more optional argument, \verb;--verbose;, to control verbosity).

\subsection{Step 2: Supply ALICE with the trace and the checker, and get back a list of vulnerabilities}

\textmd{Step 2} requires the user to supply ALICE with a checker script. The checker script will be invoked multiple times by ALICE, each invocation corresponding to a (simulated) system crash scenario that could have happened while the application was running in \textmd{Step 1}. During each invocation, the checker script will be given a directory that reflects the state of the workload directory if the (simulated) crash had really happened. If the given crashed-state workload directory has an expected (i.e., consistent) set of files, the checker script should exit with status zero, and should exit with a non-zero status otherwise.

ALICE supplies the checker script with two command-line arguments. The first is the path to the crashed-state workload directory. The second command-line argument to the checker script is the path to an \textit{stdout file}. The stdout file contains all the messages that had been printed to the user's terminal at the time of the crash (corresponding to the supplied crashed-state workload directory), and can be used by the checker to check for durability, as explained below.

Note that the crashed-state workload directory supplied by ALICE might differ from the original workload directory in \textmd{Step 1}. Hence, for applications that expect the \textit{absolute path} of the contents within the workload directory to not have changed (a small subset of applications in our experience), the checker script needs to move the supplied directory to the original directory, and then operate atop the original directory.

The checker script for the toy application can be found in \verb;alice/example/;\discretionary{}{}{}\verb;toy/toy_checker.py;, and the reader is encouraged to go through it. The script first changes the current working directory into the crashed-state directory supplied by ALICE, and reads all the messages that had been printed to the terminal at the time of the crash from the stdout file supplied by ALICE. If the application has printed the \textit{``Updated file1 to \emph{world}''} message, the checker script makes sure that \verb;file1; contains \textit{``world''}; otherwise, the checker script makes sure that \verb;file1; contains either \textit{``hello''} or \textit{``world''}. The checker script then makes sure that \verb;link1; and \verb;link2; are either both present or both absent. If any of the checked conditions do not hold, the checker script results in an assertion failure, thus exiting with a non-zero status (and thus informing ALICE that the application will fail if the simulated crash scenario happens in reality).
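The following is a condensed sketch of what such a checker can look like. It follows the description of \verb;toy_checker.py; above, but the exact message string, file names, and structure are illustrative rather than authoritative; consult the shipped script for the real contents:
\begin{verbatim}
#!/usr/bin/env python2
# Illustrative checker sketch; the shipped toy_checker.py may differ.
import os, sys

crashed_dir = sys.argv[1]   # crashed-state workload directory
stdout_file = sys.argv[2]   # terminal output at the time of the crash

os.chdir(crashed_dir)
printed = open(stdout_file).read()
content = open('file1').read().strip()

# Durability: if the user was already told about the update,
# file1 must contain the new value.
if 'Updated file1 to world' in printed:
    assert content == 'world'
else:
    # Atomicity: otherwise file1 must hold the old or the new value.
    assert content in ('hello', 'world')

# Atomicity of the links: either both exist or neither does.
assert os.path.exists('link1') == os.path.exists('link2')
\end{verbatim}
An assertion failure makes the script exit with a non-zero status, which is how ALICE is told that the corresponding crash scenario exposes a vulnerability.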
After writing the checker script, the user can invoke the \verb;alice-check; script to actually run ALICE and get the list of vulnerabilities. The reader is encouraged to run the following command from within the \verb;alice/example/toy; directory, to get a list of vulnerabilities discovered in the toy application (after running \verb;toy_workload.sh; first).
\begin{verbatim}
alice-check --traces_dir=traces_dir --checker=./toy_checker.py
\end{verbatim}
The \verb;alice-check; script has the following arguments:
\begin{description}[leftmargin=!, labelindent=1.2cm, itemindent=-0.5cm]\itemsep1pt \parskip0pt \parsep0pt
\item[traces\_dir] Mandatory. The traces directory, from \textmd{Step 1}.
\item[checker] Mandatory. The checker script.
\item[threads] Optional, default is 4. ALICE invokes checker scripts in parallel, each given a separate crashed-state directory to work on. Some applications do not allow multiple simultaneous invocations, and might require this option to be set to 1.
\item[debug\_level] Optional, default is 0. Verbosity of warnings, can be 0, 1, or 2.
\item[ignore\_mmap] Optional, default is False. The current version of ALICE does not trace \verb;mmap;-writes, and cannot correctly work with application workloads that use memory mapping to modify relevant files (see caveats and limitations). If the recorded trace during \textmd{Step 1} involves a writeable \verb;mmap(); to a seemingly relevant file, \verb;alice-check; aborts execution by default. However, some application workloads use \verb;mmap(); only on files that are irrelevant to crash consistency, for example to implement a shared-memory lock dealing with multi-process concurrency synchronization. This option can be set to True if the user is sure that the \verb;mmap();s observed while running the application workload are irrelevant to finding crash vulnerabilities. Some database applications use \verb;mmap(); for concurrency control, even when configured not to use \verb;mmap(); for otherwise accessing files, and require this option.
\end{description}

\subsection{Understanding ALICE's output}

ALICE first outputs a list of the logical operations that form the \textit{update protocol} used by the application workload invoked in \textmd{Step 1}. The displayed logical operations are similar to a system-call trace, except that they are easier to understand, for example substituting file names for file descriptor numbers. ALICE then displays any discovered vulnerabilities. Vulnerabilities are displayed in two ways: \textit{dynamic vulnerabilities}, relating to different operations in the update protocol, and \textit{static vulnerabilities}, relating to source-code lines. The proper display of static vulnerabilities requires the originally traced application to have debugging symbols; also, ALICE associates each logical operation with one of the stack frames in the logical operation's stack trace to display static vulnerabilities, and this association can sometimes be faulty.

\chapter{Customizing, Extending, and Hacking}

ALICE is designed to be extensible. The current version of ALICE strips off many features that were previously implemented, in hopes that a smaller code base promotes extensions. However, the current version is also not sufficiently commented, and does not follow some good coding practices; a well-commented version of the software might be released in the future if users show interest. To extend ALICE, readers are required to go through our publication (\url{http://research.cs.wisc.edu/adsl/Publications/alice-osdi14.html}) to understand ALICE's design and philosophy.
Note that there is some terminology difference between the publication and ALICE's source code; in particular, \textit{logical operations} discussed in the publication correspond to \textit{micro operations} in the source code, while \textit{micro operations} in the publication correspond to \textit{disk operations} in the source code.

ALICE's default exploration strategy, which investigates the ordering and atomicity of each system call and reports any associated vulnerabilities, is coded in \verb;alice/alicedefaultexplorer.py;, and can be easily changed. The \verb;alicedefaultexplorer.py; code is complicated since it displays static vulnerabilities and invokes checkers in multiple threads. A functionally equivalent exploration strategy can be simpler.

ALICE's default APM (abstract persistence model) is coded in \verb;alice/alicedefaultfs.py;, and can be easily changed. The \verb;alicedefaultfs.py; code is complicated since it models a file system that can be configured to split file operations at different granularities. A functionally equivalent file system (with a single granularity) can be simpler.

Apart from the extensions discussed so far, users might try to add support for more system calls, file attributes, symbolic links, or other such details, in ALICE. Relevant to these, the \verb;_aliceparsesyscalls.py; script contains code that converts system calls into logical operations, while the \verb;replay_disk_ops(); function from the \verb;alice.py; script contains code that re-constructs a directory from a given list of micro-ops.

\chapter{Caveats and Limitations}

ALICE is a \textit{safe}, but not a \textit{complete}, tool. That is, the application might have additional vulnerabilities beyond those discovered and reported. ALICE is thus not aligned towards comparing the correctness of different applications; specifically, comparisons that do not use equivalent workloads and checkers can easily produce confusing, wrong inferences. Also, any vulnerability displayed by ALICE might already be known to an application developer: the application documentation might explicitly require that the underlying file system not behave in those ways that will expose the vulnerability, or might simply not provide those guarantees that are being checked by the checker. The default file-system model (APM) used by ALICE is designed to also find vulnerabilities that can get exposed by future file systems; some crash scenarios that are possible with the default model do not happen in common current file systems. Also, ALICE's output (a list of vulnerabilities) is only designed to show the number of source lines that require ordering or atomicity. It is thus erroneous to directly correlate the number of vulnerabilities shown by ALICE with current real-world impact.

ALICE does not currently attempt to deal with any file attributes (including modification time) other than the file size, or with the \verb;FD_CLOEXEC; and \verb;O_CLOEXEC; facilities. If the application's logic (that is invoked in the workload and the checker) depends on these, ALICE's output is probably wrong. Support for a few rarely-used system calls is also lacking; warning or error messages are displayed by ALICE if the application workload had invoked such calls. The situation for symlinks is similar; while the current version of ALICE has partial support for them, if the application logic depends on symlinks, ALICE's output might be wrong.
The current version of ALICE also does not support tracing memory-mapped writes; applications that use such writes as a part of their (relevant) update protocol cannot use ALICE. Note that a version of ALICE used in our published research paper (\url{http://research.cs.wisc.edu/adsl/Publications/alice-osdi14.html}) traced memory-mapped writes, but support was removed in the interest of distributability.

Adding support for file attributes, \verb;CLOEXEC;, symlinks, and \verb;mmap(); writes does not require any changes to the design of ALICE, and might be done in future versions if users deem them helpful.

\chapter{Credits, Acknowledgements, and Contact Information}

Thanumalayan Sankaranarayana Pillai, Vijay Chidambaram, Ramnatthan Alagappan, and Samer Al-Kiswany were involved in various aspects of design and testing of the ALICE tool. Thanumalayan Sankaranarayana Pillai ([email protected]) is the primary author of the tool, and is likely the best contact for bug reports, feature requests, or other general discussions. Ramnatthan Alagappan extensively tested the tool, and Vijay Chidambaram also wrote a part of the code. The ALICE tool is a by-product of a research project (\url{http://research.cs.wisc.edu/adsl/Publications/alice-osdi14.html}) at the University of Wisconsin-Madison, and due credit must be given to all parties who were involved in or contributed to the project.

The \verb;alice-strace; tracing framework is a slight customization of the strace tool (\url{http://sourceforge.net/projects/strace/}), along with some code adapted from strace-plus (\url{https://code.google.com/p/strace-plus/}). Credits must be given to the authors and contributors of strace and strace-plus.

\end{document}
{ "alphanum_fraction": 0.7963890072, "avg_line_length": 139.0814814815, "ext": "tex", "hexsha": "9f3c0b5ef368a822989b5878094845819806e5fc", "lang": "TeX", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2020-12-02T11:42:12.000Z", "max_forks_repo_forks_event_min_datetime": "2020-12-02T11:42:12.000Z", "max_forks_repo_head_hexsha": "8ddabcbdc9fbe072b2dc99596458390f7608fb6d", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "dgraph-io/alice", "max_forks_repo_path": "doc/doc.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "8ddabcbdc9fbe072b2dc99596458390f7608fb6d", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "dgraph-io/alice", "max_issues_repo_path": "doc/doc.tex", "max_line_length": 1105, "max_stars_count": 1, "max_stars_repo_head_hexsha": "8ddabcbdc9fbe072b2dc99596458390f7608fb6d", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "dgraph-io/alice", "max_stars_repo_path": "doc/doc.tex", "max_stars_repo_stars_event_max_datetime": "2018-01-31T03:14:33.000Z", "max_stars_repo_stars_event_min_datetime": "2018-01-31T03:14:33.000Z", "num_tokens": 4149, "size": 18776 }
\subsection{pwd -- The password database} To be done .... %
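A minimal illustrative example in the meantime (the \texttt{pwd} module is Unix-only and reads the system password database):
\begin{verbatim}
import pwd

# Look up one account by name or by numeric user id.
root = pwd.getpwnam("root")
print(root.pw_uid, root.pw_dir, root.pw_shell)
print(pwd.getpwuid(0).pw_name)   # usually "root"

# Iterate over all entries in the password database.
for entry in pwd.getpwall():
    print(entry.pw_name, entry.pw_uid, entry.pw_gid)
\end{verbatim}
\texttt{getpwnam} and \texttt{getpwuid} raise \texttt{KeyError} when no matching entry exists.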
{ "alphanum_fraction": 0.6557377049, "avg_line_length": 12.2, "ext": "tex", "hexsha": "682d759f8d250e90dd5c215efe2d45522440c0be", "lang": "TeX", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2016-11-24T19:55:47.000Z", "max_forks_repo_forks_event_min_datetime": "2016-11-24T19:55:47.000Z", "max_forks_repo_head_hexsha": "dd7d6f30d945733f7ed792fcccd33875b59d240f", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "remigiusz-suwalski/programming-notes", "max_forks_repo_path": "src/python3/sections/pwd.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "dd7d6f30d945733f7ed792fcccd33875b59d240f", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "remigiusz-suwalski/programming-notes", "max_issues_repo_path": "src/python3/sections/pwd.tex", "max_line_length": 41, "max_stars_count": 1, "max_stars_repo_head_hexsha": "dd7d6f30d945733f7ed792fcccd33875b59d240f", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "remigiusz-suwalski/programming-notes", "max_stars_repo_path": "src/python3/sections/pwd.tex", "max_stars_repo_stars_event_max_datetime": "2022-02-28T05:03:18.000Z", "max_stars_repo_stars_event_min_datetime": "2022-02-28T05:03:18.000Z", "num_tokens": 14, "size": 61 }
%
% IT.
% Information Technology
%
% Aleph Objects Operations Manual
%
% Copyright (C) 2014, 2015 Aleph Objects, Inc.
%
% This document is licensed under the Creative Commons Attribution 4.0
% International Public License (CC BY-SA 4.0) by Aleph Objects, Inc.
%

\section{Public Websites}
These websites are provided for the public.

\subsection{Aleph Objects}
\url{https://www.alephobjects.com} --- Main Aleph Objects website.

\begin{figure}[h!]
\includegraphics[keepaspectratio=true,height=1.10\textheight,width=1.00\textwidth,angle=0]{www.alephobjects.com.png}
\caption{Aleph Objects website, \href{www.alephobjects.com}{www.alephobjects.com}}
\label{fig:wwwalephobjectscom}
\end{figure}

\subsection{LulzBot}
\url{https://www.lulzbot.com} --- Main LulzBot website.

\begin{figure}[h!]
\includegraphics[keepaspectratio=true,height=1.10\textheight,width=1.00\textwidth,angle=0]{www.lulzbot.com.png}
\caption{LulzBot website, \href{www.lulzbot.com}{www.lulzbot.com}}
\label{fig:wwwlulzbotcom}
\end{figure}

\subsection{Aleph Objects Development Archive}
\url{https://devel.alephobjects.com} --- Public development files for Aleph Objects.

\subsection{LulzBot Development Archive}
\url{https://devel.lulzbot.com} --- Public development files for LulzBot.

\subsection{Aleph Objects Software Downloads}
\url{https://download.alephobjects.com} --- Aleph Objects downloads.

\subsection{LulzBot Products Final Release Files}
\url{https://download.lulzbot.com} --- Final release source code for LulzBot products.

\subsection{LulzBot User Discussion Forum}
\url{https://forum.lulzbot.com} --- User discussion forum for LulzBot.

\begin{figure}[h!]
\includegraphics[keepaspectratio=true,height=1.10\textheight,width=1.00\textwidth,angle=0]{forum.lulzbot.com.png}
\caption{LulzBot user discussion forum, \href{forum.lulzbot.com}{forum.lulzbot.com}}
\label{fig:forumlulzbotcom}
\end{figure}

\subsection{Open Hardware Assembly Instructions Kit}
\url{https://ohai-kit.alephobjects.com} --- Visual work instructions for assembling products and user support.

\begin{figure}[h!]
\includegraphics[keepaspectratio=true,height=1.10\textheight,width=1.00\textwidth,angle=0]{ohai-kit.alephobjects.com.png}
\caption{Open Hardware Assembly Instructions Kit, \href{ohai-kit.alephobjects.com}{ohai-kit.alephobjects.com}}
\label{fig:ohaikitalephobjectscom}
\end{figure}

\subsection{Newsletter}
\url{https://phplist.alephobjects.com} --- Newsletter mailing list.

\subsection{Surveys}
\url{https://survey.alephobjects.com} --- Surveys.

\subsection{Rsync}
\url{rsync://rsync.alephobjects.com} --- Rsync file server of download and development archives.
{ "alphanum_fraction": 0.7752936718, "avg_line_length": 36.1506849315, "ext": "tex", "hexsha": "561de56eb2894f71256f9373915cff97a923b657", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "55eb174f46d5d5ed9d98a0c7f4572c58ce5db75a", "max_forks_repo_licenses": [ "CC-BY-4.0" ], "max_forks_repo_name": "jebba/AOOM", "max_forks_repo_path": "source/IT-public.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "55eb174f46d5d5ed9d98a0c7f4572c58ce5db75a", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "CC-BY-4.0" ], "max_issues_repo_name": "jebba/AOOM", "max_issues_repo_path": "source/IT-public.tex", "max_line_length": 121, "max_stars_count": 1, "max_stars_repo_head_hexsha": "55eb174f46d5d5ed9d98a0c7f4572c58ce5db75a", "max_stars_repo_licenses": [ "CC-BY-4.0" ], "max_stars_repo_name": "jebba/AOOM", "max_stars_repo_path": "source/IT-public.tex", "max_stars_repo_stars_event_max_datetime": "2020-04-28T23:53:44.000Z", "max_stars_repo_stars_event_min_datetime": "2020-04-28T23:53:44.000Z", "num_tokens": 778, "size": 2639 }
\section{Implementation}
\subsection{LSM tree}
An LSM tree, or log-structured merge-tree, is a key-value data structure with good performance characteristics. It is a good choice for providing indexed access to data such as transaction logs or time series. An LSM tree maintains the data in two or more structures. Each of them is optimized for the medium it is stored on, and the synchronization between the layers is done in batches~\cite{lsm_tree_orig}.

A simple LSM tree consists of two layers, named $C_0$ and $C_1$. The main difference between these layers is that typically $C_0$ is an in-memory data structure, while $C_1$ is stored on disk. Therefore, $C_1$ is usually bigger than $C_0$, and when the amount of data in $C_0$ reaches a certain threshold, the data is merged into $C_1$. To maintain suitable performance, both $C_0$ and $C_1$ have to be optimized for their respective storage media, and the data has to be migrated between them efficiently, using algorithms similar to merge sort. To keep this merging efficient, it was decided to use an SSTable as the $C_1$ level and a B-tree as the $C_0$ level.

An SSTable, or Sorted String Table, is a file that contains key-value pairs sorted by key~\cite{sstable}. Using SSTables to store time-series data works well when the data is streamed from a monitoring system: the measurements arrive ordered by timestamp, which makes the timestamp a good candidate for the key. The value in the SSTable can be the measurement itself. An SSTable is always an immutable data structure, meaning that data cannot be deleted directly from the file; it has to be marked as ``deleted'' and then removed during the compaction process. The compaction process is also used to remove obsolete data if it has a specific time-to-live period.

\subsection{Commitlog}
Any application that works with mission-critical data has to be able to save the data consistently in case of a power outage or any other unexpected termination of the application. To provide this ability, a commit log mechanism is used. It is an append-only log of all inserts to the database, written before the data is appended to the data files of the database. This mechanism is commonly used in both relational and non-relational DBMSs. Since the in-app storage system has to maintain this ability as well, it was necessary to implement the commit log alongside the LSM tree. In order to ensure that the entries in this commit log are persisted to disk, the fsync syscall was used, which negatively affected the performance of the resulting storage system.

\subsection{Implemented library}
In order to implement the feature of storing time-series data within the Go application, the GoLSM library was developed. It provides mechanisms to persist and retrieve time-series data, and it uses a two-layer LSM tree as well as a commit log mechanism to store the data on disk. The architecture of this library is shown in Figure~\ref{fig2}.

\begin{figure}[h!]
\includegraphics[width=\textwidth,keepaspectratio]{figures/golsm-arch.eps}
\caption{The architecture of the GoLSM library.}
\label{fig2}
\end{figure}

Since this library was initially developed for a particular subject area and particular usage, it has a number of limitations. For example, it has no functions to delete the data; instead, it is supposed to save each measurement with a particular expiration point, after which the data will be automatically removed during the compaction process.
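The write path described above can be summarized with a small conceptual sketch. The sketch below is written in Python with invented names; it is not the GoLSM API, only an illustration of how the commit log, the in-memory layer, and the SSTable flush fit together:
\begin{verbatim}
import os

class TinyLSM(object):
    # Conceptual two-layer store: a dict stands in for the C0 B-tree.
    def __init__(self, data_dir, memtable_limit=1024):
        self.data_dir = data_dir
        self.memtable = {}
        self.memtable_limit = memtable_limit
        self.sstable_count = 0
        self.commitlog = open(os.path.join(data_dir, "commit.log"), "a")

    def put(self, timestamp, value):
        # 1. Append to the commit log and fsync it, so the write
        #    survives a crash or power outage.
        self.commitlog.write("%d,%s\n" % (timestamp, value))
        self.commitlog.flush()
        os.fsync(self.commitlog.fileno())
        # 2. Insert into the in-memory layer (C0).
        self.memtable[timestamp] = value
        # 3. Once C0 reaches its threshold, merge it into C1 on disk.
        if len(self.memtable) >= self.memtable_limit:
            self.flush_to_sstable()

    def flush_to_sstable(self):
        # An SSTable is an immutable file of key-value pairs sorted by key.
        path = os.path.join(self.data_dir,
                            "sstable-%05d.dat" % self.sstable_count)
        with open(path, "w") as sstable:
            for ts in sorted(self.memtable):
                sstable.write("%d,%s\n" % (ts, self.memtable[ts]))
        self.sstable_count += 1
        self.memtable.clear()
\end{verbatim}
GoLSM follows the same ordering of steps, with a B-tree as the in-memory structure and two commit log files on disk, as described below.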
The data stored using GoLSM consists of one or multiple measurements. Each measurement is represented by a tag name, which could be an identifier of a sensor or of the measurement device; an origin, which is the timestamp when the measurement was captured; and the measurement value, which is stored as a byte array. This byte array can vary in size, which makes storing each measurement a more complicated procedure.

As shown in Figure~\ref{fig2}, the storage system consists of two layers, an in-memory layer and a persistent storage layer. The in-memory layer is based on a B-tree implementation by Google~\cite{btree_google}. It stores a small portion of the data of a configurable size. The storage layer consists of a commit log manager and an SSTable manager. The commit log manager maintains two commit log files; while one is used to write the incoming data, the other is used to append the previously written data to the SSTable files, which are managed by the SSTable manager. Each SSTable file contains the data of its own tag, and it also has a dedicated in-memory index, which is also based on a B-tree. This index is used to speed up the retrieval of data from the SSTable when the requested time range is bigger than what is stored in the in-memory layer.
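To illustrate the role of this per-SSTable index, the following conceptual Python sketch (again with invented names, not the GoLSM API) uses a sorted list of (timestamp, file offset) pairs in place of the B-tree and answers range queries with binary search:
\begin{verbatim}
import bisect

class SSTableIndex(object):
    def __init__(self):
        self.timestamps = []   # sorted timestamps of indexed entries
        self.offsets = []      # byte offset of each entry in the SSTable file

    def add(self, timestamp, offset):
        # Entries are written to the SSTable sorted by timestamp,
        # so appending keeps both lists sorted.
        self.timestamps.append(timestamp)
        self.offsets.append(offset)

    def offsets_in_range(self, start_ts, end_ts):
        # Binary-search the two boundaries instead of scanning the file.
        lo = bisect.bisect_left(self.timestamps, start_ts)
        hi = bisect.bisect_right(self.timestamps, end_ts)
        return self.offsets[lo:hi]
\end{verbatim}
A range read then seeks to the returned offsets in the SSTable file and merges the result with whatever part of the requested range is still held in the in-memory layer.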
{ "alphanum_fraction": 0.7975792988, "avg_line_length": 208.347826087, "ext": "tex", "hexsha": "71d3ae0facfda9e9e657e7a6cf5bd607d01a34b8", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "ea8eb1fede20913bed754ec28edd17be2f9098bc", "max_forks_repo_licenses": [ "LPPL-1.3c" ], "max_forks_repo_name": "nikita-tomilov/mayor_golsm", "max_forks_repo_path": "2-lsm-impl.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "ea8eb1fede20913bed754ec28edd17be2f9098bc", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "LPPL-1.3c" ], "max_issues_repo_name": "nikita-tomilov/mayor_golsm", "max_issues_repo_path": "2-lsm-impl.tex", "max_line_length": 819, "max_stars_count": null, "max_stars_repo_head_hexsha": "ea8eb1fede20913bed754ec28edd17be2f9098bc", "max_stars_repo_licenses": [ "LPPL-1.3c" ], "max_stars_repo_name": "nikita-tomilov/mayor_golsm", "max_stars_repo_path": "2-lsm-impl.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1037, "size": 4792 }
\section{Introduction} Most software activities span multiple applications. The slogan ``there's an app for that'' illustrates that we live in a world filled with specialized apps that each focus on a few specific tasks. To accomplish larger activities requires composing multiple applications together into a ``toolbelt'' \cite{Sumner1997}. For example, designing an interface might comprise drawing a logo in Illustrator, mocking up a prototype in Sketch, adding animations using Flinto, and presenting it to a client using Keynote. Analyzing data might involve formatting it using Python, viewing and graphing it in Excel, modifying graph aesthetics in Photoshop, and reporting results in Word. Toolbelts help users tailor custom ecosystems and support distributed innovation. However, this bricolage creates a user experience problem: even with design guidelines, every application is different \cite{Beaudouin-Lafon2018}. As new applications appear and existing ones change, few people are fluent experts in all the steps towards their goals. Presenting learning resources in-application \cite{Grossman2010a, Chilana2012, Matejka2011a, Matejka2011, Brandt2010, Ichinco2017} and augmenting search queries with contextual information \cite{Ekstrand2011, Brandt2010} can offer a more fluid experience with lower cognitive load. However, existing solutions require deep integration with applications. And since today's applications are ``walled gardens'' with limited integration across software vendors \cite{Beaudouin-Lafon2018}, help resources typically focus on one application at a time. This leaves gaps when users want to move from one application to another (\textit{e.g.,} export an Adobe \textsc{xd} prototype to Zeplin) or interleave applications (\textit{e.g.,} coding a website in Sublime while debugging in Chrome and resizing graphics in \textsc{gimp}). % Web search results can of course include community-created resources that span applications. However, generic web search poses two problems. First, search is blind to relevant contextual information that could connect users to better resources \cite{Ekstrand2011, Kraft2005, Finkelstein2002}. Search engines place the burden on users to articulate an appropriate query, an almost paradoxical requirement for users who are there because they don't know the domain \cite{Russell2011}. Second, search is divorced from the application UX, requiring users to bounce back and forth to connect the content \cite{Fourney2014Intertwine}. These challenges are amplified when users work with multiple applications, each with its own terminology and conventions. We introduce an application-independent approach for contextually presenting video learning resources. We embody this approach in the RePlay system (\autoref{fig:replay-interface}), which enables users to search for learning videos based on their application usage. RePlay gathers application context using system accessibility \textsc{api}s. It extends online video search and cues videos to relevant segments based on their captions. We focus on video assistance because despite video's growing popularity (Cisco predicts that by 2021, 82\% of all internet traffic will be video \cite{Cisco}), searching and browsing videos remain cumbersome \cite{Kim2014, Pavel2014, Pavel2015}. Video is popular for content creators as it is often easier to author than tutorials or diagrams (which require careful curation). 
Learners value video for its efficacy in communicating complex or continuous visual actions such as brushing or setting parameters \cite{Chi2012}. However, interacting with videos remains difficult because they are harder to navigate and scan for steps than text \cite{Chi2012}. %for its popularity as a learning resource \cite{Chi2012, Pongnumkul2011, Nguyen2015}, especially for visual tasks %its simultaneously the most useful thing for many tasks and also the least well supported by current search interfaces % while video has become increasingly ubiquitous and easy to create -- ... -- searching and browsing remain cumbersome. We report on two studies observing how people use RePlay and web video help: a week-long field study ($n\!=\!7$) and a lab study ($n\!=\!24$) where half the participants used RePlay and half used web video search. Both studies used visual design as the domain, as video is especially helpful for visual tasks \cite{Pongnumkul2011}. The field study examined how designers with varying experience used RePlay in-situ. Participants used an average of 17 different applications in a week, emphasizing the importance of system-wide integration. Our findings also suggested that contextual video assistance benefits targeted tasks more than open-ended ones. The lab study found that contextual video assistance helps people spend less time away from their task than web video search, and replaces strategies typically used in navigating between and within videos. This work makes the following contributions: \begin{enumerate} \item An application-independent method for finding relevant clips in learning videos that leverages user context, \item the RePlay system, which demonstrates this method using accessibility \textsc{api}s and online video corpora, and \item insights from two studies that highlight the importance of multi-application support and the promise of cross-application search. \end{enumerate} % replay is an example implementation of such a tool / embodiment of this approach / design probe to illustrate how this could be done, and investigate whether it helps people. / show that this is a viable direction for future research % people dream of this thing. we offer a new path to achieve aspects of that goal. knitting help together lowers the friction between applications % arg 1: lack of integration creates friction that our work mitigates % ****arg 2: if you need to assemble a buncha tools that all work differnetly you won't know them all % arg 3: the seams / hand off points require guidance %Currently, it is difficult for users to get effective guidance when tasks span applications: in-situ help and official documentation is only available at the individual application level, and community-created resources that sometimes do span applications move the user out of their workflow and into the browser. %include Google YT timeline marker result as an example %have IoT example for camera-ready video % somewhere also mention accessibility for all argument %watching ppl use 2 different video interfaces taught us what a good video interface should be (wouldn't be able to find this otherwise)
{ "alphanum_fraction": 0.8131320755, "avg_line_length": 161.5853658537, "ext": "tex", "hexsha": "168c61f5a26912d593a35f615c8d50b5869ee7b9", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "36c269696ac71cd1f7dac50fa094f8db72e895f6", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "ailiefraser/ucsd-thesis", "max_forks_repo_path": "replay/1_intro.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "36c269696ac71cd1f7dac50fa094f8db72e895f6", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "ailiefraser/ucsd-thesis", "max_issues_repo_path": "replay/1_intro.tex", "max_line_length": 1091, "max_stars_count": null, "max_stars_repo_head_hexsha": "36c269696ac71cd1f7dac50fa094f8db72e895f6", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "ailiefraser/ucsd-thesis", "max_stars_repo_path": "replay/1_intro.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1339, "size": 6625 }
\chapter{Python Client API}
\label{chap:api:python-client}

HyperDex provides Python bindings to the client in the module \code{hyperdex.client}. This library wraps the HyperDex C client library and enables the use of native Python data types.

\subsection{Building the HyperDex Python Binding}
\label{sec:api:python-client:building}

The HyperDex Python bindings must be requested at configure time, as they are not built automatically. You can ensure that the Python bindings are always built by providing the \code{--enable-python-bindings} option to \code{./configure} like so:

\begin{consolecode}
% ./configure --enable-client --enable-python-bindings
\end{consolecode}

\subsection{Using Python Within Your Application}
\label{sec:api:python-client:using}

All client operations are defined in the \code{hyperdex.client} module. You can access this in your program with:

\begin{pythoncode}
import hyperdex.client
\end{pythoncode}

\subsection{Hello World}
\label{sec:api:python-client:hello-world}

The following is a minimal application that stores the value "Hello World" and then immediately retrieves the value:

\inputminted{python}{\topdir/python/client/hello-world.py}

You can run this example with:

\begin{consolecode}
% python hello-world.py
put "Hello World!"
got: {'v': 'Hello World!'}
\end{consolecode}

Right away, there are several points worth noting in this example:

\begin{itemize}
\item Every operation is synchronous. The put and get operations run to completion by default.
\item Python types are automatically converted to HyperDex types. There's no need to specify information such as the length of each string, as one would do with the C API.
\end{itemize}

\subsection{Asynchronous Operations}
\label{sec:api:python-client:async-ops}

For convenience, the Python bindings treat every operation as synchronous. This enables you to write short scripts without concern for asynchronous operations. Most operations come with an asynchronous form, denoted by the \code{async\_} prefix. For example, the above Hello World example could be rewritten in an asynchronous fashion as follows:

\inputminted{python}{\topdir/python/client/hello-world-async-wait.py}

This enables applications to issue multiple requests simultaneously and wait for their completion in an application-specific order. It's also possible to use the \code{loop} method on the client object to wait for the next request to complete:

\inputminted{python}{\topdir/python/client/hello-world-async-loop.py}

\subsection{Data Structures}
\label{sec:api:python-client:data-structures}

The Python bindings automatically manage conversion of data types from Python to HyperDex types, enabling applications to be written in idiomatic Python.

\subsubsection{Examples}
\label{sec:api:python-client:examples}

This section shows examples of Python data structures that are recognized by HyperDex. The examples here are for illustration purposes and are not exhaustive.

\paragraph{Strings}

The HyperDex client recognizes Python's strings and automatically converts them to HyperDex strings. For example:

\begin{pythoncode}
c.put("kv", "somekey", {"v": "somevalue"})
\end{pythoncode}

\paragraph{Integers}

The HyperDex client recognizes Python's integers and longs and automatically converts them to HyperDex integers. For example:

\begin{pythoncode}
c.put("kv", "somekey", {"v": 42})
\end{pythoncode}

\paragraph{Floats}

The HyperDex client recognizes Python's floating point numbers and automatically converts them to HyperDex floats.
For example:

\begin{pythoncode}
c.put("kv", "somekey", {"v": 3.1415})
\end{pythoncode}

\paragraph{Lists}

The HyperDex client recognizes Python lists and automatically converts them to HyperDex lists. For example:

\begin{pythoncode}
c.put("kv", "somekey", {"v1": ["a", "b", "c"]})
c.put("kv", "somekey", {"v2": [1, 2, 3]})
c.put("kv", "somekey", {"v3": [1.0, 0.5, 0.25]})
\end{pythoncode}

\paragraph{Sets}

The HyperDex client recognizes Python sets and automatically converts them to HyperDex sets. For example:

\begin{pythoncode}
c.put("kv", "somekey", {"v1": set(["a", "b", "c"])})
c.put("kv", "somekey", {"v2": set([1, 2, 3])})
c.put("kv", "somekey", {"v3": set([1.0, 0.5, 0.25])})
\end{pythoncode}

\paragraph{Maps}

The HyperDex client recognizes Python dictionaries and automatically converts them to HyperDex maps. For example:

\begin{pythoncode}
c.put("kv", "somekey", {"v1": {"k": "v"}})
c.put("kv", "somekey", {"v2": {1: 2}})
c.put("kv", "somekey", {"v3": {3.14: 0.125}})
c.put("kv", "somekey", {"v4": {"a": 1}})
\end{pythoncode}

\subsection{Attributes}
\label{sec:api:python-client:attributes}

Attributes in Python are specified in the form of a dictionary from attribute names to their values. As you can see in the examples above, attributes are specified in the form:

\begin{pythoncode}
{"name": "value"}
\end{pythoncode}

\subsection{Map Attributes}
\label{sec:api:python-client:map-attributes}

Map attributes in Python are specified in the form of a nested dictionary. The outer dictionary's key specifies the attribute name, while the inner dictionary's key-value pairs specify the key-value pairs of the map. For example:

\begin{pythoncode}
{"name": {"key": "value"}}
\end{pythoncode}

\subsection{Predicates}
\label{sec:api:python-client:predicates}

Predicates in Python are specified in the form of a dictionary from attribute names to their predicates. In the simple case, the predicate is just a value to be compared against:

\begin{pythoncode}
{"v": "value"}
\end{pythoncode}

This is the same as saying:

\begin{pythoncode}
{"v": hyperdex.client.Equals('value')}
\end{pythoncode}

The Python bindings support the full range of predicates supported by HyperDex itself. For example:

\begin{pythoncode}
{"v": hyperdex.client.LessEqual(5)}
{"v": hyperdex.client.GreaterEqual(5)}
{"v": hyperdex.client.Range(5, 10)}
{"v": hyperdex.client.Regex('^s.*')}
{"v": hyperdex.client.LengthEquals(5)}
{"v": hyperdex.client.LengthLessEqual(5)}
{"v": hyperdex.client.LengthGreaterEqual(5)}
{"v": hyperdex.client.Contains('value')}
\end{pythoncode}

\subsection{Error Handling}
\label{sec:api:python-client:error-handling}

All error handling within the Python bindings is done via the exception handling mechanism of Python. Errors will be raised by the library and should be handled by your application. For example, if we were trying to store an integer (5) as attribute \code{"v"}, where \code{"v"} is actually a string, we'd generate an error.

\begin{pythoncode}
try:
    c.put("kv", "my_key", {"v": 5})
except HyperDexClientException as e:
    print e.status
    print e.symbol
    print e
\end{pythoncode}

Errors of type \code{HyperDexClientException} will contain both a message indicating what went wrong, as well as the underlying \code{enum hyperdex\_client\_returncode}. The member \code{status} indicates the numeric value of this enum, while \code{symbol} returns the enum as a string.
The above code will fail with the following output: \begin{verbatim} 8525 HYPERDEX_CLIENT_WRONGTYPE invalid attribute "v": attribute has the wrong type \end{verbatim} \subsection{Operations} \label{sec:api:python-client:ops} \input{\topdir/python/client/ops} \pagebreak \subsection{Working with Signals} \label{sec:api:python-client:signals} The HyperDex client library is signal-safe. Should a signal interrupt the client during a blocking operation, it will raise a \code{HyperDexClientException} with status \code{HYPERDEX\_CLIENT\_INTERRUPTED}. \subsection{Working with Threads} \label{sec:api:python-client:threads} The Python module is fully reentrant. Instances of \code{hyperdex.client.Client} and their associated state may be accessed from multiple threads, provided that the application employs its own synchronization that provides mutual exclusion. Put simply, a multi-threaded application should protect each \code{Client} instance with a mutex or lock to ensure correct operation.
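To make the locking advice concrete, the following is a minimal sketch (not part of the official bindings) of one way to share a single \code{Client} between threads. The coordinator address and space name shown in the comments are illustrative assumptions; the \code{put} and \code{get} calls are the ones documented above.

\begin{pythoncode}
import threading
import hyperdex.client

class LockedClient(object):
    """Serializes access to a single hyperdex.client.Client instance."""

    def __init__(self, host, port):
        self._client = hyperdex.client.Client(host, port)
        self._lock = threading.Lock()

    def put(self, space, key, attrs):
        # Only one thread at a time may drive the underlying client.
        with self._lock:
            return self._client.put(space, key, attrs)

    def get(self, space, key):
        with self._lock:
            return self._client.get(space, key)

# Illustrative usage (coordinator address and space name are assumptions):
# shared = LockedClient('127.0.0.1', 1982)
# shared.put("kv", "somekey", {"v": "somevalue"})
# print shared.get("kv", "somekey")
\end{pythoncode}

Alternatively, each thread may simply construct its own \code{Client} instance, which avoids the shared lock entirely.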
{ "alphanum_fraction": 0.7612352498, "avg_line_length": 31.1171875, "ext": "tex", "hexsha": "f73846a0e9a2b9906152e8a7efe1e57e9c13e113", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "b272e85b08d232993baf6105a4beba833deadfe3", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "cnangel/HyperDex", "max_forks_repo_path": "doc/python/client.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "b272e85b08d232993baf6105a4beba833deadfe3", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "cnangel/HyperDex", "max_issues_repo_path": "doc/python/client.tex", "max_line_length": 86, "max_stars_count": 1, "max_stars_repo_head_hexsha": "b272e85b08d232993baf6105a4beba833deadfe3", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "cnangel/HyperDex", "max_stars_repo_path": "doc/python/client.tex", "max_stars_repo_stars_event_max_datetime": "2016-08-10T07:53:58.000Z", "max_stars_repo_stars_event_min_datetime": "2016-08-10T07:53:58.000Z", "num_tokens": 2078, "size": 7966 }
\documentclass[a4paper, 12pt]{report}

%%%%%%%%%%%%
% Packages %
%%%%%%%%%%%%

\usepackage[english]{babel}
\usepackage[noheader]{packages/sleek}
\usepackage{packages/sleek-title}
\usepackage{packages/sleek-theorems}
\usepackage{packages/sleek-listings}
% \usepackage{algorithmic}
\usepackage{algorithm}
\usepackage{algpseudocode}
\usepackage{multirow}
\usepackage{hyperref}

%%%%%%%%%%%%%%
% Title-page %
%%%%%%%%%%%%%%

\logo{StringMatching.png}
\institute{Habib University}
\faculty{CS 412: Algorithms}
\title{String Matching Problem}
\subtitle{Algorithms to solve it and their analysis}
\author{\textit{Authors}\\Arham Ahmed \\ Maham Shoaib Patel\\ Murtaza Faisal Shafi\\ Khubaib Naeem Kasbati }
\date{\today}

%%%%%%%%%%%%%%%%
% Bibliography %
%%%%%%%%%%%%%%%%

\addbibresource{./resources/bib/references.bib}

%%%%%%%%%%
% Others %
%%%%%%%%%%

\lstdefinestyle{latex}{
    language=TeX,
    style=default,
    %%%%%
    commentstyle=\ForestGreen,
    keywordstyle=\TrueBlue,
    stringstyle=\VeronicaPurple,
    emphstyle=\TrueBlue,
    %%%%%
    emph={LaTeX, usepackage, textit, textbf, textsc}
}

\FrameTBStyle{latex}

\def\tbs{\textbackslash}

%%%%%%%%%%%%
% Document %
%%%%%%%%%%%%

\begin{document}

\maketitle
\romantableofcontents

\chapter{Introduction to String Matching Problem}

The string matching problem is the problem of finding occurrence(s) of a pattern string within another string or body of text. There are many different algorithms for searching efficiently.\footnote{\href{https://github.com/murtaza854/CS-412-L1-Project-String-Matching}{You can find all implementations of our algorithms here.}}

String Matching Problem is simply finding occurrence(s) of a \texttt{pattern} (substring) in a \texttt{text} (string). \\
Mathematically, the problem is finding occurrence(s) of a \texttt{pattern} $x$, where $x$ is a series of $m$ characters, $x = \langle x_1,x_2,\dots x_m \rangle$, $x_i \in \Sigma, i = 1,2,\dots,m$, in a \texttt{text} $y$, where $y$ is a series of $n$ characters, $y = \langle y_1,y_2,\dots y_n \rangle$, $y_j \in \Sigma, j = 1,2,\dots,n$.\footnote{Definition inspired from CS 369 University of Auckland \cite{Gimelfarb}} \\
We will be focusing on finding all the occurrences of a \texttt{pattern} in a \texttt{text}.

This problem is relevant in the real world and some of its applications include:
\begin{enumerate}
    \item Plagiarism detection.
    \item DNA sequencing, to find patterns of a particular DNA sequence.
    \item Text editors.
    \item Searching for files.
    \item Spam filters.
\end{enumerate}

% Find one, or more generally, all the occurrences
% of a pattern x = [x0x1..xm−1]; xi ∈ Σ; i = 0, . . . , m − 1, in a text
% (string) y = [y0y1..yn−1]; yj ∈ Σ; j = 0, . . . , n − 1

% \begin{lstlisting}[style=latexFrameTB, caption={Example of Sleek Template packages usage.}, gobble=8]
%     \usepackage[english]{babel}
%     \usepackage[noheader]{packages/sleek}
%     \usepackage{packages/sleek-title}
% \end{lstlisting}

% \blindfootnote{If you are a \LaTeX{} beginner consider the excellent \href{https://www.overleaf.com/learn}{Overleaf tutorial}. Also, there are a lot of symbols available in \LaTeX{} and, therefore, in this template. I recommend the use of \enquote{The Comprehensive \LaTeX{} Symbol List} \cite{pakin2020comprehensive} for searching symbols.}

\chapter{Solutions to the Problem}

In this portion, we will look at 3 algorithms to solve the string matching problem.
\begin{enumerate}
    \item Brute Force / Na\"ive Algorithm
    \item Rabin-Karp Algorithm
    \item Knuth-Morris-Pratt Algorithm
\end{enumerate}

\section{Brute Force / Na\"ive Algorithm}

Brute Force, or the Na\"ive Algorithm, is a relatively simple algorithm to follow. However, it is slower than the other solutions to the problem. It creates a window of size \textit{m}, equal to the size of the \textit{pattern} being searched for in the \textit{text}. On every turn it compares the window to the pattern; if they match, it reports a match at the starting index of the window, and then it slides the window along by one. Some features of the Brute Force algorithm are as follows:
\begin{itemize}
    \item It does not require any pre-processing of the text or the pattern.
    \item It shifts the window by 1 after every comparison.
    \item It compares every element in the pattern with every element in the window with no specific order of comparison.
\end{itemize}

\subsection{Algorithm}

The brute force algorithm, as explained above, works by making consecutive comparisons while sliding a window of size \textit{m} (which is the size of the pattern) over a text of size \textit{n}.\\
After the window is created, we compare every letter at index \textit{i} of the window with the letter at the same index \textit{i} of the pattern. If every element in the window matches that of the pattern, then we report the starting index of the window in the text. We can thus write the pseudo code as follows:

\newpage

\begin{algorithm}
\begin{algorithmic}
\Procedure{BruteForce}{\texttt{pattern}, \texttt{text}}
    \For{ $j \gets$ 0 to \texttt{len}(\texttt{text}) $-$ \texttt{len}\texttt{(pattern)}}
        \For{$i \gets$ 0 to \texttt{len}\texttt{(pattern)} $-\ 1$}
            \If {\texttt{text[i+j]} $\neq$ \texttt{pattern[i]}}
                \State \textbf{break}
            \EndIf
            \If {$i$ = \texttt{len(pattern)} $-\ 1$}
                \State \texttt{print}(j)
            \EndIf
        \EndFor
    \EndFor
\EndProcedure
\end{algorithmic}
\end{algorithm}

\subsection{Example}

The functionality of the algorithm can be explained by taking the example of the following text: \textbf{ABAACBAABABA}, where we need to search for the pattern: \textbf{ABA}. The algorithm will create a window of size \textit{m}, which is 3 in this case, over the first three elements of the text as follows:
\begin{center}
    \includegraphics[scale = 0.75]{brute1.PNG}
\end{center}
As all the elements of the window and the pattern match, a match is reported at the index of the first element of the window in the text as follows:
\begin{center}
    \includegraphics[scale = 0.76]{brute2.PNG}
\end{center}
The window then slides over by one and compares all the elements of the window with those of the pattern again. If no match is produced, the window slides to the right by 1 and continues its search.
\begin{center}
    \includegraphics[scale = 0.75]{brute3.PNG}
\end{center}
It continues to do so until it reaches the end, printing/returning the index of the first element of the window every time a match is produced.

\subsection{Code}

The algorithm can be coded in Python as follows:

\FrameTBStyle{python}

\lstinputlisting[style=pythonFrameTB, gobble=4]{BruteForce.py}

\subsection{Complexity Analysis}

The time complexity of the algorithm is $O(n\cdot m)$ as the upper bound for comparisons is $m\cdot n$.

\newpage

\section{Rabin-Karp Algorithm}

Rabin-Karp is an algorithm which tries to avoid unnecessary comparisons through hashing, since comparing two hashes is faster than comparing every character one by one.
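As a quick, illustrative sketch of this idea (not the implementation used in this report), the following code hashes the pattern once and only compares characters when a window's hash matches. Note that hashing every window from scratch still costs $O(m)$ per window, which is exactly the cost that the rolling hash introduced below avoids.

\begin{lstlisting}[style=pythonFrameTB]
# Naive hash-and-verify sketch (illustrative only; not the rolling-hash version below).
def naive_hash_search(pattern, text):
    m = len(pattern)
    target = hash(pattern)              # built-in hash, computed once
    for j in range(len(text) - m + 1):
        window = text[j:j + m]
        if hash(window) == target:      # recomputed from scratch: O(m) per window
            if window == pattern:       # verify to rule out hash collisions
                print(j)
\end{lstlisting}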
We use a version of hashing called a \textbf{rolling hash}.

\subsection{Rolling Hash}

A rolling hash is a hash function where the input is hashed in a window which moves across the input. It uses the previously known hash value, the character that enters the window, and the character that leaves it to compute the new hash value. Initially, we need to compute the hash value of the first window, which can be done using the following algorithm.

\begin{algorithm}
\begin{algorithmic}
\Function{hash}{\texttt{word}, \texttt{quotient}}
    \State \texttt{result} $\gets 0$
    \State $m \gets$ \texttt{len(word)}
    \For{$i \gets$ 0 to $m -1$}
        \State \texttt{result} $\gets$ (\texttt{result + ord(word[i])}$\times 2^{m-1-i}) \mod $ \texttt{quotient}
    \EndFor
    \State \Return \texttt{result}
\EndFunction
\end{algorithmic}
\end{algorithm}
\\
The pseudo-code for the rolling hash is as follows:

\begin{algorithm}
\begin{algorithmic}
\Function{rollinghash}{\texttt{remove}, \texttt{add}, \texttt{oldvalue}, \texttt{windowsize}, \texttt{quotient}}
    \State $m \gets $ \texttt{windowsize}
    \State \Return $(2\times$ \texttt{oldvalue} $-$ \texttt{ord(remove)}$\times 2^m+$ \texttt{ord(add)} $)\mod$ \texttt{quotient}
\EndFunction
\end{algorithmic}
\end{algorithm}

% def rehash(a, b, hash_text, m, quotient):
%     return (2*hash_text - ord(a)*2**(m) + ord(b))%quotient

\subsection{Algorithm}

Let $m$ be the size of the \texttt{pattern} and $n$ be the size of the \texttt{text}. The algorithm is as follows:
\begin{enumerate}
    \item First compute the hash of the first $m$ characters of the \textit{text} and the hash of the \textit{pattern}.
    \item See if the hash of the first $m$ characters matches the hash of the \textit{pattern}. If so, check the $m$ characters in the window and see if they actually match the \textit{pattern}. If they do, that is one of our answers.
    \item Move the window by 1 and calculate the new hash using the rolling hash function, then see if the new hash matches the hash of the \textit{pattern}. If so, check the $m$ characters in the window and see if they actually match the \textit{pattern}. If they do, that is one of our answers. If the window doesn't end on the last character in the \textit{text}, then repeat this step.
\end{enumerate}

The pseudo-code for the algorithm is as follows:

\pagebreak

\begin{algorithm}
\begin{algorithmic}
\Procedure{RabinKarp}{\texttt{pattern}, \texttt{text}, \texttt{quotient}}
    \State $m \gets$ \texttt{len(pattern)}
    \State $n \gets $ \texttt{len(text)}
    \State hashpattern $\gets$ HASH(\texttt{pattern},\texttt{quotient})
    \State hashtext $\gets$ HASH(\texttt{text[:m]},\texttt{quotient})
    \For{$j \gets 0$ to $n-m$}
        \If{hashtext = hashpattern \textbf{and} \texttt{text[j:j+m]} = \texttt{pattern}}
            \State \texttt{print(j)}
        \EndIf
        \If{j+m < n}
            \State hashtext $\gets$ ROLLINGHASH(\texttt{text[j]}, \texttt{text[j+m]}, \texttt{hashtext}, m, \texttt{quotient})
        \EndIf
    \EndFor
\EndProcedure
\end{algorithmic}
\end{algorithm}

\subsection{Example}

The following are the steps taken when the algorithm is given the parameters \texttt{pattern =} ``CDD'', \texttt{text =} ``CDDCDD'', and \texttt{quotient =} 524287:

\begin{verbatim}
text = CDD , hash_text = 472 , hash_word = 472
Pattern is found at position: 0
text = DDC , hash_text = 475 , hash_word = 472
text = DCD , hash_text = 474 , hash_word = 472
text = CDD , hash_text = 472 , hash_word = 472
Pattern is found at position: 3
\end{verbatim}

\subsection{Code}

\FrameTBStyle{python}

\lstinputlisting[style=pythonFrameTB, gobble=4]{RabinKarp.py}

\subsection{Complexity Analysis}

The complexity of this algorithm in the worst case is $O(m\cdot n)$. This can happen when we have a poor hash function that produces many false positives, forcing the algorithm to verify almost every window character by character, just like brute force. The best/average case is $O(n+m)$, since most character checks are skipped thanks to hashing.

\newpage

\section{Knuth-Morris-Pratt Algorithm}

The KMP algorithm \cite{Knuthf1977} was developed independently by Knuth, Morris, and Pratt, and it is the first ever \textbf{linear time algorithm} for string matching. Unlike brute force, it totally avoids re-examination of previously matched characters.

\subsection{Algorithm with an example}

In order to understand the algorithm, we will first develop it intuitively by working on an example where \texttt{pattern} $=$ \texttt{onions} and \texttt{text} $=$ \texttt{onionions}. \\
Every time brute force fails, it starts matching from the next character. In doing this, we are not employing the knowledge gained from the last matching attempt. We can use that knowledge, together with features of the \textit{pattern}, to know where the next matching should begin or how many characters we can skip. These features of the pattern are stored in a prefix table, where for each index $i \in \{0,1,\dots,m-1\}$, \texttt{LPS}$[i]$ stores the length of the longest proper prefix of \texttt{pattern}$[0,\dots,i]$ that is also a suffix of \texttt{pattern}$[0,\dots,i]$. The following is an example of the prefix table.

\begin{table}[h]
\centering
\begin{tabular}{l|l|l|l|l|l|l|}
\cline{2-7}
\textit{i} & 0 & 1 & 2 & 3 & 4 & 5 \\ \hline
\texttt{pattern[i]} & o & n & i & o & n & s \\ \hline
\texttt{LPS[i]} & 0 & 0 & 0 & 1 & 2 & 0 \\ \hline
\end{tabular}
\caption{Prefix Table}
\label{tab:Prefixtable}
\end{table}
\\
This prefix table can tell us how many characters of the pattern we don't have to match again if there is a mismatch. \\
In our example the following happens:
\begin{enumerate}
    \item The word matches till onion. Then there is a mismatch between the letter \textbf{i} in the \textit{text} and \textbf{s} in the \textit{pattern}.
    \item Our \texttt{LPS} array tells us that since \textbf{on} is the common prefix and suffix, we can skip the first 2 characters of the \textit{pattern} and try to match the 3rd character of the \textit{pattern} with \textbf{i}.
    \item There is a match between the 3rd character of the \textit{pattern} and \textbf{i}, and as we move till the end of the string everything matches, so we find the instance of onions.
\end{enumerate}

\subsection{Prefix Table}

We start by taking two pointers, \textit{len} and \textit{i}, setting them to $0$ and $1$ respectively, and taking an array of the size of the pattern, which we name \texttt{LPS} for ease. If the characters at index \textit{len} and index \textit{i} match, we store \textit{len} $+\ 1$ at the $i$'th index of the \texttt{LPS} array. If there is a mismatch and the \textit{len} pointer has not been incremented, we just increment \textit{i} and continue with our search. If the \textit{len} pointer has been incremented and there is a mismatch, then we simply check the value at the index before the \textit{len} pointer in the \texttt{LPS} array (\texttt{LPS[len-1]}) and assign that value to \textit{len}. We continue this until the $i$ pointer has reached the end.

Here is an example. Consider the string: \\
\textbf{ABCDABEABF}

Initially, \textit{len} is $0$ and $i$ is $1$. As we can see in the string, character `A' occurs again at $i = 4$, so we put \textit{len} $+\ 1$ at \texttt{LPS[i]} and increment \textit{len} and $i$. Then we see `B' after it at $i = 5$, which matches the second character of the string, so we put $2$ (\textit{len} $+\ 1 = 2$) at \texttt{LPS[i]}. We can observe that the prefix `AB' reoccurs as a suffix in the string, and we have now accounted for that.

\begin{table}[h]
\centering
\begin{tabular}{l|l|l|l|l|l|l|l|l|l|l|}
\cline{2-11}
\textit{i} & 0 & 1 & 2 & 3 & 4 & 5 & 6 & 7 & 8 & 9 \\ \hline
\texttt{pattern[i]} & \textbf{A} & \textbf{B} & C & D & \textbf{A} & \textbf{B} & E & A & B & F \\ \hline
\texttt{LPS[i]} & 0 & 0 & 0 & 0 & 1 & 2 & & & & \\ \hline
\end{tabular}
\caption{Prefix Table}
\label{tab:Prefixtable2}
\end{table}

We can see that the character after `B' does not match the first or the second character, and since the \textit{len} pointer has been incremented and there is a mismatch, we simply check the value at the index before the \textit{len} pointer in the \texttt{LPS} array (\texttt{LPS[len-1]}), assign that value to \textit{len}, and continue this strategy until we reach the end.
\begin{table}[h]
\centering
\begin{tabular}{l|l|l|l|l|l|l|l|l|l|l|}
\cline{2-11}
\textit{i} & 0 & 1 & 2 & 3 & 4 & 5 & 6 & 7 & 8 & 9\\ \hline
\texttt{pattern[i]} & A & B & C & D & A & B & E & A & B & F \\ \hline
\texttt{LPS[i]} & 0 & 0 & 0 & 0 & 1 & 2 & 0 & 1 & 2 & 0 \\ \hline
\end{tabular}
\caption{Prefix Table}
\label{tab:Prefixtable3}
\end{table}

\subsection{Pseudocode}

The pseudocode for the Knuth-Morris-Pratt algorithm is as follows:

\newpage

\begin{algorithm}
\begin{algorithmic}
\Procedure{KMPSearch}{\texttt{pattern},\texttt{text}}
    \State \texttt{LPS} $\gets $ LPSARRAY(\texttt{pattern})
    \State $i \gets 0$
    \State $j \gets 0$
    \While{ $i < $ \texttt{len(text)}}
        \If{\texttt{text[i]} $=$ \texttt{pattern[j]}}
            \State $i \gets i + 1$
            \State $j \gets j + 1$
        \Else
            \If{j $\neq$ 0}
                \State $j \gets $ \texttt{LPS}[$j-1$]
            \Else
                \State $i \gets i +1$
            \EndIf
        \EndIf
        \If{ $j=$ \texttt{len(pattern)}}
            \State \textbf{print}($i-j$)
            \State $j \gets$ \texttt{LPS}[$j-1$]
        \EndIf
    \EndWhile
\EndProcedure
\end{algorithmic}
\end{algorithm}

\newpage

The algorithm to compute the \texttt{LPS} array is as follows:

\begin{algorithm}
\begin{algorithmic}
\Function{LPSArray}{\texttt{pattern}}
    \State \texttt{prefixlen} $\gets 0$
    \State $i \gets 1$
    \State \texttt{LPS} $\gets$ array of \texttt{len(pattern)} zeros
    \While{$i<$ \texttt{len(pattern)}}
        \If{\texttt{pattern[i]} = \texttt{pattern[prefixlen]}}
            \State \texttt{LPS}[$i$] $\gets $ \texttt{prefixlen} $+\ 1$
            \State \texttt{prefixlen} $\gets $ \texttt{prefixlen} $+\ 1$
            \State $i \gets i+1$
        \Else
            \If{\texttt{prefixlen} $\neq$ 0}
                \State \texttt{prefixlen} $\gets$ \texttt{LPS}[\texttt{prefixlen} $-\ 1$]
            \Else
                \State \texttt{LPS}[$i$] $\gets$ 0
                \State $i \gets i+1$
            \EndIf
        \EndIf
    \EndWhile
    \State \Return \texttt{LPS}
\EndFunction
\end{algorithmic}
\end{algorithm}

\subsection{Code}

\FrameTBStyle{python}

\lstinputlisting[style=pythonFrameTB, gobble=4]{Knuth.py}

\subsection{Complexity Analysis}

Let the length of the \texttt{pattern} be $m$ and the length of the \texttt{text} be $n$. The creation of the \texttt{LPS} array takes $O(m)$ time: $i$ is incremented $O(m)$ times, and since the statement that changes \texttt{prefixlen} on a mismatch decreases it by at least 1, while \texttt{prefixlen} can be incremented at most $O(m)$ times, that statement also runs at most $O(m)$ times, giving $O(m)$ overall. \\
In the main KMP procedure, the $i$ pointer is incremented $O(n)$ times and the $j$ pointer is rolled back on mismatches. Since $j$ can only roll back as much as it has progressed up to the mismatch, each character is examined at most twice, hence there are at most $O(n)$ comparisons, leading to an overall algorithm complexity of $O(n+m)$.

\newpage

\chapter{Empirical Analysis}

Complexity analysis gives us a fair idea of which algorithms are better in theory. In reality, there are other costs that are hidden by the complexity. Complexity analysis usually considers the worst case, but those are only upper bounds and actual behaviour may differ. Empirical analysis also helps compare algorithms with the same complexity, for example Rabin-Karp and Brute Force, which have the same worst-case complexity.

\section{Review of Complexities}

Below are the theoretical complexities that we already know. We can use them to see whether our results match what we would expect.
\begin{table}[h]
\centering
\begin{tabular}{|l|l|}
\hline
\textbf{Algorithm} & \textbf{Complexity} \\ \hline
Brute Force Algorithm & Worst case: $O(nm)$ \\ \hline
\multicolumn{1}{|c|}{\multirow{2}{*}{Rabin-Karp Algorithm}} & Worst case: $O(nm)$ \\ \cline{2-2}
\multicolumn{1}{|c|}{} & Average/Best case: $O(n+m)$ \\ \hline
KMP Algorithm & Worst case: $O(n+m)$ \\ \hline
\end{tabular}
\caption{Theoretical Complexities}
\label{tab:Complexities}
\end{table}

\section{How the Analysis was Done}

The analysis was done on the same computer and in the same language (Python) in order to ensure a fair comparison.
\begin{enumerate}
    \item To see the impact of text size, the text size was varied from 5,000 to 1,000,000 in steps of 5,000.
    \item To see the impact of pattern size, the pattern size was varied from 100 to 10,000 in steps of 50.
    \item In the testing stage, the algorithm was run three times for each pair of (\texttt{len(text)}, \texttt{len(pattern)}) and the average time was taken. This accounts for different CPU loads and evens out fluctuations.
    \item Printing was disabled so that only the computation time is taken into account.
\end{enumerate}

\section{Impact of the size of \texttt{text}}

\begin{figure}[H]
    \centering
    \includegraphics[width=0.8\textwidth]{n_analysis.png}
    \noskipcaption{Analysis increasing size of $n$}
    % \label{fig:random_university_logo}
\end{figure}

As the size of the text increases, the time taken increases at a linear rate, which is what we expect from the complexities since $m$ is fixed. \\
The Knuth-Morris-Pratt algorithm takes the least amount of time, as we would expect, since its complexity is linear and each character is compared at most twice. \\
There is a surprising result between Rabin-Karp and Brute Force: Brute Force is much more efficient than Rabin-Karp. The reason for this can be the high cost of hashing, which brute force avoids; there may also be collisions, which make Rabin-Karp do more computation than in the best case.

\section{Impact of the size of the pattern}

\begin{figure}[H]
    \centering
    \includegraphics[width=0.8\textwidth]{m_analysis.png}
    \noskipcaption{Analysis increasing size of $m$}
    % \label{fig:random_university_logo}
\end{figure}

The Knuth-Morris-Pratt algorithm takes an almost constant amount of time, as we would expect since $n>m$. Rabin-Karp grows much faster than the others. This is because, as the pattern size increases, the probability of collisions increases, leading to false positives and hence a lot of brute-force verification, which, coupled with the cost of hashing, leads to huge growth. The brute force case is strange. This may be because the strings are randomly generated, so we rarely have to scan the entire window to detect a mismatch, and the worst case isn't hit.

\section{Conclusions}

The KMP algorithm seems to have the best performance, with Brute Force close behind and perhaps good enough for small strings. Rabin-Karp is the slowest, but it is still useful, since it can be extended to match multiple patterns at once, saving time. The code for the evaluation can be found in the repository.

\printbibliography

\end{document}
{ "alphanum_fraction": 0.6725025747, "avg_line_length": 53.0842824601, "ext": "tex", "hexsha": "2844f9285ba0bef67ea9220cc34426e938b505cd", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "44bc7a5808d66fcafdf7da60d96f3f85381c5363", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "murtaza854/CS-412-L1-Project-String-Matching", "max_forks_repo_path": "main.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "44bc7a5808d66fcafdf7da60d96f3f85381c5363", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "murtaza854/CS-412-L1-Project-String-Matching", "max_issues_repo_path": "main.tex", "max_line_length": 488, "max_stars_count": null, "max_stars_repo_head_hexsha": "44bc7a5808d66fcafdf7da60d96f3f85381c5363", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "murtaza854/CS-412-L1-Project-String-Matching", "max_stars_repo_path": "main.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 6648, "size": 23304 }
\hypertarget{classglite_1_1wmsui_1_1api_1_1Request}{ \section{glite::wmsui::api::Request Class Reference} \label{classglite_1_1wmsui_1_1api_1_1Request}\index{glite::wmsui::api::Request@{glite::wmsui::api::Request}} } Allow creating the job and controlling it during its lifetime. {\tt \#include $<$Request.h$>$} \subsection*{Public Member Functions} \begin{Indent}{\bf Constructors/Destructor}\par \begin{CompactItemize} \item \hyperlink{classglite_1_1wmsui_1_1api_1_1Request_z9_0}{Request} () \item \hyperlink{classglite_1_1wmsui_1_1api_1_1Request_z9_1}{Request} (const glite::wmsutils::jobid::Job\-Id \&id) \item \hyperlink{classglite_1_1wmsui_1_1api_1_1Request_z9_2}{Request} (const glite::wms::jdl::Exp\-Dag\-Ad \&ad) \item \hyperlink{classglite_1_1wmsui_1_1api_1_1Request_z9_3}{Request} (const glite::wms::jdl::Job\-Ad \&ad) \item \hyperlink{classglite_1_1wmsui_1_1api_1_1Request_z9_4}{Request} (const \hyperlink{classglite_1_1wmsui_1_1api_1_1Request}{Request} \&\hyperlink{classglite_1_1wmsui_1_1api_1_1Request}{Request}) \item virtual \hyperlink{classglite_1_1wmsui_1_1api_1_1Request_z9_5}{$\sim$Request} () throw () \item void \hyperlink{classglite_1_1wmsui_1_1api_1_1Request_z9_6}{operator=} (const \hyperlink{classglite_1_1wmsui_1_1api_1_1Request}{Request} \&dag) \end{CompactItemize} \end{Indent} \begin{Indent}{\bf Get/Set Methods}\par \begin{CompactItemize} \item void \hyperlink{classglite_1_1wmsui_1_1api_1_1Request_z11_0}{set\-Cred\-Path} (const std::string cp) \item void \hyperlink{classglite_1_1wmsui_1_1api_1_1Request_z11_1}{unset\-Cred\-Path} () \item void \hyperlink{classglite_1_1wmsui_1_1api_1_1Request_z11_2}{set\-Logger\-Level} (unsigned int level) \item void \hyperlink{classglite_1_1wmsui_1_1api_1_1Request_z11_3}{set\-Job\-Ad} (const glite::wms::jdl::Job\-Ad \&ad) \item void \hyperlink{classglite_1_1wmsui_1_1api_1_1Request_z11_4}{set\-Dag\-Ad} (const glite::wms::jdl::Exp\-Dag\-Ad \&ad) \item void \hyperlink{classglite_1_1wmsui_1_1api_1_1Request_z11_5}{set\-Dag\-Id} (const glite::wmsutils::jobid::Job\-Id \&id) \end{CompactItemize} \end{Indent} \begin{Indent}{\bf Job Action Methods}\par \begin{CompactItemize} \item glite::lb::Job\-Status \hyperlink{classglite_1_1wmsui_1_1api_1_1Request_z13_0}{get\-Status} (bool ad=true) \item std::vector$<$ glite::lb::Event $>$ \hyperlink{classglite_1_1wmsui_1_1api_1_1Request_z13_1}{get\-Log\-Info} () \item glite::wmsutils::jobid::Job\-Id \hyperlink{classglite_1_1wmsui_1_1api_1_1Request_z13_2}{submit} (const std::string \&ns\-Host, int ns\-Port, const std::string \&lb\-Host, int lb\-Port, const std::string \&ceid=\char`\"{}\char`\"{}) \item std::vector$<$ std::string $>$ \hyperlink{classglite_1_1wmsui_1_1api_1_1Request_z13_3}{list\-Matching\-CE} (const std::string \&ns\-Host, int ns\-Port) \item void \hyperlink{classglite_1_1wmsui_1_1api_1_1Request_z13_4}{cancel} () \item void \hyperlink{classglite_1_1wmsui_1_1api_1_1Request_z13_5}{get\-Output} (const std::string \&dir\_\-path) \end{CompactItemize} \end{Indent} \subsection{Detailed Description} Allow creating the job and controlling it during its lifetime. Allow controlling the Dag The \hyperlink{classglite_1_1wmsui_1_1api_1_1Job}{Job} class provides methods that allow controlling the job during its lifetime. It currently encompasses routines for cancelling a job and retrieving its output, but if needed it will be extended to provide other features such as job checkpointing, holding, releasing etc. 
\begin{Desc} \item[Version:]0.1 \end{Desc} \begin{Desc} \item[Date:]15 April 2002 \end{Desc} \begin{Desc} \item[Author:]Alessandro Maraschini $<$\href{mailto:[email protected]}{\tt [email protected]}$>$ \end{Desc} \subsection{Constructor \& Destructor Documentation} \hypertarget{classglite_1_1wmsui_1_1api_1_1Request_z9_0}{ \index{glite::wmsui::api::Request@{glite::wmsui::api::Request}!Request@{Request}} \index{Request@{Request}!glite::wmsui::api::Request@{glite::wmsui::api::Request}} \subsubsection[Request]{\setlength{\rightskip}{0pt plus 5cm}glite::wmsui::api::Request::Request ()}} \label{classglite_1_1wmsui_1_1api_1_1Request_z9_0} Instantiates an empty \hyperlink{classglite_1_1wmsui_1_1api_1_1Job}{Job} object \hypertarget{classglite_1_1wmsui_1_1api_1_1Request_z9_1}{ \index{glite::wmsui::api::Request@{glite::wmsui::api::Request}!Request@{Request}} \index{Request@{Request}!glite::wmsui::api::Request@{glite::wmsui::api::Request}} \subsubsection[Request]{\setlength{\rightskip}{0pt plus 5cm}glite::wmsui::api::Request::Request (const glite::wmsutils::jobid::Job\-Id \& {\em id})}} \label{classglite_1_1wmsui_1_1api_1_1Request_z9_1} Instantiates an \hyperlink{classglite_1_1wmsui_1_1api_1_1Job}{Job} object with a Job\-Id \begin{Desc} \item[Parameters:] \begin{description} \item[{\em id}]the Jobid instance \end{description} \end{Desc} \begin{Desc} \item[Exceptions:] \begin{description} \item[{\em Job\-Operation\-Exception}]If the Job\-Id is empty \end{description} \end{Desc} \hypertarget{classglite_1_1wmsui_1_1api_1_1Request_z9_2}{ \index{glite::wmsui::api::Request@{glite::wmsui::api::Request}!Request@{Request}} \index{Request@{Request}!glite::wmsui::api::Request@{glite::wmsui::api::Request}} \subsubsection[Request]{\setlength{\rightskip}{0pt plus 5cm}glite::wmsui::api::Request::Request (const glite::wms::jdl::Exp\-Dag\-Ad \& {\em ad})}} \label{classglite_1_1wmsui_1_1api_1_1Request_z9_2} Instantiates an \hyperlink{classglite_1_1wmsui_1_1api_1_1Job}{Job} object with a Exp\-Dag\-Ad \begin{Desc} \item[Parameters:] \begin{description} \item[{\em ad}]the Expr\-Dag\-Ad instance \end{description} \end{Desc} \begin{Desc} \item[Exceptions:] \begin{description} \item[{\em Job\-Operation\-Exception}]If the Dag\-Ad is empty \end{description} \end{Desc} \hypertarget{classglite_1_1wmsui_1_1api_1_1Request_z9_3}{ \index{glite::wmsui::api::Request@{glite::wmsui::api::Request}!Request@{Request}} \index{Request@{Request}!glite::wmsui::api::Request@{glite::wmsui::api::Request}} \subsubsection[Request]{\setlength{\rightskip}{0pt plus 5cm}glite::wmsui::api::Request::Request (const glite::wms::jdl::Job\-Ad \& {\em ad})}} \label{classglite_1_1wmsui_1_1api_1_1Request_z9_3} Instantiates an \hyperlink{classglite_1_1wmsui_1_1api_1_1Job}{Job} object with a Job\-Ad \begin{Desc} \item[Parameters:] \begin{description} \item[{\em ad}]the Job\-Ad instance \end{description} \end{Desc} \begin{Desc} \item[Exceptions:] \begin{description} \item[{\em Job\-Operation\-Exception}]If the Job\-Ad is empty \end{description} \end{Desc} \hypertarget{classglite_1_1wmsui_1_1api_1_1Request_z9_4}{ \index{glite::wmsui::api::Request@{glite::wmsui::api::Request}!Request@{Request}} \index{Request@{Request}!glite::wmsui::api::Request@{glite::wmsui::api::Request}} \subsubsection[Request]{\setlength{\rightskip}{0pt plus 5cm}glite::wmsui::api::Request::Request (const \hyperlink{classglite_1_1wmsui_1_1api_1_1Request}{Request} \& {\em Request})}} \label{classglite_1_1wmsui_1_1api_1_1Request_z9_4} Copy constructor 
\hypertarget{classglite_1_1wmsui_1_1api_1_1Request_z9_5}{ \index{glite::wmsui::api::Request@{glite::wmsui::api::Request}!~Request@{$\sim$Request}} \index{~Request@{$\sim$Request}!glite::wmsui::api::Request@{glite::wmsui::api::Request}} \subsubsection[$\sim$Request]{\setlength{\rightskip}{0pt plus 5cm}virtual glite::wmsui::api::Request::$\sim$\hyperlink{classglite_1_1wmsui_1_1api_1_1Request}{Request} () throw ()\hspace{0.3cm}{\tt \mbox{[}virtual\mbox{]}}}} \label{classglite_1_1wmsui_1_1api_1_1Request_z9_5} destructor \subsection{Member Function Documentation} \hypertarget{classglite_1_1wmsui_1_1api_1_1Request_z13_4}{ \index{glite::wmsui::api::Request@{glite::wmsui::api::Request}!cancel@{cancel}} \index{cancel@{cancel}!glite::wmsui::api::Request@{glite::wmsui::api::Request}} \subsubsection[cancel]{\setlength{\rightskip}{0pt plus 5cm}void glite::wmsui::api::Request::cancel ()}} \label{classglite_1_1wmsui_1_1api_1_1Request_z13_4} Cancel the job from the Network Server \begin{Desc} \item[Returns:]The Result of the operation \end{Desc} \begin{Desc} \item[Exceptions:] \begin{description} \item[{\em Job\-Operation\-Exception}]The Operation required is not allowed for the \hyperlink{classglite_1_1wmsui_1_1api_1_1Job}{Job} \end{description} \end{Desc} \begin{Desc} \item[See also:]exception returned from NS \end{Desc} \hypertarget{classglite_1_1wmsui_1_1api_1_1Request_z13_1}{ \index{glite::wmsui::api::Request@{glite::wmsui::api::Request}!getLogInfo@{getLogInfo}} \index{getLogInfo@{getLogInfo}!glite::wmsui::api::Request@{glite::wmsui::api::Request}} \subsubsection[getLogInfo]{\setlength{\rightskip}{0pt plus 5cm}std::vector$<$glite::lb::Event$>$ glite::wmsui::api::Request::get\-Log\-Info ()}} \label{classglite_1_1wmsui_1_1api_1_1Request_z13_1} Retrieve the bookkeeping information of the job \begin{Desc} \item[Returns:]all the events logged during the job life \end{Desc} \begin{Desc} \item[See also:]glite::lb::Event class documentation \end{Desc} \hypertarget{classglite_1_1wmsui_1_1api_1_1Request_z13_5}{ \index{glite::wmsui::api::Request@{glite::wmsui::api::Request}!getOutput@{getOutput}} \index{getOutput@{getOutput}!glite::wmsui::api::Request@{glite::wmsui::api::Request}} \subsubsection[getOutput]{\setlength{\rightskip}{0pt plus 5cm}void glite::wmsui::api::Request::get\-Output (const std::string \& {\em dir\_\-path})}} \label{classglite_1_1wmsui_1_1api_1_1Request_z13_5} Retrieve output files of a submitted job \begin{Desc} \item[Parameters:] \begin{description} \item[{\em dir\_\-path}]the path where to retrieve the Output\-Sandbox files \end{description} \end{Desc} \begin{Desc} \item[Exceptions:] \begin{description} \item[{\em Job\-Operation\-Exception}]The Operation required is not allowed for the \hyperlink{classglite_1_1wmsui_1_1api_1_1Job}{Job} \end{description} \end{Desc} \begin{Desc} \item[See also:]exception returned from NS \end{Desc} \hypertarget{classglite_1_1wmsui_1_1api_1_1Request_z13_0}{ \index{glite::wmsui::api::Request@{glite::wmsui::api::Request}!getStatus@{getStatus}} \index{getStatus@{getStatus}!glite::wmsui::api::Request@{glite::wmsui::api::Request}} \subsubsection[getStatus]{\setlength{\rightskip}{0pt plus 5cm}glite::lb::Job\-Status glite::wmsui::api::Request::get\-Status (bool {\em ad} = true)}} \label{classglite_1_1wmsui_1_1api_1_1Request_z13_0} Retrieve the status of the job \begin{Desc} \item[Parameters:] \begin{description} \item[{\em ad}]if set to false only basic info are retrieved \end{description} \end{Desc} \begin{Desc} \item[Returns:]the status of the requested component 
\end{Desc} \begin{Desc} \item[See also:]glite::lb::Job\-Status class documentation \end{Desc} \hypertarget{classglite_1_1wmsui_1_1api_1_1Request_z13_3}{ \index{glite::wmsui::api::Request@{glite::wmsui::api::Request}!listMatchingCE@{listMatchingCE}} \index{listMatchingCE@{listMatchingCE}!glite::wmsui::api::Request@{glite::wmsui::api::Request}} \subsubsection[listMatchingCE]{\setlength{\rightskip}{0pt plus 5cm}std::vector$<$std::string$>$ glite::wmsui::api::Request::list\-Matching\-CE (const std::string \& {\em ns\-Host}, int {\em ns\-Port})}} \label{classglite_1_1wmsui_1_1api_1_1Request_z13_3} Look for matching resources \begin{Desc} \item[Parameters:] \begin{description} \item[{\em ns\-Host}]The Network Server host address \item[{\em ns\-Port}]The Network Server port \end{description} \end{Desc} \begin{Desc} \item[Returns:]the Computing elements that match with the specified JDL \end{Desc} \hypertarget{classglite_1_1wmsui_1_1api_1_1Request_z9_6}{ \index{glite::wmsui::api::Request@{glite::wmsui::api::Request}!operator=@{operator=}} \index{operator=@{operator=}!glite::wmsui::api::Request@{glite::wmsui::api::Request}} \subsubsection[operator=]{\setlength{\rightskip}{0pt plus 5cm}void glite::wmsui::api::Request::operator= (const \hyperlink{classglite_1_1wmsui_1_1api_1_1Request}{Request} \& {\em dag})}} \label{classglite_1_1wmsui_1_1api_1_1Request_z9_6} Assignment operator \hypertarget{classglite_1_1wmsui_1_1api_1_1Request_z11_0}{ \index{glite::wmsui::api::Request@{glite::wmsui::api::Request}!setCredPath@{setCredPath}} \index{setCredPath@{setCredPath}!glite::wmsui::api::Request@{glite::wmsui::api::Request}} \subsubsection[setCredPath]{\setlength{\rightskip}{0pt plus 5cm}void glite::wmsui::api::Request::set\-Cred\-Path (const std::string {\em cp})}} \label{classglite_1_1wmsui_1_1api_1_1Request_z11_0} Set a different Proxy certificate from the default one \begin{Desc} \item[Parameters:] \begin{description} \item[{\em cp}]The full path of the proxy certificate file to be set \end{description} \end{Desc} \hypertarget{classglite_1_1wmsui_1_1api_1_1Request_z11_4}{ \index{glite::wmsui::api::Request@{glite::wmsui::api::Request}!setDagAd@{setDagAd}} \index{setDagAd@{setDagAd}!glite::wmsui::api::Request@{glite::wmsui::api::Request}} \subsubsection[setDagAd]{\setlength{\rightskip}{0pt plus 5cm}void glite::wmsui::api::Request::set\-Dag\-Ad (const glite::wms::jdl::Exp\-Dag\-Ad \& {\em ad})}} \label{classglite_1_1wmsui_1_1api_1_1Request_z11_4} set the Job\-Ad instance \begin{Desc} \item[Parameters:] \begin{description} \item[{\em ad}]the Job\-Ad Instance to set \end{description} \end{Desc} \hypertarget{classglite_1_1wmsui_1_1api_1_1Request_z11_5}{ \index{glite::wmsui::api::Request@{glite::wmsui::api::Request}!setDagId@{setDagId}} \index{setDagId@{setDagId}!glite::wmsui::api::Request@{glite::wmsui::api::Request}} \subsubsection[setDagId]{\setlength{\rightskip}{0pt plus 5cm}void glite::wmsui::api::Request::set\-Dag\-Id (const glite::wmsutils::jobid::Job\-Id \& {\em id})}} \label{classglite_1_1wmsui_1_1api_1_1Request_z11_5} set the Job\-Id instance \begin{Desc} \item[Parameters:] \begin{description} \item[{\em id}]the Job\-Id Instance to set \end{description} \end{Desc} \hypertarget{classglite_1_1wmsui_1_1api_1_1Request_z11_3}{ \index{glite::wmsui::api::Request@{glite::wmsui::api::Request}!setJobAd@{setJobAd}} \index{setJobAd@{setJobAd}!glite::wmsui::api::Request@{glite::wmsui::api::Request}} \subsubsection[setJobAd]{\setlength{\rightskip}{0pt plus 5cm}void glite::wmsui::api::Request::set\-Job\-Ad (const 
glite::wms::jdl::Job\-Ad \& {\em ad})}} \label{classglite_1_1wmsui_1_1api_1_1Request_z11_3} set the Job\-Ad instance \begin{Desc} \item[Parameters:] \begin{description} \item[{\em ad}]the Job\-Ad Instance to set \end{description} \end{Desc} \hypertarget{classglite_1_1wmsui_1_1api_1_1Request_z11_2}{ \index{glite::wmsui::api::Request@{glite::wmsui::api::Request}!setLoggerLevel@{setLoggerLevel}} \index{setLoggerLevel@{setLoggerLevel}!glite::wmsui::api::Request@{glite::wmsui::api::Request}} \subsubsection[setLoggerLevel]{\setlength{\rightskip}{0pt plus 5cm}void glite::wmsui::api::Request::set\-Logger\-Level (unsigned int {\em level})\hspace{0.3cm}{\tt \mbox{[}inline\mbox{]}}}} \label{classglite_1_1wmsui_1_1api_1_1Request_z11_2} Se the verbosity level for NS debug \begin{Desc} \item[Parameters:] \begin{description} \item[{\em level}]default value = 0 (no verbosity), max value = 6 (dreadful verbosity, print screen) \end{description} \end{Desc} \hypertarget{classglite_1_1wmsui_1_1api_1_1Request_z13_2}{ \index{glite::wmsui::api::Request@{glite::wmsui::api::Request}!submit@{submit}} \index{submit@{submit}!glite::wmsui::api::Request@{glite::wmsui::api::Request}} \subsubsection[submit]{\setlength{\rightskip}{0pt plus 5cm}glite::wmsutils::jobid::Job\-Id glite::wmsui::api::Request::submit (const std::string \& {\em ns\-Host}, int {\em ns\-Port}, const std::string \& {\em lb\-Host}, int {\em lb\-Port}, const std::string \& {\em ceid} = \char`\"{}\char`\"{})}} \label{classglite_1_1wmsui_1_1api_1_1Request_z13_2} Submit the job to the Network Server \begin{Desc} \item[Parameters:] \begin{description} \item[{\em ns\-Host}]The Network Server host address \item[{\em ns\-Port}]The Network Server port \item[{\em lb\-Host}]The LB Server host address \item[{\em lb\-Port}]The LB Server port \item[{\em ceid}]the resource id where the job has to be executed \end{description} \end{Desc} \begin{Desc} \item[Returns:]the Job\-Id representing the submitted job\end{Desc} \hypertarget{classglite_1_1wmsui_1_1api_1_1Request_z11_1}{ \index{glite::wmsui::api::Request@{glite::wmsui::api::Request}!unsetCredPath@{unsetCredPath}} \index{unsetCredPath@{unsetCredPath}!glite::wmsui::api::Request@{glite::wmsui::api::Request}} \subsubsection[unsetCredPath]{\setlength{\rightskip}{0pt plus 5cm}void glite::wmsui::api::Request::unset\-Cred\-Path ()}} \label{classglite_1_1wmsui_1_1api_1_1Request_z11_1} Set the Proxy certificate as default The documentation for this class was generated from the following file:\begin{CompactItemize} \item \hyperlink{Request_8h}{Request.h}\end{CompactItemize}
{ "alphanum_fraction": 0.7691664676, "avg_line_length": 51.8513931889, "ext": "tex", "hexsha": "3bc02b27664a27fd37cda59329401f409dcfe195", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "5b2adda72ba13cf2a85ec488894c2024e155a4b5", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "italiangrid/wms", "max_forks_repo_path": "users-guide/WMS/autogen/jobsubmission/classglite_1_1wmsui_1_1api_1_1Request.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "5b2adda72ba13cf2a85ec488894c2024e155a4b5", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "italiangrid/wms", "max_issues_repo_path": "users-guide/WMS/autogen/jobsubmission/classglite_1_1wmsui_1_1api_1_1Request.tex", "max_line_length": 348, "max_stars_count": 1, "max_stars_repo_head_hexsha": "5b2adda72ba13cf2a85ec488894c2024e155a4b5", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "italiangrid/wms", "max_stars_repo_path": "users-guide/WMS/autogen/jobsubmission/classglite_1_1wmsui_1_1api_1_1Request.tex", "max_stars_repo_stars_event_max_datetime": "2019-01-18T02:19:18.000Z", "max_stars_repo_stars_event_min_datetime": "2019-01-18T02:19:18.000Z", "num_tokens": 6343, "size": 16748 }
% !TeX program = xelatex
% LaTeX Lab Report Template
%
% From: https://www.dur.ac.uk/resources/physics/students/assessment/LatexExampleLabReportTemplate.tex
%
% JRM downloaded 2019-04-24, Last Modified: 2019-12-02
%
% NOTE - this uses an internal bibliography
%

\documentclass[12pt, onecolumn]{revtex4} % Font size (10, 11 or 12pt) and column number (one or two).

\usepackage{times} % Times New Roman font type
\usepackage[a4paper, left=1.85cm, right=1.85cm, top=1.85cm, bottom=1.85cm]{geometry} % Defines paper size and margin length
\usepackage[font=small, labelfont=bf]{caption} % Defines caption font size as 9pt and caption title bolded
\usepackage{graphics,graphicx,epsfig,ulem} % Makes sure all graphics works
\usepackage{amsmath} % Adds mathematical features for equations
\usepackage{etoolbox} % Customise date to preferred format
\makeatletter
\patchcmd{\frontmatter@RRAP@format}{(}{}{}{}
\patchcmd{\frontmatter@RRAP@format}{)}{}{}{}
\renewcommand\Dated@name{}
\makeatother
\usepackage{fancyhdr}

\pagestyle{fancy} % Insert header
\renewcommand{\headrulewidth}{0pt}
\lhead{A. Student} % Your name
\rhead{Example Lab Report Title} % Your report title

\def\bibsection{\section*{References}} % Position reference section correctly

%%%%% Document %%%%%
\begin{document}

\title{Example Lab Report}
\date{Submitted: \today{}, Date of Experiment: EXPERIMENT DATE}
\author{A. Student (and L. Partner)}
\affiliation{\normalfont L1 Discovery Labs, Lab Group XXX, Lab Day}

\begin{abstract}
ABSTRACT HERE
\end{abstract}

\maketitle
\thispagestyle{plain} % produces page number for front page

\section{Introduction}
Paragraph one

Paragraph two

\section{Methods}
Paragraph one

Paragraph two

\section{Results}
Paragraph one

Paragraph two

\section{Discussion}
Paragraph one

Paragraph two

\section{Conclusions}
Some awesome

\begin{acknowledgments}
(OPTIONAL) The author would like to thank...
\end{acknowledgments}

\begin{thebibliography}{}
\bibitem{ref01} A. N. Other, Title of the Book, edition, publishers, place of publication (year of publication), p. 123. % example reference
\end{thebibliography}

\end{document}
{ "alphanum_fraction": 0.687446989, "avg_line_length": 23.1176470588, "ext": "tex", "hexsha": "23728e7b3669d8b424cff9043bbf988c84266df1", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "5dd3412b808476bf0354cd0a201a0ac3a101dcf9", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "jrminter/jrm-latex-examples", "max_forks_repo_path": "simple-lab-report/simple-lab-report.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "5dd3412b808476bf0354cd0a201a0ac3a101dcf9", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "jrminter/jrm-latex-examples", "max_issues_repo_path": "simple-lab-report/simple-lab-report.tex", "max_line_length": 143, "max_stars_count": null, "max_stars_repo_head_hexsha": "5dd3412b808476bf0354cd0a201a0ac3a101dcf9", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "jrminter/jrm-latex-examples", "max_stars_repo_path": "simple-lab-report/simple-lab-report.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 643, "size": 2358 }
\subsection{io -- Core tools for working with streams} To be done .... %
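A minimal sketch of the module's in-memory streams in the meantime (the variable names are illustrative only):

\begin{verbatim}
import io

# Text stream kept entirely in memory (no file on disk).
buffer = io.StringIO()
buffer.write("hello ")
buffer.write("world")
print(buffer.getvalue())        # -> "hello world"

# Binary counterpart for bytes data.
raw = io.BytesIO(b"\x00\x01\x02")
print(raw.read(2))              # -> b"\x00\x01"
\end{verbatim}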
{ "alphanum_fraction": 0.6756756757, "avg_line_length": 14.8, "ext": "tex", "hexsha": "59ac38d6a27f5ff14b74425e56a1b6b5e75b3e1f", "lang": "TeX", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2016-11-24T19:55:47.000Z", "max_forks_repo_forks_event_min_datetime": "2016-11-24T19:55:47.000Z", "max_forks_repo_head_hexsha": "dd7d6f30d945733f7ed792fcccd33875b59d240f", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "remigiusz-suwalski/programming-notes", "max_forks_repo_path": "src/python3/sections/io.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "dd7d6f30d945733f7ed792fcccd33875b59d240f", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "remigiusz-suwalski/programming-notes", "max_issues_repo_path": "src/python3/sections/io.tex", "max_line_length": 54, "max_stars_count": 1, "max_stars_repo_head_hexsha": "dd7d6f30d945733f7ed792fcccd33875b59d240f", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "remigiusz-suwalski/programming-notes", "max_stars_repo_path": "src/python3/sections/io.tex", "max_stars_repo_stars_event_max_datetime": "2022-02-28T05:03:18.000Z", "max_stars_repo_stars_event_min_datetime": "2022-02-28T05:03:18.000Z", "num_tokens": 17, "size": 74 }
\hypertarget{section}{% \section{1}\label{section}} \bibverse{1} Simon Peter, a servant and apostle of Jesus Christ,\footnote{1:1 ``Christ'' means ``Anointed One''.} to those who have obtained a like precious faith with us in the righteousness of our God and Saviour, Jesus Christ: \bibverse{2} Grace to you and peace be multiplied in the knowledge of God and of Jesus our Lord, \bibverse{3} seeing that his divine power has granted to us all things that pertain to life and godliness, through the knowledge of him who called us by his own glory and virtue, \bibverse{4} by which he has granted to us his precious and exceedingly great promises; that through these you may become partakers of the divine nature, having escaped from the corruption that is in the world by lust. \bibverse{5} Yes, and for this very cause adding on your part all diligence, in your faith supply moral excellence; and in moral excellence, knowledge; \bibverse{6} and in knowledge, self-control; and in self-control, perseverance; and in perseverance, godliness; \bibverse{7} and in godliness, brotherly affection; and in brotherly affection, love. \bibverse{8} For if these things are yours and abound, they make you to not be idle or unfruitful in the knowledge of our Lord Jesus Christ. \bibverse{9} For he who lacks these things is blind, seeing only what is near, having forgotten the cleansing from his old sins. \bibverse{10} Therefore, brothers,+ 1:10 The word for ``brothers'' here and where context allows may also be correctly translated ``brothers and sisters'' or ``siblings.'' be more diligent to make your calling and election sure. For if you do these things, you will never stumble. \bibverse{11} For thus you will be richly supplied with the entrance into the eternal Kingdom of our Lord and Saviour, Jesus Christ. \bibverse{12} Therefore I will not be negligent to remind you of these things, though you know them and are established in the present truth. \bibverse{13} I think it right, as long as I am in this tent, to stir you up by reminding you, \bibverse{14} knowing that the putting off of my tent comes swiftly, even as our Lord Jesus Christ made clear to me. \bibverse{15} Yes, I will make every effort that you may always be able to remember these things even after my departure. \bibverse{16} For we didn't follow cunningly devised fables when we made known to you the power and coming of our Lord Jesus Christ, but we were eyewitnesses of his majesty. \bibverse{17} For he received from God the Father honour and glory when the voice came to him from the Majestic Glory, ``This is my beloved Son, in whom I am well pleased.''\footnote{1:17 Matthew 17:5; Mark 9:7; Luke 9:35} \bibverse{18} We heard this voice come out of heaven when we were with him on the holy mountain. \bibverse{19} We have the more sure word of prophecy; and you do well that you heed it as to a lamp shining in a dark place, until the day dawns and the morning star arises in your hearts, \bibverse{20} knowing this first, that no prophecy of Scripture is of private interpretation. \bibverse{21} For no prophecy ever came by the will of man, but holy men of God spoke, being moved by the Holy Spirit. \hypertarget{section-1}{% \section{2}\label{section-1}} \bibverse{1} But false prophets also arose amongst the people, as false teachers will also be amongst you, who will secretly bring in destructive heresies, denying even the Master who bought them, bringing on themselves swift destruction. 
\bibverse{2} Many will follow their immoral\footnote{2:2 TR reads ``destructive'' instead of ``immoral''} ways, and as a result, the way of the truth will be maligned. \bibverse{3} In covetousness they will exploit you with deceptive words: whose sentence now from of old doesn't linger, and their destruction will not slumber. \bibverse{4} For if God didn't spare angels when they sinned, but cast them down to Tartarus,\footnote{2:4 Tartarus is another name for Hell} and committed them to pits of darkness to be reserved for judgement; \bibverse{5} and didn't spare the ancient world, but preserved Noah with seven others, a preacher of righteousness, when he brought a flood on the world of the ungodly, \bibverse{6} and turning the cities of Sodom and Gomorrah into ashes, condemned them to destruction, having made them an example to those who would live in an ungodly way, \bibverse{7} and delivered righteous Lot, who was very distressed by the lustful life of the wicked \bibverse{8} (for that righteous man dwelling amongst them was tormented in his righteous soul from day to day with seeing and hearing lawless deeds), \bibverse{9} then the Lord knows how to deliver the godly out of temptation and to keep the unrighteous under punishment for the day of judgement, \bibverse{10} but chiefly those who walk after the flesh in the lust of defilement and despise authority. Daring, self-willed, they are not afraid to speak evil of dignitaries, \bibverse{11} whereas angels, though greater in might and power, don't bring a slanderous judgement against them before the Lord. \bibverse{12} But these, as unreasoning creatures, born natural animals to be taken and destroyed, speaking evil in matters about which they are ignorant, will in their destroying surely be destroyed, \bibverse{13} receiving the wages of unrighteousness; people who count it pleasure to revel in the daytime, spots and defects, revelling in their deceit while they feast with you; \bibverse{14} having eyes full of adultery, and who can't cease from sin, enticing unsettled souls, having a heart trained in greed, accursed children! \bibverse{15} Forsaking the right way, they went astray, having followed the way of Balaam the son of Beor, who loved the wages of wrongdoing; \bibverse{16} but he was rebuked for his own disobedience. A speechless donkey spoke with a man's voice and stopped the madness of the prophet. \bibverse{17} These are wells without water, clouds driven by a storm, for whom the blackness of darkness has been reserved forever. \bibverse{18} For, uttering great swelling words of emptiness, they entice in the lusts of the flesh, by licentiousness, those who are indeed escaping from those who live in error; \bibverse{19} promising them liberty, while they themselves are bondservants of corruption; for a man is brought into bondage by whoever overcomes him. \bibverse{20} For if, after they have escaped the defilement of the world through the knowledge of the Lord and Saviour Jesus Christ, they are again entangled in it and overcome, the last state has become worse for them than the first. \bibverse{21} For it would be better for them not to have known the way of righteousness, than after knowing it, to turn back from the holy commandment delivered to them. 
\bibverse{22} But it has happened to them according to the true proverb, ``The dog turns to his own vomit again,''\footnote{2:22 Proverbs 26:11} and ``the sow that has washed to wallowing in the mire.'' \hypertarget{section-2}{% \section{3}\label{section-2}} \bibverse{1} This is now, beloved, the second letter that I have written to you; and in both of them I stir up your sincere mind by reminding you \bibverse{2} that you should remember the words which were spoken before by the holy prophets and the commandment of us, the apostles of the Lord and Saviour, \bibverse{3} knowing this first, that in the last days mockers will come, walking after their own lusts \bibverse{4} and saying, ``Where is the promise of his coming? For, from the day that the fathers fell asleep, all things continue as they were from the beginning of the creation.'' \bibverse{5} For they wilfully forget that there were heavens from of old, and an earth formed out of water and amid water by the word of God, \bibverse{6} by which means the world that existed then, being overflowed with water, perished. \bibverse{7} But the heavens that exist now and the earth, by the same word have been stored up for fire, being reserved against the day of judgement and destruction of ungodly men. \bibverse{8} But don't forget this one thing, beloved, that one day is with the Lord as a thousand years, and a thousand years as one day. \bibverse{9} The Lord is not slow concerning his promise, as some count slowness; but he is patient with us, not wishing that anyone should perish, but that all should come to repentance. \bibverse{10} But the day of the Lord will come as a thief in the night, in which the heavens will pass away with a great noise, and the elements will be dissolved with fervent heat; and the earth and the works that are in it will be burnt up. \bibverse{11} Therefore, since all these things will be destroyed like this, what kind of people ought you to be in holy living and godliness, \bibverse{12} looking for and earnestly desiring the coming of the day of God, which will cause the burning heavens to be dissolved, and the elements will melt with fervent heat? \bibverse{13} But, according to his promise, we look for new heavens and a new earth, in which righteousness dwells. \bibverse{14} Therefore, beloved, seeing that you look for these things, be diligent to be found in peace, without defect and blameless in his sight. \bibverse{15} Regard the patience of our Lord as salvation; even as our beloved brother Paul also, according to the wisdom given to him, wrote to you, \bibverse{16} as also in all of his letters, speaking in them of these things. In those, there are some things that are hard to understand, which the ignorant and unsettled twist, as they also do to the other Scriptures, to their own destruction. \bibverse{17} You therefore, beloved, knowing these things beforehand, beware, lest being carried away with the error of the wicked, you fall from your own steadfastness. \bibverse{18} But grow in the grace and knowledge of our Lord and Saviour Jesus Christ. To him be the glory both now and forever. Amen.
{ "alphanum_fraction": 0.7827798526, "avg_line_length": 60.4085365854, "ext": "tex", "hexsha": "e68e93687860647a6cc8b13e7bffb9da69938633", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "039ab9b18364ecade1d56695cb77c40ee62b1317", "max_forks_repo_licenses": [ "CC0-1.0" ], "max_forks_repo_name": "bibliadelpueblo/BibliaLibre", "max_forks_repo_path": "Bibles/English.WorldEnglishBibleGB/out/tex/78-2 Peter.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "039ab9b18364ecade1d56695cb77c40ee62b1317", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "CC0-1.0" ], "max_issues_repo_name": "bibliadelpueblo/BibliaLibre", "max_issues_repo_path": "Bibles/English.WorldEnglishBibleGB/out/tex/78-2 Peter.tex", "max_line_length": 75, "max_stars_count": null, "max_stars_repo_head_hexsha": "039ab9b18364ecade1d56695cb77c40ee62b1317", "max_stars_repo_licenses": [ "CC0-1.0" ], "max_stars_repo_name": "bibliadelpueblo/BibliaLibre", "max_stars_repo_path": "Bibles/English.WorldEnglishBibleGB/out/tex/78-2 Peter.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 2541, "size": 9907 }
\hypertarget{list}{% \section{List}\label{list}} Lists are collection objects that contain a sequence of values, each associated with an integer index. Create a list like this: \begin{lstlisting} var list = [1, 2, 3] \end{lstlisting} Look up values using index notation: \begin{lstlisting} list[0] \end{lstlisting} You can change list entries like this: \begin{lstlisting} list[0] = "Hello" \end{lstlisting} Create an empty list: \begin{lstlisting} var list = [] \end{lstlisting} Loop over elements of a list: \begin{lstlisting} for (i in list) print i \end{lstlisting} \hypertarget{append}{% \section{Append}\label{append}} Adds an element to the end of a list: \begin{lstlisting} list = [] list.append("Foo") \end{lstlisting} \hypertarget{insert}{% \section{Insert}\label{insert}} Inserts an element into a list at a specified index: \begin{lstlisting} list = [1,2,3] list.insert(1, "Foo") print list // prints [ 1, Foo, 2, 3 ] \end{lstlisting} \hypertarget{pop}{% \section{Pop}\label{pop}} Removes the last element from a list, returning the element removed: \begin{lstlisting} print list.pop() \end{lstlisting} If an integer argument is supplied, returns and removes that element: \begin{lstlisting} var a = [1,2,3] print a.pop(1) // prints '2' print a // prints [ 1, 3 ] \end{lstlisting} \hypertarget{sort}{% \section{Sort}\label{sort}} Sorts a list: \begin{lstlisting} list.sort() \end{lstlisting} You can provide your own function to compare values in the list: \begin{lstlisting} list.sort(fn (a, b) a-b) \end{lstlisting} This function should return a negative value if \texttt{a\textless{}b}, a positive value if \texttt{a\textgreater{}b} and \texttt{0} if \texttt{a} and \texttt{b} are equal. (A further comparator example is sketched at the end of this entry, following the \emph{Add} section.) \hypertarget{order}{% \section{Order}\label{order}} Returns a list of indices that, if used in order, would sort the list. For example \begin{lstlisting} var list = [2,3,1] print list.order() // expect: [2,0,1] \end{lstlisting} would produce \texttt{{[}2,0,1{]}}. \hypertarget{remove}{% \section{Remove}\label{remove}} Removes any occurrences of a value from a list: \begin{lstlisting} var list = [1,2,3] list.remove(1) \end{lstlisting} \hypertarget{ismember}{% \section{ismember}\label{ismember}} Tests if a value is a member of a list: \begin{lstlisting} var list = [1,2,3] print list.ismember(1) // expect: true \end{lstlisting} \hypertarget{add}{% \section{Add}\label{add}} Joins two lists together: \begin{lstlisting} var l1 = [1,2,3], l2 = [4, 5, 6] print l1+l2 // expect: [1,2,3,4,5,6] \end{lstlisting}
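To complement the \emph{Sort} entry above, the following sketch (using the anonymous \texttt{fn} comparator syntax shown there, with a hypothetical list \texttt{nums}) reverses the comparator so the list should be sorted in descending order:

\begin{lstlisting}
var nums = [5, 1, 4, 2]
// b-a is negative when a>b, so larger values are placed first
nums.sort(fn (a, b) b-a)
print nums // expect: [ 5, 4, 2, 1 ]
\end{lstlisting}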
{ "alphanum_fraction": 0.7107212476, "avg_line_length": 18.8602941176, "ext": "tex", "hexsha": "ae90a1cfc6ac0fb9d459970b9d4ea4e7ea0e6135", "lang": "TeX", "max_forks_count": 2, "max_forks_repo_forks_event_max_datetime": "2021-10-31T19:55:27.000Z", "max_forks_repo_forks_event_min_datetime": "2021-10-05T16:56:16.000Z", "max_forks_repo_head_hexsha": "9df2d652d9bc269ce0f8cdeb4d55cec51d95c2f9", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "ConduitDan/morpho", "max_forks_repo_path": "manual/src/Reference/list.tex", "max_issues_count": 79, "max_issues_repo_head_hexsha": "9df2d652d9bc269ce0f8cdeb4d55cec51d95c2f9", "max_issues_repo_issues_event_max_datetime": "2022-03-31T16:06:10.000Z", "max_issues_repo_issues_event_min_datetime": "2021-10-05T17:33:19.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "ConduitDan/morpho", "max_issues_repo_path": "manual/src/Reference/list.tex", "max_line_length": 71, "max_stars_count": 10, "max_stars_repo_head_hexsha": "9df2d652d9bc269ce0f8cdeb4d55cec51d95c2f9", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "ConduitDan/morpho", "max_stars_repo_path": "manual/src/Reference/list.tex", "max_stars_repo_stars_event_max_datetime": "2022-03-26T11:41:50.000Z", "max_stars_repo_stars_event_min_datetime": "2021-09-18T14:44:14.000Z", "num_tokens": 811, "size": 2565 }
\documentclass{article} \pagestyle{empty} \usepackage{amsmath,amssymb,amsfonts} \usepackage{graphicx} \usepackage{multicol} \setlength{\oddsidemargin}{0in} \setlength{\evensidemargin}{0in} \setlength{\topmargin}{0in} \setlength{\textheight}{8.5in} \setlength{\textwidth}{6.5in} \makeatletter \renewcommand*\env@matrix[1][*\c@MaxMatrixCols c]{% \hskip -\arraycolsep \let\@ifnextchar\new@ifnextchar \array{#1}} \makeatother \begin{document} \begin{flushleft} \bfseries{MATH 260, Linear Algebra, Spring '14}\\ \bfseries{Homework 3 \& 4: Matrices, RREF, and Solutions to Systems}\\ \bfseries{Honor Code:} \hspace{3.5in}\bfseries{Names:}\\ \end{flushleft} \begin{flushleft} \vspace{.5in} \section{Homework} \LARGE Due Feb. 11th \\ \normalsize \vspace{0.25in} On page 143: 14, 16, 18, 20, 22\\ AND: Write \# 28 and \# 32 in matrix-vector form and as an augmented matrix.\\ \vspace{0.25in} By converting each of these systems into augmented matrices, then putting the matrices in RREF, determine whether each system has a unique solution, infinitely many solutions, or no solution.\\ $\begin{array}{ccccc} x+&2y+& z& = &2\\ 2x-&4y-&3z& = &0\\ -x+&6y-&4z& = &2\\ x-&y& & = &4 \end{array} $ \hspace{0.35in} $\begin{array}{ccccc} x-& y+& z& = &0\\ x+& y& & = &0\\ x+& 2y&-z& = &0 \end{array}$ \hspace{0.35in} $\begin{array}{ccccc} x- & y- & z & = & 1\\ 2x+& 3y+ & z & = &2 \end{array}$ \end{flushleft} \end{document}
{ "alphanum_fraction": 0.6788530466, "avg_line_length": 26.320754717, "ext": "tex", "hexsha": "781924f811612ebda49dc89b0c507f792e62105a", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "c92c2f3f9e3fc87a1a89041eb7bfaa1a87c9276d", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "Pelonza/PB-LinAlg", "max_forks_repo_path": "Spring 2014 - Schmitt/HW and CPAs/HWK3_4_rowops.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "c92c2f3f9e3fc87a1a89041eb7bfaa1a87c9276d", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "Pelonza/PB-LinAlg", "max_issues_repo_path": "Spring 2014 - Schmitt/HW and CPAs/HWK3_4_rowops.tex", "max_line_length": 171, "max_stars_count": null, "max_stars_repo_head_hexsha": "c92c2f3f9e3fc87a1a89041eb7bfaa1a87c9276d", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "Pelonza/PB-LinAlg", "max_stars_repo_path": "Spring 2014 - Schmitt/HW and CPAs/HWK3_4_rowops.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 567, "size": 1395 }
\documentclass{article} \usepackage{amsmath} \usepackage{amsfonts} \usepackage{geometry} \usepackage[utf8]{inputenc} \usepackage{enumitem} \usepackage{physics} \setlength{\parindent}{0pt} \setlength{\parskip}{1em} \relpenalty=10000 \binoppenalty=10000 \DeclareMathOperator{\lcm}{lcm} \begin{document} \section*{Invariance principle} \begin{enumerate} \item An invariant is a property that remains unchanged when transformations are applied. \item A monovariant is a property that always changes in the same direction when transformations are applied. \end{enumerate} \section*{Problems} \begin{enumerate} \item % Engel book $2n$ people are to be seated at a table, each having at most $n-1$ enemies among the others. Prove that they can be seated so that nobody sits next to an enemy. \item % http://www-bcf.usc.edu/~pengshi/math149/talk2.pdf Start with a finite sequence $a_1, a_2,\dots, a_n$ of positive integers. If possible, choose two indices $j < k$ such that $a_j$ does not divide $a_k$, and replace $a_j$ and $a_k$ by $\gcd(a_j, a_k)$ and $\lcm(a_j, a_k)$, respectively. Prove that if this process is repeated, it must eventually stop. \item % http://webcache.googleusercontent.com/search?q=cache%3Awww.math.olympiaadid.ut.ee%2Feng%2Farchive%2Fbw%2Fbw07sol.pdf&oq=cache%3Awww.math.olympiaadid.ut.ee%2Feng%2Farchive%2Fbw%2Fbw07sol.pdf&aqs=chrome..69i57j69i58.1714j0j4&sourceid=chrome&ie=UTF-8 Freddy writes down numbers $1, 2,\dots, n$ in some order. Then he makes a list of all pairs $(i, j)$ such that $1 \leq i < j \leq n$ and the $i$th number is bigger than the $j$th number in his permutation. After that, Freddy repeats the following action while possible: choose a pair $(i, j)$ from the current list, interchange the $i$th and the $j$th number in the current permutation, and delete $(i, j)$ from the list. Prove that Freddy can choose pairs in such an order that, after the process finishes, the numbers in the permutation are in ascending order. \item Suppose not all integers $a,b,c,d$ are equal. Start with $(a,b,c,d)$ and repeatedly replace $(a,b,c,d)$ by $(a-b,b-c,c-d,d-a)$. Prove that at least one number in the quadruple will eventually become arbitrarily large. \item % http://webcache.googleusercontent.com/search?q=cache%3Awww.math.olympiaadid.ut.ee%2Farhiiv%2Floppv%2Flp2009%2Flp2009.pdf&oq=cache%3Awww.math.olympiaadid.ut.ee%2Farhiiv%2Floppv%2Flp2009%2Flp2009.pdf&aqs=chrome..69i57j69i58.1661j0j4&sourceid=chrome&ie=UTF-8 In a rectangular grid a number of smaller rectangles (possibly overlapping) are cut out from the top-right corner of the original grid. All the remaining squares are filled with integers corresponding to the total number of squares which are either directly above or directly to the right of the square. Prove that the number of squares containing even numbers is at least as large as the number containing odd numbers. \item %http://www.hexagon.edu.vn/images/resources/upload/dec3c0b23f6d5bffa9c661616b1658fd/problemsolvingmethods%20in%20combinatorics%20an%20approach%20to%20olympiad_1377958817.pdf $23$ friends want to play soccer. For this, they choose a referee and the others split into two teams of $11$ persons each. They want to do this so that the total weight of each team is the same. We know that they all have integer weights and that, regardless of who is the referee, it is possible to make the two teams. Prove that they all have the same weight. 
\item % https://mks.mff.cuni.cz/kalva/imo/isoln/isoln863.html To each vertex of a regular pentagon an integer is assigned in such a way that the sum of all five numbers is positive. If three consecutive vertices are assigned the numbers $x, y, z$ respectively, and $y < 0$, then the following operation is allowed: the numbers $x, y, z$ are replaced by $x+y, -y, z+y$ respectively. Such an operation is performed repeatedly as long as at least one of the five numbers is negative. Determine whether this procedure necessarily comes to an end after a finite number of steps. \item % https://mindyourdecisions.com/blog/2013/06/25/the-man-and-the-lion-puzzle-pursuit-and-evasion-game-theory/ A man is stuck in a perfectly circular arena with a lion. The man can move as fast as the lion. Is it possible for the man to survive? \end{enumerate} \end{document}
{ "alphanum_fraction": 0.7644877144, "avg_line_length": 75.6842105263, "ext": "tex", "hexsha": "0badd251c6c373a5f164ef000072281955f52cb6", "lang": "TeX", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2019-05-26T15:18:18.000Z", "max_forks_repo_forks_event_min_datetime": "2019-05-26T15:18:18.000Z", "max_forks_repo_head_hexsha": "0dcacba8a6d1769bbccfedda89d08fa22c3f55a1", "max_forks_repo_licenses": [ "CC0-1.0" ], "max_forks_repo_name": "kauraare/maths-olympiad", "max_forks_repo_path": "13_invariance.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "0dcacba8a6d1769bbccfedda89d08fa22c3f55a1", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "CC0-1.0" ], "max_issues_repo_name": "kauraare/maths-olympiad", "max_issues_repo_path": "13_invariance.tex", "max_line_length": 565, "max_stars_count": 1, "max_stars_repo_head_hexsha": "0dcacba8a6d1769bbccfedda89d08fa22c3f55a1", "max_stars_repo_licenses": [ "CC0-1.0" ], "max_stars_repo_name": "ZhaoWanLong/maths-olympiad", "max_stars_repo_path": "13_invariance.tex", "max_stars_repo_stars_event_max_datetime": "2019-08-21T21:57:43.000Z", "max_stars_repo_stars_event_min_datetime": "2019-08-21T21:57:43.000Z", "num_tokens": 1285, "size": 4314 }
\PassOptionsToPackage{unicode=true}{hyperref} % options for packages loaded elsewhere \PassOptionsToPackage{hyphens}{url} % \documentclass[]{book} \usepackage{lmodern} \usepackage{amssymb,amsmath} \usepackage{ifxetex,ifluatex} \usepackage{fixltx2e} % provides \textsubscript \ifnum 0\ifxetex 1\fi\ifluatex 1\fi=0 % if pdftex \usepackage[T1]{fontenc} \usepackage[utf8]{inputenc} \usepackage{textcomp} % provides euro and other symbols \else % if luatex or xelatex \usepackage{unicode-math} \defaultfontfeatures{Ligatures=TeX,Scale=MatchLowercase} \fi % use upquote if available, for straight quotes in verbatim environments \IfFileExists{upquote.sty}{\usepackage{upquote}}{} % use microtype if available \IfFileExists{microtype.sty}{% \usepackage[]{microtype} \UseMicrotypeSet[protrusion]{basicmath} % disable protrusion for tt fonts }{} \IfFileExists{parskip.sty}{% \usepackage{parskip} }{% else \setlength{\parindent}{0pt} \setlength{\parskip}{6pt plus 2pt minus 1pt} } \usepackage{hyperref} \hypersetup{ pdftitle={CLU MS Clinical Psychology Thesis Handbook}, pdfauthor={Jamie Bedics, PhD, ABPP}, pdfborder={0 0 0}, breaklinks=true} \urlstyle{same} % don't use monospace font for urls \usepackage{longtable,booktabs} % Fix footnotes in tables (requires footnote package) \IfFileExists{footnote.sty}{\usepackage{footnote}\makesavenoteenv{longtable}}{} \usepackage{graphicx,grffile} \makeatletter \def\maxwidth{\ifdim\Gin@nat@width>\linewidth\linewidth\else\Gin@nat@width\fi} \def\maxheight{\ifdim\Gin@nat@height>\textheight\textheight\else\Gin@nat@height\fi} \makeatother % Scale images if necessary, so that they will not overflow the page % margins by default, and it is still possible to overwrite the defaults % using explicit options in \includegraphics[width, height, ...]{} \setkeys{Gin}{width=\maxwidth,height=\maxheight,keepaspectratio} \setlength{\emergencystretch}{3em} % prevent overfull lines \providecommand{\tightlist}{% \setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}} \setcounter{secnumdepth}{5} % Redefines (sub)paragraphs to behave more like sections \ifx\paragraph\undefined\else \let\oldparagraph\paragraph \renewcommand{\paragraph}[1]{\oldparagraph{#1}\mbox{}} \fi \ifx\subparagraph\undefined\else \let\oldsubparagraph\subparagraph \renewcommand{\subparagraph}[1]{\oldsubparagraph{#1}\mbox{}} \fi % set default figure placement to htbp \makeatletter \def\fps@figure{htbp} \makeatother \usepackage{booktabs} \usepackage{amsthm} \makeatletter \def\thm@space@setup{% \thm@preskip=8pt plus 2pt minus 4pt \thm@postskip=\thm@preskip } \makeatother \usepackage[]{natbib} \bibliographystyle{apalike} \title{CLU MS Clinical Psychology Thesis Handbook} \author{Jamie Bedics, PhD, ABPP} \date{2020-05-07} \begin{document} \maketitle { \setcounter{tocdepth}{1} \tableofcontents } \hypertarget{goal-of-the-handbook}{% \chapter{Goal of the Handbook}\label{goal-of-the-handbook}} The goal of this handbook is to provide students with the information needed to successfully complete the master's thesis in the MS in Clinical Psychology Program (MSCP) at California Lutheran University (CLU). The manual should be understood as a supplement to the broader policies and procedures defined by the program and university. \hypertarget{comprehensive-exams-or-thesis}{% \section{Comprehensive Exams or Thesis?}\label{comprehensive-exams-or-thesis}} Upon enrolling in the MSCP program, students are expected to complete the comprehensive exam in order to earn the degree. 
Students can, however, choose to complete a thesis project, which would replace the comprehensive exam option. \textbf{What is the comprehensive exam?}: The comprehensive exam is a closed-book/closed-note essay test that covers all the material studied during the course of the MSCP program. The test is offered at the end of the spring semester during the second year. The exam consists of a morning session (9AM-Noon) and an afternoon session (1PM-4PM). During each session, students choose to respond to 3 of 5 essay-style questions. Choosing the comprehensive exam results in 3 fewer credit hours for the completion of the program, for a total of 37 units. Instead, in their final semester students pay a comprehensive exam fee. In addition, students taking the comprehensive exams can choose to take PSYC 565 Research Practicum in the fall of their second year or, instead, take an extra elective with Dr.~Bedics' approval. \hypertarget{thesis-project}{% \section{Thesis Project}\label{thesis-project}} Students who choose to work on a thesis must follow the procedures and guidelines in this manual in order to stay in the thesis track. Students who fail to follow any of these steps will be moved back to the comprehensive exam option. Students can, however, continue to work on their research project as an \emph{independent project} and not for partial fulfillment of the requirements of the program (i.e., credit). Students choosing the thesis must enroll in PSYC 565 and PSYC 566 during the fall and spring semester of their second year, respectively. PSYC 565 can, however, be taken if students are choosing to complete an independent research project and would like structured faculty assistance. The completion of 566 for 3 units results in a total of 40 units for the completion of the degree. \textbf{Bottom line}: The main benefit of the thesis project is that it provides students with the opportunity to conduct their own independent research project with the support of faculty. Whether you choose to do this as a \emph{thesis} or an \emph{independent project}, in reality, matters little to future doctoral programs. Instead, future advisors will be looking at the \emph{quality} of the work regardless of the status of the project. \hypertarget{thesis-checklist}{% \chapter{Thesis Checklist}\label{thesis-checklist}} Instructions: Students who wish to pursue the thesis option are required to meet with Dr.~Bedics at the end of every semester to review the steps required to complete the thesis project for that semester. Students who miss any of the following steps are automatically removed from the thesis option and required to complete the comprehensive exam. The student can, however, complete a research project but not for the partial fulfillment of the degree (i.e., credit). \begin{longtable}[]{@{}lllll@{}} \toprule & Task & Date Due & Year & Finished\tabularnewline \midrule \endhead 1. & Thesis Topic Approved & October 1st & First Year & {[}\_\_\_\_\_{]}\tabularnewline 2. & Literature Review Draft Psych 564 & December 15th & First Year & {[}\_\_\_\_\_{]}\tabularnewline 3. & Academic Good Standing & December 15th & First Year & {[}\_\_\_\_\_{]}\tabularnewline 4. & Method Section & May 1st & First Year & {[}\_\_\_\_\_{]}\tabularnewline 5. & Literature Review Revision & May 1st & First Year & {[}\_\_\_\_\_{]}\tabularnewline 6. & Academic Good Standing & May 15th & First Year & {[}\_\_\_\_\_{]}\tabularnewline 7. & Committee Assignment & June 30th & Summer & {[}\_\_{]} Chair{[}\_\_{]} Reader\tabularnewline 8. 
& Academic Good Standing & July 3rd & Summer & {[}\_\_\_\_\_{]}\tabularnewline 9. & Enroll in PSYC 565 & August 1st & Second Year & {[}\_\_\_\_\_{]}\tabularnewline 10. & Committee Approval of Proposal & October 1st & Second Year & {[}\_\_\_\_\_{]}\tabularnewline 11. & IRB Submitted & November 1st & Second Year & {[}\_\_\_\_\_{]}\tabularnewline 12. & Academic Good Standing & December 15th & Second Year & {[}\_\_\_\_\_{]}\tabularnewline 13. & Enroll in PSYC 566 & December 15th & Second Year & {[}\_\_\_\_\_{]}\tabularnewline 14. & Complete Draft to Dr.~Bedics & May 1st & Second Year & {[}\_\_\_\_\_{]}\tabularnewline 15. & Committee Approval of Final & May 10th & Second Year & {[}\_\_{]} Chair{[}\_\_{]} Reader\tabularnewline 16. & OSF Approval & May 1st & Second Year & {[}\_\_\_\_\_{]}\tabularnewline 17. & Thesis Commons & May 15th & Second Year & {[}\_\_\_\_\_{]}\tabularnewline 18. & Thesis Binding & Optional & Second Year & {[}\_\_\_\_\_{]}\tabularnewline 19. & GitHub Blog & Optional & Second Year & {[}\_\_\_\_\_{]}\tabularnewline 20. & Shiny App & Optional & Second Year & {[}\_\_\_\_\_{]}\tabularnewline \bottomrule \end{longtable} \hypertarget{thesis-topic---defining-the-problem-area}{% \section{Thesis Topic - ``Defining the Problem Area''}\label{thesis-topic---defining-the-problem-area}} The general thesis topic must be selected at the beginning of the first semester of the first year. The thesis topic does not, however, determine the hypotheses, methodology or general approach taken by the student to understand the problem (e.g.~experimental, quasi-experimental, meta-analytic methods). \textbf{Due}: October 1st, First Year \hypertarget{literature-review---understanding-the-problem}{% \section{Literature Review - ``Understanding the Problem''}\label{literature-review---understanding-the-problem}} The development of the literature review begins during the fall of the first year in PSYC 564 Advanced Research Methods. The literature review will become the ``introduction'' section of the final thesis project. The literature review shows the student's mastery of the literature surrounding the \emph{problem} to be addressed by the thesis. The typical length of a literature review is between 20 and 40 pages, but there is no maximum length. The development of the literature review is ongoing throughout the two years until completion. The first draft is completed in PSYC 564 during the fall of the first year. Revisions are made during the spring of the first year. \textbf{Due}: December 15th, First Year; May 15th, end of First Year \hypertarget{method-section---solving-the-problem-4}{% \section{Method Section - ``Solving the Problem'' (\#4)}\label{method-section---solving-the-problem-4}} The method section defines the procedures of the thesis project. The method section consists of the participant selection, the selection of methods of measurement or materials, and the procedure. The method section can be worked on in PSYC 552 Psychometrics during the spring of the first year. \hypertarget{committee-assignment-7}{% \section{Committee Assignment (\#7)}\label{committee-assignment-7}} Committee members are faculty or experts in the field who support the student's work on the thesis. Students work with the program director to find the most appropriate committee members to support their research project. 
\begin{itemize} \tightlist \item Students select \emph{2} committee members including a chair and a reader \item One committee member must be affiliated with CLU; the second committee member can be from CLU or another institution \item Committee members typically hold doctoral degrees in areas that support the students research \item Students typically select a committee member who has domain expertise (often the chair) and one that has methodology expertise (reader).\\ \item All committee members are approved by Dr.~Bedics. \end{itemize} \hypertarget{committee-approval-of-proposal-10}{% \section{Committee Approval of Proposal (\#10)}\label{committee-approval-of-proposal-10}} During the summer following the first year, committee members read the literature review and method section and provide a general statement of approval to Dr.~Bedics. Based upon this approval, students are allowed to progress to the \emph{thesis track.} The rest of the thesis process is guided through coursework including PSYC 565 Research Practicum in the fall of the second year and PSYC 566 Thesis in the spring of the second year. \hypertarget{academic-good-standing-12}{% \section{Academic Good Standing (\#12)}\label{academic-good-standing-12}} Academic good standing refers to maintaining a GPA above a 3.0 throughout the entire program and acting consistently with all policies and procedures defined by the program (see Program Handbook) and university (see university policy and procedures). Any student who receives below a B- in any course is not allowed to complete the thesis for course credit and partial fulfillment of the degree. \hypertarget{coursework-relevant-to-the-thesis}{% \chapter{Coursework Relevant to the Thesis}\label{coursework-relevant-to-the-thesis}} Technically, the knowledge students gain from each course can be used to improve the development of the thesis. For example, if you have a particular interest in a specific disorder then it makes sense that you study that disorder in \emph{PSYC 510 Psychopathology}. There are, however, specific courses where the thesis is explicitly incorporated into assignments. Here are MSCP courses and how they are used to support the thesis: \begin{longtable}[]{@{}lllll@{}} \toprule PSYC\# & Course & Semester & Year & Task\tabularnewline \midrule \endhead 564 & Adv. Research Methods & Fall & One & Literature Review\tabularnewline 552 & Psychometrics & Spring & One & Method\tabularnewline 521 & Ethics & Summer & One & Pre-Registration/Power Analysis/Data Analytic Plan\tabularnewline 565 & Research Practicum & Fall & Two & IRB\tabularnewline 566 & Thesis & Spring & Two & Complete Draft due May 1st\tabularnewline \bottomrule \end{longtable} \hypertarget{formatting}{% \chapter{Formatting}\label{formatting}} The thesis paper is completed in a manner consistent with the \href{https://www.amazon.com/s?k=apa+publication+manual+7th+edition\&crid=7T10VJ2PYQZH\&sprefix=apa+pu\%2Caps\%2C261\&ref=nb_sb_ss_i_1_6}{Publication Manual of the APA (7th Edition)}. 
Specifically, the following sections should follow, exactly, the guidelines defined by the 7th Edition: \begin{itemize} \tightlist \item Abstract \item Introduction \item Method \item Results \item Discussion \item Tables \item Figures \item Appendices \end{itemize} There are several sections that \textbf{do not} follow the 7th Edition of the Publication Manual: \begin{itemize} \tightlist \item Title Page \item Table of Contents \end{itemize} For examples of these, please see the Thesis Materials section of the MSCP homepage in Blackboard. \hypertarget{open-science-framework}{% \chapter{Open Science Framework}\label{open-science-framework}} \href{https://osf.io/}{OSF} is a repository that allows you to transparently share your work with the larger scientific community. During the course of the program, you will be using the OSF to organize your thesis and other independent research projects. \hypertarget{thesis-binding}{% \chapter{Thesis Binding}\label{thesis-binding}} The following are instructions for binding your thesis. The binding of your thesis is \emph{optional} and at your expense. You are responsible for the spelling, grammar, and correct APA formatting of your thesis. A bound thesis is a \textbf{final} thesis. \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \tightlist \item At least three (3) bound copies of the Thesis must be ordered. \end{enumerate} \begin{enumerate} \def\labelenumi{\alph{enumi}.} \tightlist \item One copy for the Graduate School of Psychology, one copy for the Thesis Committee Chair, and one personal copy for your possession. You can order more if you prefer (see \#2). \item The three copies must be printed on 25\% rag or cotton fiber watermarked white paper, at least 20 pound weight, 8½ x 11 inches in size (EZERASE, or similar paper is not acceptable). A good example is Southworth Fine Business Paper, 25\% cotton, 24 pound, white, stock \#403C, which is available for purchase from Office Depot, OfficeMax, and Staples. A similar 20 pound weight paper is also available. \item Original signed signature pages on the same paper must be submitted with each of the three copies. \end{enumerate} \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \setcounter{enumi}{1} \tightlist \item Additional personal copies may be ordered at the same time. \end{enumerate} \begin{enumerate} \def\labelenumi{\alph{enumi}.} \tightlist \item Personal copies may be printed on paper of the student's choice (e.g., 20 pound paper). \item Signature pages for the personal copies may be photocopies of the originals as long as they are on paper that is identical to the rest of the thesis. \end{enumerate} \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \setcounter{enumi}{2} \tightlist \item Copies for binding must be delivered to the Program Specialist. \end{enumerate} \begin{enumerate} \def\labelenumi{\alph{enumi}.} \tightlist \item The copies delivered to the Program Specialist are NOT to be bound - just packaged with bright colored paper separating the individual copies. \item Students are responsible for paying binding fees for all copies (the three required copies and any additional personal copies). The cost is \$40 per copy (no matter the length), to be paid by check to CLU. Prices may change. \item The Program Specialist will forward the copies to the bindery as they are delivered. \item Once the Program Specialist receives the copies and payment for binding, a change of grade will be submitted to the Registrar's Office. 
\end{enumerate} \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \setcounter{enumi}{3} \tightlist \item The bound copies are typically ready in about 6-8 weeks and are distributed as follows: \end{enumerate} \begin{enumerate} \def\labelenumi{\alph{enumi}.} \tightlist \item The Graduate School of Psychology copy and the Thesis Committee Chair copy will be delivered via campus mail by the Program Specialist. \item Students will be notified when their personal copies are ready for pick-up. \end{enumerate} \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \setcounter{enumi}{4} \tightlist \item If you have any questions regarding the binding process, please do not hesitate to contact Mengmeng Liu, Graduate Program Specialist, at 805-493-3662 or at \href{mailto:[email protected]}{\nolinkurl{[email protected]}}. \end{enumerate} \hypertarget{thesis-commons}{% \chapter{Thesis Commons}\label{thesis-commons}} \href{https://thesiscommons.org/}{Thesis Commons} is a place for you to publish your thesis to be seen by others. Thesis Commons is supported by OSF and is a way to both archive and showcase your work along with your OSF project. \hypertarget{presentations-and-publications}{% \chapter{Presentations and Publications}\label{presentations-and-publications}} The faculty hope you present your work at conferences and in publications. Please remember to contact your chair \emph{prior} to submitting your work to any professional outlet. Your committee will typically be authors on all of your publicly published work. \bibliography{book.bib,packages.bib} \end{document}
{ "alphanum_fraction": 0.7738381732, "avg_line_length": 52.0863509749, "ext": "tex", "hexsha": "ff0681e469a1d79b024bb21b6bbe5b85783fba3b", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "bec8d34580671098c98b5ecb700928f565e9ce66", "max_forks_repo_licenses": [ "CC0-1.0" ], "max_forks_repo_name": "jdbedics/thesishandbook", "max_forks_repo_path": "docs/thesishandbook 3.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "bec8d34580671098c98b5ecb700928f565e9ce66", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "CC0-1.0" ], "max_issues_repo_name": "jdbedics/thesishandbook", "max_issues_repo_path": "docs/thesishandbook 3.tex", "max_line_length": 444, "max_stars_count": null, "max_stars_repo_head_hexsha": "bec8d34580671098c98b5ecb700928f565e9ce66", "max_stars_repo_licenses": [ "CC0-1.0" ], "max_stars_repo_name": "jdbedics/thesishandbook", "max_stars_repo_path": "docs/thesishandbook 3.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 4835, "size": 18699 }
\section{Experimental Results\label{sec:results}} In experiments on the English and Spanish TAC KBC slot-filling tasks, we find that both USchema and LSTM models outperform the CNN across languages, and that the LSTM tends to perform slightly better than USchema when used as a single model. Ensembling the LSTM and USchema models further increases final F1 scores in all experiments, suggesting that the two different types of model complement each other well. Indeed, in Section \ref{sec:uschema-lstm} we present quantitative and qualitative analysis of our results which further confirms this hypothesis: the LSTM and USchema models each perform better on different pattern lengths and are characterized by different precision-recall tradeoffs. \subsection{English TAC Slot-filling Results} \begin{table}[t!] \setlength{\tabcolsep}{4.1pt} \begin{center} \begin{tabular}{|lrrr|} \hline \bf Model & \bf Recall & \bf Precision & \bf F1 \\ \hline\hline CNN & 31.6 & 36.8 & 34.1 \\ %CNN-parse & ? & ? & ? \\ LSTM & 32.2 & 39.6 & \bf 35.5 \\ USchema & 29.4 & 42.6 & 34.8 \\ \hline\hline USchema+LSTM & 34.4 & 41.9 & 37.7 \\ USchema+LSTM+Es & 38.1 & 40.2 & \bf 39.2 \\ \hline\hline USchema+LSTM+AN & 36.7 & 43.1 & 39.7 \\ USchema+LSTM+Es+AN & 40.2 & 41.2 & \bf 40.7 \\ \citet{roth2014relationfactory} & 35.8 & 45.7 & 40.2 \\ %\hline\hline %\citet{angeli2014stanford}* & --- & --- & 40.9 \\ %USchema+LSTM+Es+AN * & 43.1 & 41.2 & \bf 42.1 \\ \hline \end{tabular} \caption{Precision, recall and F1 on the English TAC 2013 slot-filling task. AN refers to the alternative names heuristic and Es refers to the addition of Spanish text at train time. The LSTM+USchema ensemble outperforms any single model, including the highly-tuned top 2013 system of \protect\citet{roth2014relationfactory}, despite using no handwritten patterns. %both use hand written patterns. * = optimal per relation tuning \label{en-tac-table}} \end{center} \vspace{-.3cm} \end{table} % % \begin{table}[t!] \begin{center} \begin{tabular}{|lrrr|} \hline \bf Model & \bf Recall & \bf Precision & \bf F1 \\ \hline\hline CNN & 28.1 & 29.0 & 28.5 \\ %CNN-parse & ? & ? & ? \\ LSTM & 27.3 & 32.9 & \bf 29.8 \\ USchema & 24.3 & 35.5 & 28.8 \\ \hline\hline USchema+LSTM & 34.1 & 29.3 & 31.5 \\ USchema+LSTM+Es & 34.4 & 31.0 & \bf 32.6 \\ %USchema+LSTM+AN & 34.7 & 29.3 & 31.7 \\ %USchema+LSTM+AN+Es & 33.5 & 32.2 & \bf 32.8 \\ % \citet{bentortac14} & 25.7 & 42.8 & 32.1 \\ % \hline\hline % ICTAS\_OKN & 24.3 & 59.0 & 34.4 \\ % RPI Blender & 29.4 & 47.8 & 36.4 \\ % \citet{angeli2014stanford} & 29.8 & 58.5 & \bf 39.5 \\ \hline \end{tabular} \caption{Precision, recall and F1 on the English TAC 2014 slot-filling task. Es refers to the addition of Spanish text at train time. The AN heuristic is ineffective on 2014, adding only 0.2 to F1. Our system would rank 4/18 in the official TAC 2014 competition behind systems that use hand-written patterns and active learning, despite our system using neither of these additional annotations \protect\citep{SurdeanuMihai2014}.\label{2014-en-tac-table}} \end{center} \vspace{-.4cm} \end{table} Tables \ref{en-tac-table} and \ref{2014-en-tac-table} present the performance of our models on the 2013 and 2014 English TAC slot-filling tasks. Ensembling the LSTM and USchema models improves F1 by 2.2 points for 2013 and 1.7 points for 2014 over the strongest single model on both evaluations, LSTM. 
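For reference (this is the standard definition, not anything specific to our system), the F1 values reported in these tables are the harmonic mean of precision ($P$) and recall ($R$), \[ F_1 = \frac{2PR}{P+R}, \] so an ensemble can post a higher F1 despite a small precision loss provided the accompanying recall gain is large enough.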
Adding the alternative names (AN) heuristic described in Section \ref{sec:models} increases F1 by an additional 2 points on 2013, resulting in an F1 score that is competitive with the state of the art. We also demonstrate the effect of jointly learning English and Spanish models on English slot-filling performance. Adding Spanish data improves our F1 scores by 1.5 points on 2013 and 1.1 on 2014 over using English alone. This places our system higher than the top performer at the 2013 TAC slot-filling task even though our system uses no hand-written rules. The state-of-the-art systems on this task all rely on matching handwritten patterns to find additional answers, while our models use only automatically generated, indirect supervision; even our AN heuristics (Section \ref{sec:pipeline}) are automatically generated. The top two 2014 systems were \citet{angeli2014stanford} and RPI Blender \citep{SurdeanuMihai2014}, which achieved F1 scores of 39.5 and 36.4, respectively. Both of these systems used additional active learning annotation. The third place team \citep{Lin2014} relied on highly tuned patterns and rules and achieved an F1 score of 34.4. % ICTAS\_OKN & 24.3 & 59.0 & 34.4 \\ % RPI Blender & 29.4 & 47.8 & 36.4 \\ % \citet{angeli2014stanford} & 29.8 & 58.5 & \bf 39.5 \\ Our model performs substantially better on 2013 than 2014 for two reasons. First, our RelationFactory~\citep{roth2014relationfactory} retrieval pipeline was a top-performing retrieval pipeline on the 2013 task, but was outperformed on the 2014 task, which introduced new challenges such as confusable entities. Second, improved training using active learning gave the top 2014 systems a boost in performance. No 2013 systems, including ours, used active learning. \citet{bentortac14}, the 4th place team in the 2014 evaluation, used the same retrieval pipeline \citep{roth2014relationfactory} as our model and achieved an F1 score of 32.1. %\begin{table}[tb] %\begin{center} %\caption{Precision, recall and F1 of English-only models on the English TAC 2012 slot-filling task. \label{2012-en-tac-table}} %\begin{tabular}{|lrrr|} %\hline %\bf Model & \bf Recall & \bf Precision & \bf F1 \\ %\hline\hline %CNN & 28.5 & 26.0 & 27.2 \\ %LSTM & 26.9 & 27.1 & 27.0 \\ %USchema & 22.0 & 34.2 & 26.8 \\ %\hline\hline %USchema+LSTM & 30.2 & 26.7 & 29.1 \\ %\citet{angeli2014stanford}* & --- & --- & 30.7 \\ %\hline %\end{tabular} %\end{center} %\end{table} %%%% %%%% \subsection{Spanish TAC Slot-filling Results \label{sec:qual-anal}} \begin{table} \begin{center} \begin{tabular}{|lrrr|} \hline \bf Model & \bf Recall & \bf Precision & \bf F1 \\ \hline\hline %CNN & 24.5 & 7.5 & 11.4 \\ %CNN +Dict & 10.5 & 20.3 & 13.8 \\ LSTM & 9.3 & 12.5 & 10.7 \\ LSTM+Dict & 14.7 & 15.7 & 15.2 \\ USchema & 15.2 & 17.5 & 16.3 \\ \hline\hline USchema+LSTM & 21.7 & 14.5 & 17.3 \\ USchema+LSTM+Dict & 26.9 & 15.9 & \bf 20.0 \\ \hline \end{tabular} \caption{Zero-annotation transfer learning F1 scores on the 2012 Spanish TAC KBP slot-filling task. Adding a translation dictionary improves all encoder-based models. Ensembling LSTM and USchema models performs the best. \label{es-tac-table}} \end{center} \vspace{-.6cm} \end{table} %Table \ref{es-tac-table} presents 2012 Spanish TAC slot-filling results for our multilingual relation extractors trained using zero-annotation transfer learning. For both the CNN and LSTM, tying word embeddings between the two languages results in substantial improvements. 
Table \ref{es-tac-table} presents 2012 Spanish TAC slot-filling results for our multilingual relation extractors trained using zero-annotation transfer learning. Tying word embeddings between the two languages results in substantial improvements for the LSTM. We see that ensembling the non-dictionary LSTM with USchema gives a slight boost over USchema alone, but ensembling the dictionary-tied LSTM with USchema provides a significant increase of nearly 4 F1 points over the highest-scoring single model, USchema. Clearly, grounding the Spanish data using a translation dictionary provides much better Spanish word representations. These improvements are complementary to the baseline USchema model, and yield impressive results when ensembled. In addition to embedding semantically similar English and Spanish phrases close to one another, our models also learn high-quality multilingual word embeddings. In Table \ref{joint-word} we compare Spanish nearest neighbors of English query words learned by the LSTM with dictionary ties versus the LSTM with no ties, using no unsupervised pre-training for the embeddings. Both approaches jointly embed Spanish and English word types, using shared entity embeddings, but the dictionary-tied model learns qualitatively better multilingual embeddings. %% APPENDIX %% See Section \ref{sec:more-qual-anal} for additional qualitative results. 
\begin{table}[h] \setlength{\tabcolsep}{3pt} \small \begin{center} %\begin{minipage}[b]{0.45\linewidth} %\hspace*{-17pt} \begin{tabular}{|ll|} \hline \multicolumn{2}{|c|}{ \bf CEO}\\ \multicolumn{1}{|c}{Dictionary} & \multicolumn{1}{c|} {No Ties} \\ \hline jefe (chief) & CEO \\ CEO & director (principle) \\ ejecutivo (executive) & directora (director) \\ cofundador (co-founder) & firma (firm) \\ president (chairman) & magnate (tycoon)\\ \hline % \multicolumn{2}{|c|}{\bf headquartered}\\ \multicolumn{1}{|c}{Dictionary} & \multicolumn{1}{c|} {No Ties} \\ \hline sede (headquarters) & Geol\'{o}gico (Geological) \\ situado (located) & Treki (Treki) \\ selectivo (selective) & Geof\'{i}sico(geophysical) \\ profesional (vocational) & Normand\'{i}a (Normandy)\\ bas\'{a}ndose (based) & emplea (uses)\\ \hline %\end{tabular} %\end{minipage} %\hspace{-12.5pt} %\begin{minipage}[b]{0.45\linewidth} %\begin{tabular}{|ll|} %\hline \multicolumn{2}{|c|}{\bf hubby}\\ \multicolumn{1}{|c}{Dictionary} & \multicolumn{1}{c|} {No Ties} \\ \hline matrimonio (marriage) & esposa (wife) \\ casada (married) & esposo (husband) \\ esposa (wife) & casada(married) \\ cas\'{o} (married) & embarazada (pregnant) \\ embarazada (pregnant) & embarazo (pregnancy) \\ \hline % \multicolumn{2}{|c|}{\bf alias}\\ \multicolumn{1}{|c}{Dictionary} & \multicolumn{1}{c|} {No Ties} \\ \hline simplificado (simplified) & Weaver (Weaver)\\ sabido (known) & interrogaci\'{o}n (question) \\ seud\'{o}nimo (pseudonym) & alias \\ privatizaci\'{o}n (privatization) & reelecto (reelected) \\ nombre (name) & conocido (known)\\ \hline \end{tabular} \caption{Example English query words (not in translation dictionary) in bold with their top nearest neighbors by cosine similarity listed for the dictionary and no ties LSTM variants. Dictionary-tied nearest neighbors are consistently more relevant to the query word than untied. \label{joint-word}} %\end{minipage} \end{center} % \vspace{-.4cm} \end{table} %%%% %%%% \subsection{USchema vs LSTM \label{sec:uschema-lstm}} \begin{figure}[t!] \begin{center} \includegraphics[scale=0.45]{pr-curve} \caption{Precision-Recall curves for USchema and LSTM on 2013 TAC slot-filling. USchema achieves higher precision values whereas LSTM has higher recall. \label{fig:pr-curve}} \end{center} % \vspace{-.4cm} \end{figure} \begin{figure}[t!] \begin{center} \includegraphics[scale=0.45]{f1-vary-pat-length} \caption{F1 achieved by USchema vs. LSTM models for varying pattern token lengths on 2013 TAC slot-filling. LSTM performs better on longer patterns whereas USchema performs better on shorter patterns. \label{fig:f1-vary-pats}} \end{center} \vspace{-.4cm} \end{figure} We further analyze differences between USchema and LSTM in order to better understand why ensembling the models results in the best performing system. Figure \ref{fig:pr-curve} depicts precision-recall curves for the two models on the 2013 slot-filling task. As observed in earlier results, the LSTM achieves higher recall at the loss of some precision, whereas USchema can make more precise predictions at a lower threshold for recall. In Figure \ref{fig:f1-vary-pats} we observe evidence for these different precision-recall trade-offs: USchema scores higher in terms of F1 on shorter patterns whereas the LSTM scores higher on longer patterns. As one would expect, USchema successfully matches more short patterns than the LSTM, making more precise predictions at the cost of being unable to predict on patterns unseen during training. 
The LSTM can predict using any text between entities observed at test time, gaining recall at the loss of precision. Combining the two models makes the most of their strengths and weaknesses, leading to the highest overall F1. % error analysis Qualitative analysis of our English models also suggests that our encoder-based models (LSTM) extract relations based on a wide range of semantically similar patterns that the pattern-matching model (USchema) is unable to score due to a lack of exact string match in the test data. For example, Table \ref{tab:lstm-us-similar-rels} lists three examples of the \emph{per:children} relation that the LSTM finds which USchema does not, as well as three patterns that USchema does find. Though the LSTM patterns are all semantically and syntactically similar, they each contain different specific noun phrases, e.g. \emph{Lori}, \emph{four children}, \emph{toddler daughter}, \emph{Lee and Albert}, etc. Because these specific nouns weren't seen during training, USchema fails to find these patterns whereas the LSTM learns to ignore the specific nouns in favor of the overall pattern, that of a parent-child relationship in an obituary. USchema is limited to finding the relations represented by patterns observed during training, which limits the patterns matched at test-time to short and common patterns; all the USchema patterns matched at test time were similar to those listed in Table \ref{tab:lstm-us-similar-rels}: variants of \emph{'s son, '}. \begin{table}[h] \begin{center} \small \begin{tabular}{|p{7.6cm}|} \hline \multicolumn{1}{|c|}{\textbf{LSTM}} \\ \hline {\bf McGregor} \emph{is survived by his wife, Lori, and four children, daughters Jordan,} { \bf Taylor} and Landri, and a son, Logan. \\ \hline In addition to his wife, {\bf Mays} \emph{is survived by a toddler daughter and a son,} {\bf Billy Mays Jr.}, who is in his 20s. \\ \hline {\bf Anderson} \emph{is survived by his wife Carol, sons Lee and Albert, daughter} {\bf Shirley Englebrecht} and nine grandchildren. \\ \hline\hline \multicolumn{1}{|c|}{\textbf{USchema}} \\ \hline {\bf Dio} \emph{'s son,} {\bf Dan Padavona}, cautioned the memorial crowd to be screened regularly by a doctor and take care of themselves, something he said his father did not do. \\ \hline But {\bf Marshall} \emph{'s son,} {\bf Philip}, told a different story. \\ \hline ``I'd rather have Sully doing this than some stranger, or some hotshot trying to be the next Billy Mays,'' said the guy who actually is the next {\bf Billy Mays}\emph{, his son} {\bf Billy Mays III}. \\ \hline \end{tabular} \caption{Examples of the \emph{per:children} relation discovered by the LSTM and Universal Schema. Entities are bold and patterns italicized. The LSTM models a richer set of patterns \label{tab:lstm-us-similar-rels}} \end{center} \vspace{-.5cm} \end{table}
{ "alphanum_fraction": 0.7332943698, "avg_line_length": 65.5276595745, "ext": "tex", "hexsha": "09018e8021307313e83dda8fed332e85a80accef", "lang": "TeX", "max_forks_count": 23, "max_forks_repo_forks_event_max_datetime": "2020-08-16T04:10:07.000Z", "max_forks_repo_forks_event_min_datetime": "2016-01-30T21:26:45.000Z", "max_forks_repo_head_hexsha": "929f39dfb8c2f0b44318378fcd8e75557efbaf9f", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "ken77921/torch-relation-extraction", "max_forks_repo_path": "doc/naacl2016/results.tex", "max_issues_count": 7, "max_issues_repo_head_hexsha": "929f39dfb8c2f0b44318378fcd8e75557efbaf9f", "max_issues_repo_issues_event_max_datetime": "2018-10-12T17:38:16.000Z", "max_issues_repo_issues_event_min_datetime": "2016-04-01T05:10:09.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "ken77921/torch-relation-extraction", "max_issues_repo_path": "doc/naacl2016/results.tex", "max_line_length": 1250, "max_stars_count": 61, "max_stars_repo_head_hexsha": "929f39dfb8c2f0b44318378fcd8e75557efbaf9f", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "ken77921/torch-relation-extraction", "max_stars_repo_path": "doc/naacl2016/results.tex", "max_stars_repo_stars_event_max_datetime": "2021-05-18T21:33:46.000Z", "max_stars_repo_stars_event_min_datetime": "2016-01-30T20:07:37.000Z", "num_tokens": 4521, "size": 15399 }
\documentclass[DM,authoryear,toc]{lsstdoc} \usepackage[nonumberlist,nogroupskip,toc,numberedsection=autolabel]{glossaries} \usepackage{environ} \usepackage{enumitem} % Inconsolata is used by lsst-texmf, but doesn't have an italic variant. Fake one. \setmonofont[AutoFakeSlant]{Inconsolata} \makeglossaries \input{glossary} \input meta.tex \title{QA Strategy Working Group Report} \author{% Bellm, E.C., Chiang, H.-F., Fausti, A., Krughoff, K.S., MacArthur, L.A., Morton, T.D., Swinbank, J.D. (chair) and Roby, T. } \setDocRef{\lsstDocType-\lsstDocNum} \date{\vcsDate} \setDocUpstreamLocation{\url{https://github.com/lsst-dm/dmtn-085 }} \setDocAbstract{% This document describes the work undertaken by the QA Strategy Working Group and presents its conclusions as a series of recommendations to DM Project Management. } \setDocChangeRecord{% \addtohist{1.0}{\vcsDate}{Released to DMLT.}{Swinbank} } % Hyperref plays havoc with my crazy recommendation linking TOCs if we let it % turn the section name into a hyperlink, so we use the page numbers instead. \hypersetup{linktoc=page} \makeatletter \newcommand{\printrecs}{% \section{Recommendations}% \label{sec:recs} \begin{enumerate}[leftmargin=7em,label=QAWG-REC-\arabic*:]% \def\@noitemerr{\@latex@warning{Empty objective list}}% \@starttoc{rec}% \end{enumerate}% } \newcommand{\l@rec}[2]{#1} \newenvironment{recbox} { \begin{center} \begin{minipage}[h]{.85\linewidth} \begin{tcolorbox}[colback=green!5!white,colframe=green!75!black,title=\textbf{Recommendation}] } { \end{tcolorbox} \end{minipage} \end{center} } \makeatother % Write recommendations so they looks something like this: % % \begin{recommendation}{rec:label}{Brief summary} % Explanatory text, if any. % \end{recommendation} \NewEnviron{recommendation}[2] {% \label{#1}% \addcontentsline{rec}{rec}{% \noexpand\unexpanded{\unexpanded\expandafter{\item{#2 (\S\ref{#1})}}}% }% \begin{recbox} \emph{#2.} \par \BODY% \end{recbox} }% \begin{document} \maketitle \section{Introduction} \label{sec:intro} This report constitutes the primary artifact produced by the DM \gls{qawg}, addressing its charge as defined in \citeds{LDM-622}. It is worth starting by revisiting the definition of Quality \textit{Assurance}, or \gls{qa}. In particular, note that \citeds{LDM-522} defines QA as ``Quality \textit{Analysis}'', a process which is undertaken by humans during commissioning and operations, and which stands in contrast to automated Quality Control (\gls{qc}) systems. For the purposes of this group, we have taken a more holistic definition (following the guidance in the charge) of \gls{qa}, covering all activities undertaken by the \gls{dm} construction project to ensure the ultimate quality of its deliverables. The complete scope of ``\gls{qa} within \gls{dm}'' is too large to be coherently addressed by any group on a limited timescale. Per its charge, then, the \gls{qawg} has constrained itself to considering only those aspects of \gls{qa} which are most directly relevant to the construction of the LSST Science Pipelines. In particular, we have considered the tools which developers need to construct and debug individual algorithms; tools which can be used to investigate the execution of pipeline runs at scales beyond those which are trivially addressable by individual developers on single compute systems; and tools which can be used to demonstrate that the overall system meets its requirements (to ``verify'' the system). 
This deliberately excludes broader requirements of Commissioning, Science Validation, or run-time \gls{sdqa}\footnote{Effectively, code executed during prompt or data release production processing to demonstrate that the data being both ingested and released is of adequate quality.}. This report consists broadly of three parts. In \S\ref{sec:approach}, we describe the approach that the \gls{qawg} took to addressing its charge. In \S\ref{sec:design}, we present a high-level overview of the systems that we envisage the future DM comprising. Finally, in \S\ref{sec:comp} we identify specific components --- which may be pieces of software, documentation, procedures, or other artifacts --- that should be developed to enable the capabilities we regard as necessary. In some cases, these components may be entirely new developments; in others, existing tools developed by the \gls{dm} subsystem may already be fit for purpose, or can be adapted with some effort. We have noted when this is the case. Throughout, we provide a number of recommendations which we suggest should be adopted by the \gls{dm} Subsystem as a whole. These recommendations identify specific actions that should be taken or capabilities that should be provided; in general, addressing them will require action by the Project Manager or T/CAMs to schedule appropriate activities. Finally, in Appendix \ref{glo:main}, we define a number of key terms which are used throughout this report and which we suggest be adopted across \gls{dm} to provide an unambiguous vocabulary for referring to QA topics. \begin{recommendation} {rec:glossary} {Adopt the definitions of QA-related terms in the \citeds{DMTN-085} glossary subsystem-wide} For example, by inclusion in a subsystem-level glossary; refer to \jira{DM-9807}, \jira{DM-14877}, and \jira{DM-14911}. \end{recommendation} \input{approach/index.tex} \input{design/index.tex} \input{components/index.tex} \section{Conclusion} This document has described the deliberations and conclusions of the QA Working Group. It has taken a wide-ranging view over various aspects of the DM Subsystem, and presented a wide range of recommendations, which are summarised in Appendix \ref{sec:recs}. Many of these recommendations are evolutionary improvements to existing DM tools, practices or documentation. A few involve the development of new capabilities. Of particular note in this latter capability are the call for the development of a integrated drill-down system, described in \S\ref{sec:comp:drill}, and for adoption of the Dask system \S\ref{sec:comp:vis}\footnote{We note that, at time of writing, some work involving Dask is now underway within DM, although we are not aware of design documentation describing exactly what capabilities will be provided.}. These capabilities will require significant resources to deliver, and will therefore require action by DM Project Management. However, we also commend to management some of the lower-profile recommendations: in particular, we feel that modest improvements to dataset organization and to the \gls{ci} system could have major impacts on DM's overall productivity. \appendix \printrecs \glsaddall \renewcommand*{\glsautoprefix}{glo:} \printglossary[style=index,numberedsection=autolabel] \bibliography{lsst,lsst-dm,refs_ads,refs,books} \end{document}
{ "alphanum_fraction": 0.7786037021, "avg_line_length": 40.5976331361, "ext": "tex", "hexsha": "62e897ee21cea68e308e0cc539fb029792856615", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "559239d28f6abb0c2e41fa28116608d56e66b2f0", "max_forks_repo_licenses": [ "CC-BY-4.0" ], "max_forks_repo_name": "lsst-dm/dmtn-085", "max_forks_repo_path": "DMTN-085.tex", "max_issues_count": 1, "max_issues_repo_head_hexsha": "559239d28f6abb0c2e41fa28116608d56e66b2f0", "max_issues_repo_issues_event_max_datetime": "2018-07-11T05:21:05.000Z", "max_issues_repo_issues_event_min_datetime": "2018-07-11T05:21:05.000Z", "max_issues_repo_licenses": [ "CC-BY-4.0" ], "max_issues_repo_name": "lsst-dm/dmtn-085", "max_issues_repo_path": "DMTN-085.tex", "max_line_length": 408, "max_stars_count": null, "max_stars_repo_head_hexsha": "559239d28f6abb0c2e41fa28116608d56e66b2f0", "max_stars_repo_licenses": [ "CC-BY-4.0" ], "max_stars_repo_name": "lsst-dm/dmtn-085", "max_stars_repo_path": "DMTN-085.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1808, "size": 6861 }
\chapter{Towards Virtually Nilpotent Groups}\label{chapter:virtually-heisenberg} In \cref{chapter:polynomial-geodesic-growth} we characterised the geodesic growth of virtually abelian groups. In this chapter, we take the next step towards a classification of polynomial geodesic growth by furnishing an example of a virtually 2-step nilpotent group with polynomial geodesic growth. This is the first group which has been shown to have polynomial geodesic growth and is not virtually abelian. This result is important as it shows that a classification of polynomial geodesic growth must include groups beyond the class of virtually abelian. In \cref{thm:main}, we show that there is a group that is virtually 2-step nilpotent and has polynomial geodesic growth. Our proof relies on a result that is implicit in the work of Blach\`ere~\cite{blachere2003} which we provide in Lemma~\ref{lem:heisenberg-geodesic}. This result shows that there are groups with subexponential geodesic growth which are not virtually abelian, in particular, this example opens the door to the possible existence of a virtually nilpotent group that has intermediate geodesic growth with respect to some generating set. It also raises the question of whether polynomial geodesic growth is restricted to virtually nilpotent groups of step at most 2. \section{A Virtually Heisenberg Group} The integer Heisenberg group is the group of $3 \times 3$ upper-triangular integer matrices with 1's on their diagonals, that is, the group generated by the matrices \[ a = \begin{bmatrix} 1 & 1 & 0\\ 0 & 1 & 0\\ 0 & 0 & 1 \end{bmatrix} \quad\text{and}\quad b = \begin{bmatrix} 1 & 0 & 0\\ 0 & 1 & 1\\ 0 & 0 & 1 \end{bmatrix}. \] It is well known that the integer Heisenberg group, $\Heisenberg$, has the presentation \[ \Heisenberg = \left\langle a,b \mid [a,[a,b]] = [b,[a,b]] = 1 \right\rangle. \] Let $X$ denote the standard generating set $X = \{a,a^{-1},b,b^{-1}\}$ for $\Heisenberg$, following the convention of \textcite{blachere2003} we write $(x,y,z) \in \Heisenberg$ for the element corresponding to the normal form word $[a,b]^z b^y a^x$. Inspired by the polynomial geodesic growth example in \cref{eq:virtually-z2-example}, we construct a virtually Heisenberg group $\vH$ as follows. \begin{equation}\label{eq:presentation-1} \vH = \left\langle a,b,t \mid [a,[a,b]] = [b,[a,b]] = t^2 = 1,\ a^t = b \right\rangle. \end{equation} From the relation $b = a^t$ and applying a Tietze transform, we see that $S = \{a,a^{-1},t\}$ is a generating set for $\vH$. We provide a partial picture of the Cayley graph of $\vH$ in Figure~\ref{fig:HeisCG}. Informally, one may think of this group as two copies of $\Heisenberg$ ``glued'' together with a ``twist'' by $t$ edges. \begin{figure}[!ht] \centering \includegraphics{figure/virtuallyHeisenberg} \caption{A Cayley graph for $\vH$ with respect to the generating set $S$ where the undirected edges are labelled by $t$ and directed edges labelled by $a$.}\label{fig:HeisCG} \end{figure} Our goal is to show that any geodesic of $\vH$ with respect to the generating set $S$ can contain at most $7$ instances of the letter $t$. From this we are able to place a polynomial upper bound on the geodesic growth function of $\vH$. To do this, we first study geodesics of the integer Heisenberg group with respect to the generating set $X$. \Textcite[Theorem~2.2]{blachere2003} provided explicit formulae for the length of elements in $\Heisenberg$, with respect the generating set $X$, by constructing geodesic representatives. 
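Concretely, a direct computation with the matrices above shows that
\[
[a,b] =
\begin{bmatrix}
1 & 0 & 1\\
0 & 1 & 0\\
0 & 0 & 1
\end{bmatrix}
\quad\text{and}\quad
[a,b]^z b^y a^x =
\begin{bmatrix}
1 & x & z\\
0 & 1 & y\\
0 & 0 & 1
\end{bmatrix},
\]
so the coordinates $(x,y,z)$ record the exponents of $a$ and $b$ together with the central (top-right) entry.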
We provide the following lemma which is implicit in the proof of Theorem~2.2 in \cite{blachere2003}. \begin{lemma}\label{lem:heisenberg-geodesic} Each element $(x,y,z) \in \Heisenberg$ has a geodesic representative with respect to the generating set $X = \{a,a^{-1},b,b^{-1}\}$ of the form \[ a^{\alpha_1} b^{\beta_1} a^{\alpha_2} b^{\beta_2} a^{\alpha_3} b^{\beta_3} \quad \mathrm{or} \quad b^{\beta_1} a^{\alpha_1} b^{\beta_2} a^{\alpha_2} b^{\beta_3} a^{\alpha_3} \] where each $\alpha_i,\beta_j \in \mathbb{Z}$. \end{lemma} \begin{proof} We see that the lemma holds in the case of $(0,0,0) \in \Heisenberg$ as the empty word $\varepsilon \in S^*$ is such a geodesic. In the remainder of the proof, we assume that $(x,y,z) \neq (0,0,0)$. Following Blach\`ere~\cite[p.~22]{blachere2003} we reduce this proof to the case where $x,z \geq 0$ and $-x \leq y \leq x$ as follows. Let $\tau \colon X^* \to X^*$ be the monoid isomorphism defined such that $\tau(a^k)=b^k$ and $\tau(b^k)=a^k$ for each $k \in \mathbb{Z}$. Let $w^R$ denote the \emph{reverse} of the word $w$, that is, if $w = w_1 w_2 \cdots w_k$ where each $w_j \in X$, then $w^R = w_k \cdots w_2 w_1$. If $w \in X^*$ is a word as described in the lemma statement with $\overline{w} = (x,y,z)$, then $w'= \tau(w^R)$ is also in the form described in the lemma statement and $\overline{w'} = (y,x,z)$. Moreover, we see that $\tau(w^R)$ is a geodesic if and only if $w$ is a geodesic. Defining the monoid isomorphisms $\varphi_a,\varphi_b\colon X^* \to X^*$ by $\varphi_a(a^k) = a^{-k}$, $\varphi_a(b^k) = b^k$, and $\varphi_b(a^k) = a^{k}$, $\varphi_b(b^k) = b^{-k}$ for each $k \in \mathbb{Z}$, we see that if $w \in X^*$ is a geodesic representative for $(x,y,z) \in \Heisenberg$, then $\varphi_a(w)$, $\varphi_b(w)$ and $\varphi_a(\varphi_b(w))$ are geodesics for $(-x,y,-z)$, $(x,-y,-z)$ and $(-x,-y,z)$, respectively, and each such word is in the form as described in the lemma statement. From application of the above transformations, we may assume without loss of generality that $x,z \geq 0$ and $-x \leq y \leq x$. Let $h = (x,y,z) \in \Heisenberg$, then from \cite[Theorem~2.2]{blachere2003} we have the following formulae for the length $\ell_X(h)$ and (most importantly for us) geodesic representative for $h$. \begin{itemize} \item[I.] If $y \geq 0$, then we have the following cases. \begin{itemize} \item[I.1.] If $x < \sqrt{z}$, then $\ell_X(h) = 2\lfloor\,2\sqrt{z}\, \rfloor - x - y$ and $h$ has a geodesic representative given by $b^{y-y'} S_z a^{x-x'}$ where $x',y'$ are the values given by $\overline{S_z} = (x',y',z)$ (cf.~\cite[p.~32]{blachere2003}), where $S_z$ is as follows. \begin{itemize} \item If $z = (n+1)^2$ for some $n \in \mathbb{N}$, then $S_z = a^{n+1} b^{n+1}$; \item if there exists a $k \in \mathbb{N}$ with $1 \leq k \leq n$ such that $z = n^2 + k$, then let $S_z = a^k b a^{n-k} b^n$; \item otherwise, there exists some $k \in \mathbb{N}$ with $1 \leq k \leq n$ such that $z = n^2+n+k$ and we have $S_z = a^k b a^{n+1-k} b^n$. \end{itemize} \item[I.2.] If $x \geq \sqrt{z}$, then we have the following two cases: \begin{itemize} \item[I.2.1] $xy \geq z$, then $\ell_X(h) = x+y$, otherwise \item[I.2.2] $xy \leq z$, then $\ell_X(h) = 2 \lceil z/x \rceil + x - y$; \end{itemize} and in both cases, the word $b^{y-u-1} a^v b a^{x-v} b^u$ is a geodesic for $h$ where $0 \leq u$, $0 \leq v < x$ and $z = ux+v$ (cf.~pages~24,\,32 and 33 in \cite{blachere2003}). \end{itemize} \item[II.] If $y < 0$, then we have the following cases. \begin{itemize} \item[II.1.] 
If $x \leq \sqrt{z - xy}$, then $\ell_X(h) = 2\lceil\, 2\sqrt{z-xy}\,\rceil - x + y$. Let $n = \lceil \, \sqrt{z-xy}\,\rceil-1$. Then \begin{itemize} \item there is either some $k \in \mathbb{N}$ with $1 \leq k \leq n$ such that we have $z-xy = n^2+k$, and $h$ has $a^{x-n} b^{-n-1} a^k b a^{n-k} b^{n+y}$ as a geodesic representative; or \item there is some $k \in \mathbb{N}$ with $0 \leq k \leq n$ such that we have $z-xy = (n+1)^2-k$ and $a^{x-n} b^{-k} a^{-1} b^{k-n-1} a^{n+1} b^{n+1+y}$ is a geodesic representative for $h$ (cf.~\cite[p.~24]{blachere2003}\footnote{Note that in \cite{blachere2003} there is an error in the second case.}). \end{itemize} % \item[II.2.] If $x \geq \sqrt{z - xy}$, then $\ell_X(h) = 2 \lceil z/x \rceil + x - y$ and $h$ has a geodesic representative of $b^{y-u-1} a^v b a^{x-v} b^u$ where $u,v \geq 0$, $v < x$ and $z = ux+v$ (cf.~\cite[pp.~24\,\&\,33]{blachere2003}). \end{itemize} \end{itemize} Notice that in each of the above cases, we have our desired result. \end{proof} From this lemma, we have the following result. \begin{corollary}\label{cor:max-7-t} If $w \in S^*$ is a geodesic of $\vH$ with respect to the generating set $S = \{a,a^{-1},t\}$, then $w$ contains at most 7 instances of the letter $t$. \end{corollary} \begin{proof} Let $w \in S^*$ be a word containing $8$ instances of $t$ of the form \[ w = a^{n_1} t a^{m_1} t a^{n_2} t a^{m_2} t a^{n_3} t a^{m_3} t a^{n_4} t a^{m_4} t, \] where $n_i,m_i\in \mathbb Z$, and notice that $\overline{w}$ belongs to the subgroup $\Heisenberg$. The Tietze transform given by $b = tat$ which we applied to obtain the generating set $S = \{a,a^{-1},t\}$ from \eqref{eq:presentation-1} yields an automorphism $\varphi \colon \vH \to \vH$ given by $\varphi(a) = a$, $\varphi(t) = t$, $\varphi(b) = tat$, and since $t^2 = 1$ we have $\varphi(b^k) = ta^kt$ for $k \in \mathbb{Z}$. Let $X = \{a,a^{-1},b,b^{-1}\}$ be a generating set for the subgroup $\Heisenberg$. Then from the word $w \in S^*$ we may construct a word \[ w_2 = a^{n_1} b^{m_1} a^{n_2} b^{m_2} a^{n_3} b^{m_3} a^{n_4} b^{m_4}\in X^* \] where $\overline{w_2} = \overline{w}$ since $\varphi(w_2) = w$. Moreover, $|w|_S = |w_2|_X + 8$. From Lemma~\ref{lem:heisenberg-geodesic}, we know that there is a word $w_3 \in X^*$, with $\overline{w_3}=\overline{w_2}$ and $|w_3|_X \leq |w_2|_X$, of the form \[ w_3 = a^{\alpha_1} b^{\beta_1} a^{\alpha_2} b^{\beta_2} a^{\alpha_3} b^{\beta_3} \ \ \text{or}\ \ w_3 = b^{\beta_1} a^{\alpha_1} b^{\beta_2} a^{\alpha_2} b^{\beta_3} a^{\alpha_3} \] where $\alpha_i,\beta_i\in\mathbb Z$ (possibly zero). We then see that $\overline{w}$ can be represented by a word of the form \[ w_4 = a^{\alpha_1} t a^{\beta_1} t a^{\alpha_2} t a^{\beta_2} t a^{\alpha_3} t a^{\beta_3} t \ \ \text{or}\ \ w_4 = t a^{\beta_1} t a^{\alpha_1} t a^{\beta_2} t a^{\alpha_2} t a^{\beta_3} t a^{\alpha_3} \] where \[|w_4|_S = |w_3|_X + 6 < |w_2|_X + 8 = |w|_S.\] Then $w$ cannot be a geodesic as we have a strictly shorter word $w_4$ that represents the same element. Thus, a geodesic of $\vH$ with respect to $S = \{a,a^{-1},t\}$ can contain at most $7$ instances of the letter $t$ as we can replace any subword with $8$ instances of $t$ with a strictly shorter word containing at most $7$ instances of $t$. \end{proof} From this corollary we may immediately obtain the following polynomial upper bound on the geodesic growth function. 
\setcounter{theoremx}{1} \TheoremVirtuallyHeisenberg \begin{proof} From Corollary~\ref{cor:max-7-t}, we see that any geodesic of $\vH$, with respect to the generating set $S$, must have the form \[ w = a^{m_1} t a^{m_2} t \cdots t a^{m_{k+1}} \] where $k \leq 7$ and each $m_i \in \mathbb{Z}$. Then with $k$ fixed and $r = |w|_S$, we see that there are at most $2^{k+1}$ choices for the sign of $m_1,m_2,\ldots,m_{k+1}$, and at most $\binom{r}{k}$ choices for the placement of the $t$'s in $w$. Thus, the geodesic growth function $\gamma_S(n)$ has an upper bound given by \[ \gamma_S(n) \leq \sum_{k=0}^7 \sum_{r=k}^n 2^{k+1} \binom{r}{k} \leq \sum_{k=0}^7 \sum_{r=k}^n 2^{k+1} r^k \leq 8 \cdot 2^8 n^{8} \] which gives the degree 8 polynomial upper bound. \end{proof} \section{Concluding Remarks and Open Questions}\label{sec:virt-heisenberg/concluding} The proof that the virtually 2-step nilpotent group in this chapter has polynomial geodesic growth relied heavily on work of \citeauthor{blachere2003} (see~\cref{lem:heisenberg-geodesic}) and does not appear to be generalisable in its current form. It is then natural to ask if there are nilpotent groups of higher step with analogous properties, in particular, we ask the following question. \begin{question} Is there a virtually $k$-step nilpotent group with polynomial geodesic growth for some $k \geqslant 3$, and if so, is there such an example for each $k$? \end{question} It follows from the work of \textcite[Theorem~2]{bass1972} that the usual growth rate of a virtually nilpotent group is polynomial of integer degree. Moreover, from \cref{thm:geodesic-growth} it is known that if a virtually abelian group has polynomial geodesic growth, then it must be of integer degree since the geodesic growth series is rational in this case. It is not known if there is a group with polynomial geodesic growth of a non-integer degree. \begin{question} Is there a group with polynomial geodesic growth of a non-integer degree? \end{question} Based on experimental results (see~\cite{githubcode}) we conjecture that the geodesic growth rate of $\vH$ with respect to the generating set $S$ can be bounded from above and below by polynomials of degree six (cf.~the volume growth is polynomial of degree four). We ask the following question. From \cref{thm:geodesic-growth} we know that if a virtually abelian group has polynomial geodesic growth, then its geodesic growth series is rational. However, it is unclear if this property is held by virtually nilpotent groups. From experimental results, it appears that the geodesic growth series of $\vH$ with respect to $S$ is not rational (see~\cite{githubcode}). \begin{question} Is the geodesic growth series for $\vH$ with respect to $S$ rational? \end{question} In this thesis we have taken steps towards a classification of polynomial geodesic growth, and more generally towards the study of the geodesic growth of virtually nilpotent groups. In particular, we characterised the geodesic growth of virtually abelian groups; and provided the first example of a group with polynomial geodesic growth that is not virtually abelian. The results in this thesis open up new questions and new techniques for obtaining characterisations.
{ "alphanum_fraction": 0.6778233796, "avg_line_length": 46.3344262295, "ext": "tex", "hexsha": "eb40cea31b363fff188c2a10178f5a3f8fb7d3b9", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "06f7d5f3f5fa8e6bdb9aa48796223acd9ba4ae3d", "max_forks_repo_licenses": [ "CC-BY-4.0" ], "max_forks_repo_name": "alexbishop/phd-thesis", "max_forks_repo_path": "chapter/06_Virtually_Heisenberg.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "06f7d5f3f5fa8e6bdb9aa48796223acd9ba4ae3d", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "CC-BY-4.0" ], "max_issues_repo_name": "alexbishop/phd-thesis", "max_issues_repo_path": "chapter/06_Virtually_Heisenberg.tex", "max_line_length": 510, "max_stars_count": null, "max_stars_repo_head_hexsha": "06f7d5f3f5fa8e6bdb9aa48796223acd9ba4ae3d", "max_stars_repo_licenses": [ "CC-BY-4.0" ], "max_stars_repo_name": "alexbishop/phd-thesis", "max_stars_repo_path": "chapter/06_Virtually_Heisenberg.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 5060, "size": 14132 }
% -*- TeX:Rnw:UTF-8 -*- % ---------------------------------------------------------------- % .R knitr file ************************************************ % ---------------------------------------------------------------- %% % \VignetteEngine{knitr::knitr} % \VignetteIndexEntry{} % \VignetteDepends{} % \VignettePackage{} \documentclass[a4paper,12pt]{article}\usepackage[]{graphicx}\usepackage[]{color} % maxwidth is the original width if it is less than linewidth % otherwise use linewidth (to make sure the graphics do not exceed the margin) \makeatletter \def\maxwidth{ % \ifdim\Gin@nat@width>\linewidth \linewidth \else \Gin@nat@width \fi } \makeatother \usepackage{Sweave} %\usepackage[slovene]{babel} \usepackage[utf8]{inputenc} %% must be here for Sweave encoding check \input{abpkg} \input{abcmd} \input{abpage} \usepackage{pgf,pgfarrows,pgfnodes,pgfautomata,pgfheaps,pgfshade} \usepackage{amsmath,amssymb} \usepackage{colortbl} \input{mysweave} \setkeys{Gin}{width=0.8\textwidth} % set graphicx parameter \usepackage{lmodern} \input{abfont} % ---------------------------------------------------------------- \IfFileExists{upquote.sty}{\usepackage{upquote}}{} \begin{document} %% Sweave settings for includegraphics default plot size (Sweave default is 0.8) %% notice this must be after begin{document} %%% \setkeys{Gin}{width=0.9\textwidth} % ---------------------------------------------------------------- \title{Development of package \pkg{pisar}} \author{A. Blejec} %\address{}% %\email{}% % %\thanks{}% %\subjclass{}% %\keywords{}% %\date{}% %\dedicatory{}% %\commby{}% \maketitle % ---------------------------------------------------------------- %\begin{abstract} % %\end{abstract} % ---------------------------------------------------------------- \tableofcontents \begin{Schunk} \begin{Soutput} Warning: package 'devtools' was built under R version 4.0.3 \end{Soutput} \begin{Soutput} Warning: package 'usethis' was built under R version 4.0.3 \end{Soutput} \end{Schunk} \clearpage \section{Preparation} Set package source directory \begin{Schunk} \begin{Sinput} > purl <- TRUE > (pkgPath <- gsub("/R/", "/R/!packages/", dirname(getwd()))) \end{Sinput} \begin{Soutput} [1] "C:/__D/OMIKE/pisar" \end{Soutput} \begin{Sinput} > (pkgPath <- gsub("/R/", "/R/!packages/", dirname(getwd()))) \end{Sinput} \begin{Soutput} [1] "C:/__D/OMIKE/pisar" \end{Soutput} \begin{Sinput} > (pkgName <- basename(dirname(getwd()))) \end{Sinput} \begin{Soutput} [1] "pisar" \end{Soutput} \begin{Sinput} > dir(pkgPath) \end{Sinput} \begin{Soutput} [1] "_COPYRIGHT" "_DESCRIPTION" "_pkgdown.yml" [4] "_VERSION" "data" "DESCRIPTION" [7] "devel" "doc" "docs" [10] "FAIRDOM.log" "gitHeadInfo.gin" "inst" [13] "man" "NAMESPACE" "out" [16] "pisar.prj" "pisar.prj.bak" "pisar.Rproj" [19] "R" "README.md" "vignettes" \end{Soutput} \begin{Sinput} > dir(file.path(pkgPath, "R")) \end{Sinput} \begin{Soutput} [1] "pisar-initialize.R" "README.md" \end{Soutput} \begin{Sinput} > fctFile <- c("pisar-initialize.Rnw") > (pkgFile <- gsub("\\.Rnw$", "\\.R", fctFile)) \end{Sinput} \begin{Soutput} [1] "pisar-initialize.R" \end{Soutput} \end{Schunk} Files and directories:\\[12pt] Original R source file(s): \url{pisar-initialize.Rnw}\\ Package directory: \url{C:/__D/OMIKE/pisar}\\ Package source file(s): \url{pisar-initialize.R} %\section{Function definitions} \clearpage %<<child=fctFile,eval=!purl>>= %@ \begin{Schunk} \begin{Sinput} > childtxt <- "" \end{Sinput} \end{Schunk} \begin{Schunk} \begin{Sinput} > for (i in 1:length(fctFile)) { + childtxt <- paste(childtxt, 
knit_child(file.path("../devel", fctFile[i]), + quiet = TRUE)) + } \end{Sinput} \end{Schunk} \clearpage \section{Export as source for package} perform steps for building a package. Extract code, make documentation files (\file{*.Rd}), build a package. \subsection{Extract code} Code is extracted to \url{pkgPath}. %<<export to package,eval=purl>>= %getwd() %purl(file.path("../devel",fctFile),output=file.path(pkgPath,"R",pkgFile)) %@ \begin{Schunk} \begin{Sinput} > childtxt <- "" > for (i in 1:length(fctFile)) { + cat(fctFile[i], "\n") + childtxt <- paste(childtxt, purl(file.path("../devel", fctFile[i]), + output = file.path(pkgPath, "R", pkgFile[i]))) + } \end{Sinput} \begin{Soutput} pisar-initialize.Rnw \end{Soutput} \begin{Soutput} processing file: ../devel/pisar-initialize.Rnw \end{Soutput} \begin{Soutput} | | | 0% | |.. | 3% | |... | 5% | |..... | 8% | |...... | 11% | |........ | 14% | |.......... | 16% | |........... | 19% | |............. | 22% | |............... | 24% | |................ | 27% | |.................. | 30% | |................... | 32% | |..................... | 35% | |....................... | 38% | |........................ | 41% | |.......................... | 43% | |............................ | 46% | |............................. | 49% | |............................... | 51% | |................................ | 54% | |.................................. | 57% | |.................................... | 59% | |..................................... | 62% | |....................................... | 65% | |......................................... | 68% | |.......................................... | 70% | |............................................ | 73% | |............................................. | 76% | |............................................... | 78% | |................................................. | 81% | |.................................................. | 84% | |.................................................... | 86% | |...................................................... | 89% | |....................................................... | 92% | |......................................................... | 95% | |.......................................................... | 97% | |............................................................| 100% \end{Soutput} \begin{Soutput} output file: C:/__D/OMIKE/pisar/R/pisar-initialize.R \end{Soutput} \begin{Sinput} > file.path(pkgPath, "R") \end{Sinput} \begin{Soutput} [1] "C:/__D/OMIKE/pisar/R" \end{Soutput} \begin{Sinput} > # dir(file.path(pkgPath,'R')) \end{Sinput} \end{Schunk} C:/__D/OMIKE/pisar/R/pisar-initialize.R \clearpage \subsection{Documentation} Probably not needed if we do the check? 
\begin{Schunk} \begin{Sinput} > devtools::document(pkgPath) \end{Sinput} \begin{Soutput} Updating pisar documentation \end{Soutput} \begin{Soutput} Loading pisar \end{Soutput} \begin{Soutput} Warning in setup_ns_exports(path, export_all, export_imports): Objects listed as exports, but not present in namespace: getMeta.list \end{Soutput} \begin{Soutput} Writing NAMESPACE Writing NAMESPACE \end{Soutput} \begin{Sinput} > usethis::use_package("knitr") \end{Sinput} \begin{Soutput} √ Setting active project to 'C:/__D/OMIKE/pisar' \end{Soutput} \begin{Soutput} Warning in if (delta < 0) {: the condition has length > 1 and only the first element will be used \end{Soutput} \begin{Soutput} Warning in if (delta > 0) {: the condition has length > 1 and only the first element will be used \end{Soutput} \begin{Soutput} * Refer to functions with `knitr::fun()` \end{Soutput} \begin{Sinput} > usethis::use_package("rio") \end{Sinput} \begin{Soutput} * Refer to functions with `rio::fun()` \end{Soutput} \begin{Sinput} > usethis::use_package("tools") \end{Sinput} \begin{Soutput} * Refer to functions with `tools::fun()` \end{Soutput} \begin{Sinput} > usethis::use_package("RCurl") \end{Sinput} \begin{Soutput} * Refer to functions with `RCurl::fun()` \end{Soutput} \begin{Sinput} > # usethis::use_package('httr') usethis::use_package('jsonlite') > usethis::use_build_ignore(c("devel")) \end{Sinput} \end{Schunk} \clearpage \subsection{Check} \begin{Schunk} \begin{Sinput} > # system.time(check <- devtools::check(pkgPath)) > system.time(miss <- devtools::missing_s3(pkgPath)) \end{Sinput} \begin{Soutput} Loading pisar \end{Soutput} \begin{Soutput} Warning in setup_ns_exports(path, export_all, export_imports): Objects listed as exports, but not present in namespace: getMeta.list \end{Soutput} \begin{Soutput} user system elapsed 0.33 0.05 0.38 \end{Soutput} \end{Schunk} \clearpage \subsection{Check results} \begin{Schunk} \begin{Sinput} > check() \end{Sinput} \begin{Soutput} Updating pisar documentation \end{Soutput} \begin{Soutput} Loading pisar \end{Soutput} \begin{Soutput} Warning in setup_ns_exports(path, export_all, export_imports): Objects listed as exports, but not present in namespace: getMeta.list \end{Soutput} \begin{Soutput} Writing NAMESPACE Writing NAMESPACE -- Building --------------------------------------------------------------------------------------------------- pisar -- Setting env vars: * CFLAGS : -Wall -pedantic * CXXFLAGS : -Wall -pedantic * CXX11FLAGS: -Wall -pedantic ------------------------------------------------------------------------------------------------------------------------ <U+221A> checking for file 'C:\__D\OMIKE\pisar/DESCRIPTION' (403ms) - preparing 'pisar': checking DESCRIPTION meta-information ... checking DESCRIPTION meta-information ... <U+221A> checking DESCRIPTION meta-information - installing the package to build vignettes ----------------------------------- - installing *source* package 'pisar' ... ** using staged installation ** R ** data *** moving datasets to lazyload DB ** inst ** byte-compile and prepare package for lazy loading ** help *** installing help indices converting help for package 'pisar' finding HTML links ... 
done fileName html fileType html fsummary html getLayer html getMeta html getRoot html out.path html pasteMeta html pisa html pisar html print.pISAmeta html readMeta html ** building package indices ** installing vignettes ** testing if installed package can be loaded from temporary location Error: package or namespace load failed for 'pisar' in namespaceExport(ns, exports): undefined exports: getMeta.list Error: loading failed Execution halted ERROR: loading failed - removing 'C:/Users/ablejec/AppData/Local/Temp/Rtmp8cqwOg/Rinst29748419e4de4/pisar' ----------------------------------- ERROR: package installation failed \end{Soutput} \begin{Soutput} Error in (function (command = NULL, args = character(), error_on_status = TRUE, : System command 'Rcmd.exe' failed, exit status: 1, stdout + stderr: E> * checking for file 'C:\__D\OMIKE\pisar/DESCRIPTION' ... OK E> * preparing 'pisar': E> * checking DESCRIPTION meta-information ... OK E> * installing the package to build vignettes E> ----------------------------------- E> * installing *source* package 'pisar' ... E> ** using staged installation E> ** R E> ** data E> *** moving datasets to lazyload DB E> ** inst E> ** byte-compile and prepare package for lazy loading E> ** help E> *** installing help indices E> converting help for package 'pisar' E> finding HTML links ... done E> fileName html E> fileType html E> fsummary html E> getLayer html E> getMeta html E> getRoot html E> out.path html E> pasteMeta html E> pisa html E> pisar html E> print.pISAmeta html E> readMeta html E> ** building package indices E> ** installing vignettes E> ** testing if installed package can be loaded from temporary location E> Error: package or namespace load failed for 'pisar' in namespaceExport(ns, exports): E> undefined exports: getMeta.list E> Error: loading failed E> Execution halted E> ERROR: loading failed E> * removing 'C:/Users/ablejec/AppData/Local/Temp/Rtmp8cqwOg/Rinst29748419e4de4/pisar' E> ----------------------------------- E> ERROR: package installation failed \end{Soutput} \end{Schunk} \clearpage \subsection{Build a package} Build the package \begin{Schunk} \begin{Sinput} > devtools::build(pkgPath, manual = TRUE, quiet = FALSE) \end{Sinput} \begin{Soutput} checking for file 'C:\__D\OMIKE\pisar/DESCRIPTION' ... <U+221A> checking for file 'C:\__D\OMIKE\pisar/DESCRIPTION' (377ms) - preparing 'pisar': checking DESCRIPTION meta-information ... checking DESCRIPTION meta-information ... <U+221A> checking DESCRIPTION meta-information - installing the package to build vignettes ----------------------------------- - installing *source* package 'pisar' ... ** using staged installation ** R ** data *** moving datasets to lazyload DB ** inst ** byte-compile and prepare package for lazy loading ** help *** installing help indices converting help for package 'pisar' finding HTML links ... 
done fileName html fileType html fsummary html getLayer html getMeta html getRoot html out.path html pasteMeta html pisa html pisar html print.pISAmeta html readMeta html ** building package indices ** installing vignettes ** testing if installed package can be loaded from temporary location Error: package or namespace load failed for 'pisar' in namespaceExport(ns, exports): undefined exports: getMeta.list Error: loading failed Execution halted ERROR: loading failed - removing 'C:/Users/ablejec/AppData/Local/Temp/RtmpU9wiG7/Rinst2a6ac20e31259/pisar' ----------------------------------- ERROR: package installation failed \end{Soutput} \begin{Soutput} Error in (function (command = NULL, args = character(), error_on_status = TRUE, : System command 'Rcmd.exe' failed, exit status: 1, stdout + stderr: E> * checking for file 'C:\__D\OMIKE\pisar/DESCRIPTION' ... OK E> * preparing 'pisar': E> * checking DESCRIPTION meta-information ... OK E> * installing the package to build vignettes E> ----------------------------------- E> * installing *source* package 'pisar' ... E> ** using staged installation E> ** R E> ** data E> *** moving datasets to lazyload DB E> ** inst E> ** byte-compile and prepare package for lazy loading E> ** help E> *** installing help indices E> converting help for package 'pisar' E> finding HTML links ... done E> fileName html E> fileType html E> fsummary html E> getLayer html E> getMeta html E> getRoot html E> out.path html E> pasteMeta html E> pisa html E> pisar html E> print.pISAmeta html E> readMeta html E> ** building package indices E> ** installing vignettes E> ** testing if installed package can be loaded from temporary location E> Error: package or namespace load failed for 'pisar' in namespaceExport(ns, exports): E> undefined exports: getMeta.list E> Error: loading failed E> Execution halted E> ERROR: loading failed E> * removing 'C:/Users/ablejec/AppData/Local/Temp/RtmpU9wiG7/Rinst2a6ac20e31259/pisar' E> ----------------------------------- E> ERROR: package installation failed \end{Soutput} \begin{Sinput} > devtools::load_all() \end{Sinput} \begin{Soutput} Loading pisar \end{Soutput} \begin{Soutput} Warning in setup_ns_exports(path, export_all, export_imports): Objects listed as exports, but not present in namespace: getMeta.list \end{Soutput} \end{Schunk} Install \begin{Schunk} \begin{Sinput} > devtools::install(pkgPath) \end{Sinput} \begin{Soutput} utf8 (1.1.4 -> 1.2.1 ) [CRAN] crayon (1.3.4 -> 1.4.1 ) [CRAN] cli (2.2.0 -> 2.5.0 ) [CRAN] vctrs (0.3.6 -> 0.3.8 ) [CRAN] rlang (0.4.10 -> 0.4.11 ) [CRAN] pillar (1.4.7 -> 1.6.0 ) [CRAN] lifecycle (0.2.0 -> 1.0.0 ) [CRAN] ellipsis (0.3.1 -> 0.3.2 ) [CRAN] stringi (1.5.3 -> 1.6.1 ) [CRAN] Rcpp (1.0.5 -> 1.0.6 ) [CRAN] tibble (3.0.4 -> 3.1.1 ) [CRAN] cpp11 (0.2.5 -> 0.2.7 ) [CRAN] tidyselect (1.1.0 -> 1.1.1 ) [CRAN] forcats (0.5.0 -> 0.5.1 ) [CRAN] mime (0.9 -> 0.10 ) [CRAN] xfun (0.19 -> 0.22 ) [CRAN] openssl (1.4.3 -> 1.4.4 ) [CRAN] curl (4.3 -> 4.3.1 ) [CRAN] bitops (1.0-6 -> 1.0-7 ) [CRAN] data.table (1.13.2 -> 1.14.0 ) [CRAN] haven (2.3.1 -> 2.4.1 ) [CRAN] highr (0.8 -> 0.9 ) [CRAN] RCurl (1.98-1.2 -> 1.98-1.3) [CRAN] rio (0.5.16 -> 0.5.26 ) [CRAN] knitr (1.30 -> 1.33 ) [CRAN] \end{Soutput} \begin{Soutput} Installing 25 packages: utf8, crayon, cli, vctrs, rlang, pillar, lifecycle, ellipsis, stringi, Rcpp, tibble, cpp11, tidyselect, forcats, mime, xfun, openssl, curl, bitops, data.table, haven, highr, RCurl, rio, knitr \end{Soutput} \begin{Soutput} Error: (converted from warning) package 'knitr' is in use and will not be 
installed \end{Soutput} \begin{Sinput} > ## str(out)(shell(paste( file.path(R.home('bin'),'Rcmd.exe'), ' INSTALL > ## --no-multiarch --with-keep.source', pkgPath) ,intern=FALSE)) \end{Sinput} \end{Schunk} Load \begin{Schunk} \begin{Sinput} > devtools::load_all() \end{Sinput} \begin{Soutput} Loading pisar \end{Soutput} \begin{Soutput} Warning in setup_ns_exports(path, export_all, export_imports): Objects listed as exports, but not present in namespace: getMeta.list \end{Soutput} \begin{Sinput} > cat("Package:", pkgName, "\n") \end{Sinput} \begin{Soutput} Package: pisar \end{Soutput} \begin{Sinput} > library(pkgName, character.only = TRUE) > help(package = (pkgName)) \end{Sinput} \begin{Soutput} Warning in file.show(outFile, delete.file = TRUE, title = gettextf("Documentation for package %s", : '"C:\Program Files (x86)\EmEditor\EmEditor.exe"' not found \end{Soutput} \end{Schunk} \section{PDF documentation} \begin{Schunk} \begin{Sinput} > (pkgName <- basename(dirname(getwd()))) \end{Sinput} \begin{Soutput} [1] "pisar" \end{Soutput} \begin{Sinput} > (instPath <- find.package(pkgName)) \end{Sinput} \begin{Soutput} [1] "C:/__D/OMIKE/pisar" \end{Soutput} \begin{Sinput} > pdfFile <- file.path(getwd(), paste(pkgName, "pdf", sep = ".")) > if (file.exists(pdfFile)) file.remove(pdfFile) > system(paste(shQuote(file.path(R.home("bin"), "R")), "CMD", "Rd2pdf", shQuote(instPath))) \end{Sinput} \begin{Soutput} [1] 1 \end{Soutput} \begin{Sinput} > dir(pattern = pkgName) \end{Sinput} \begin{Soutput} [1] "pisar-functions.Rnw" "pisar-initialize.Rnw" [3] "pisar-initialize.synctex(busy)" "pisar-initialize.tex" [5] "pisar-makePkg.log" "pisar-makePkg.pdf" [7] "pisar-makePkg.Rnw" "pisar-makePkg.synctex" [9] "pisar-makePkg.tex" \end{Soutput} \end{Schunk} \begin{Schunk} \begin{Sinput} > help(package = (pkgName), help_type = "pdf") \end{Sinput} \begin{Soutput} Warning in file.show(outFile, delete.file = TRUE, title = gettextf("Documentation for package %s", : '"C:\Program Files (x86)\EmEditor\EmEditor.exe"' not found \end{Soutput} \end{Schunk} Send package to R Windows builder \begin{Schunk} \begin{Sinput} > devtools::build_win(pkgPath) \end{Sinput} \end{Schunk} %<<child='encChild2.Rnw'>>= %@ % ---------------------------------------------------------------- %\bibliographystyle{chicago} %\addcontentsline{toc}{section}{\refname} %\bibliography{ab-general} %-------------------------------------------------------------- %\clearpage %\appendix %\phantomsection\addcontentsline{toc}{section}{\appendixname} %\section{\R\ funkcije} %\input{} \clearpage \section*{SessionInfo} {\small Windows 10 x64 (build 19042) \begin{itemize}\raggedright \item R version 4.0.2 (2020-06-22), \verb|x86_64-w64-mingw32| \item Locale: \verb|LC_COLLATE=Slovenian_Slovenia.1250|, \verb|LC_CTYPE=Slovenian_Slovenia.1250|, \verb|LC_MONETARY=Slovenian_Slovenia.1250|, \verb|LC_NUMERIC=C|, \verb|LC_TIME=Slovenian_Slovenia.1250| \item Running under: \verb|Windows 10 x64 (build 19042)| \item Matrix products: default \item Base packages: base, datasets, graphics, grDevices, methods, stats, utils \item Other packages: devtools~2.3.2, knitr~1.30, pisar~0.1.0.9000, usethis~2.0.0 \item Loaded via a namespace (and not attached): assertthat~0.2.1, bitops~1.0-6, callr~3.5.1, cellranger~1.1.0, cli~2.2.0, compiler~4.0.2, crayon~1.3.4, curl~4.3, data.table~1.13.2, desc~1.2.0, digest~0.6.27, ellipsis~0.3.1, evaluate~0.14, fansi~0.4.2, forcats~0.5.0, foreign~0.8-80, formatR~1.7, fs~1.5.0, glue~1.4.2, haven~2.3.1, hms~1.0.0, httr~1.4.2, jsonlite~1.7.2, lifecycle~0.2.0, 
magrittr~2.0.1, memoise~1.1.0, openxlsx~4.2.3, pillar~1.4.7, pkgbuild~1.2.0, pkgconfig~2.0.3, pkgload~1.1.0, prettyunits~1.1.1, processx~3.4.5, ps~1.5.0, purrr~0.3.4, R6~2.5.0, Rcpp~1.0.5, RCurl~1.98-1.2, readxl~1.3.1, remotes~2.2.0, rio~0.5.16, rlang~0.4.10, roxygen2~7.1.1, rprojroot~2.0.2, rstudioapi~0.13, sessioninfo~1.1.1, stringi~1.5.3, stringr~1.4.0, testthat~3.0.1, tibble~3.0.4, tools~4.0.2, vctrs~0.3.6, withr~2.3.0, xfun~0.19, xml2~1.3.2, zip~2.1.1 \end{itemize} Project path:\verb' C:/__D/OMIKE/pisar '\\ Main file :\verb' ../devel/pisar-makePkg.Rnw ' \subsection*{View as vignette} Project files can be viewed by pasting this code to \R\ console:\\ \begin{Schunk} \begin{Sinput} > projectName <-"pisar"; mainFile <-"pisar-makePkg" \end{Sinput} \end{Schunk} \begin{Schunk} \begin{Sinput} > commandArgs() > library(tkWidgets) > openPDF(file.path(dirname(getwd()),"doc", > paste(mainFile,"PDF",sep="."))) > viewVignette("viewVignette", projectName, # > file.path("../devel",paste(mainFile,"Rnw",sep="."))) > # \end{Sinput} \end{Schunk} \vfill \hrule \vspace{3pt} \footnotesize{ %Revision \SVNId\hfill (c) A. Blejec%\input{../_COPYRIGHT.} %\SVNRevision ~/~ \SVNDate \noindent \texttt{Git Revision: \gitCommitterUnixDate \gitAbbrevHash{} (\gitCommitterDate)} \hfill \copyright A. Blejec\\ \texttt{ \gitReferences} \hfill \verb'../devel/pisar-makePkg.Rnw'\\ } \end{document} % ----------------------------------------------------------------
{ "alphanum_fraction": 0.4674397228, "avg_line_length": 40.5243553009, "ext": "tex", "hexsha": "48e1b36753523388231529d5aea1be034acbcbaf", "lang": "TeX", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2019-11-06T23:07:31.000Z", "max_forks_repo_forks_event_min_datetime": "2019-11-06T23:07:31.000Z", "max_forks_repo_head_hexsha": "8a8dc568c808e6dda8184c578b3e6b4cb71c66cc", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "NIB-SI/pisar", "max_forks_repo_path": "devel/pisar-makePkg.tex", "max_issues_count": 3, "max_issues_repo_head_hexsha": "8a8dc568c808e6dda8184c578b3e6b4cb71c66cc", "max_issues_repo_issues_event_max_datetime": "2022-03-06T21:52:34.000Z", "max_issues_repo_issues_event_min_datetime": "2021-08-22T15:27:08.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "NIB-SI/pisar", "max_issues_repo_path": "devel/pisar-makePkg.tex", "max_line_length": 5321, "max_stars_count": 1, "max_stars_repo_head_hexsha": "8a8dc568c808e6dda8184c578b3e6b4cb71c66cc", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "NIB-SI/pisar", "max_stars_repo_path": "devel/pisar-makePkg.tex", "max_stars_repo_stars_event_max_datetime": "2020-04-07T11:28:28.000Z", "max_stars_repo_stars_event_min_datetime": "2020-04-07T11:28:28.000Z", "num_tokens": 7369, "size": 28286 }
% !TEX root = ../main.tex
\newpage
\section*{Abstract}
\addcontentsline{toc}{subsection}{Abstract}
The synchronisation of networks of oscillators, network topology and network plasticity can only be understood through a holistic approach, and so each domain is investigated in relation to the others. The Theta neuron model is analysed to understand the feedback between the frequency and the phase response, mediated by the electrical current. Different network topologies are then described in terms of their degree distribution. Networks of Theta neurons are studied via the synchronisation of the mean-field. Inspired by the process of synaptic plasticity, learning rules are then established in order to observe emergent network topologies.
{ "alphanum_fraction": 0.825136612, "avg_line_length": 104.5714285714, "ext": "tex", "hexsha": "d95e119cd2aa974349f7701beeb8a7588ae5cf2c", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "506a4e8aba392330f8a6ecc6b229e2c8322b8e83", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "simonaertssen/AdaptiveNeuronalNetworks", "max_forks_repo_path": "Writing/Frontmatter/Abstract.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "506a4e8aba392330f8a6ecc6b229e2c8322b8e83", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "simonaertssen/AdaptiveNeuronalNetworks", "max_issues_repo_path": "Writing/Frontmatter/Abstract.tex", "max_line_length": 631, "max_stars_count": null, "max_stars_repo_head_hexsha": "506a4e8aba392330f8a6ecc6b229e2c8322b8e83", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "simonaertssen/AdaptiveNeuronalNetworks", "max_stars_repo_path": "Writing/Frontmatter/Abstract.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 138, "size": 732 }
\documentclass[a4paper,12pt,twoside]{memoir} \usepackage{longtable} \usepackage{btp} % Use the trainermanual package option (i.e. \usepackage[trainermanual]{btp}) to generate the Trainer's version of the manual %\usepackage[ % noinfo, % cam, % cross, % crosses as marks % a4, % width=6.25in, % the width of the galley % height=9.25in, % the height of the galley % center % actual page is centered on the galley %]{crop} % Set some Workshop specific info \setWorkshopTitle{Some Workshop} \setWorkshopVenue{Some Venue} \setWorkshopDate{A Date} \setWorkshopAuthor{ Workshop Author 1\\ Workshop Author 2\\ } \begin{document} % % Workshop Title Page % \workshoptitlepage % % CC-BY % \input{licences/licence.tex} \clearpage \tableofcontents \chapter{Workshop Information} \clearpage % % Trainers Page % \input{010_trainers/trainers.tex} % % Workshop Preamble % \input{015_preamble/preamble.tex} % % Start of modules % Switch chapter styling to module % \chapterstyle{module} % % End of modules % Switch back to normal workshop chapter styling % \chapterstyle{workshop} \chapter{Space for Personal Notes or Feedback} \clearpage % % Some empty ruled comments pages % \myruledpage{0cm}{1cm} \myruledpage{0cm}{1cm} \myruledpage{0cm}{1cm} \myruledpage{0cm}{1cm} \end{document}
{ "alphanum_fraction": 0.7181409295, "avg_line_length": 17.7866666667, "ext": "tex", "hexsha": "610f97f871f8b71e68b5b61db08d17a5ba629057", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "8900d22fafc72f5a63f0e6f1160125883e699ac5", "max_forks_repo_licenses": [ "CC-BY-3.0", "OLDAP-2.2.1" ], "max_forks_repo_name": "BPA-CSIRO-Workshops/btp-worksop-mgn", "max_forks_repo_path": "template.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "8900d22fafc72f5a63f0e6f1160125883e699ac5", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "CC-BY-3.0", "OLDAP-2.2.1" ], "max_issues_repo_name": "BPA-CSIRO-Workshops/btp-worksop-mgn", "max_issues_repo_path": "template.tex", "max_line_length": 145, "max_stars_count": null, "max_stars_repo_head_hexsha": "8900d22fafc72f5a63f0e6f1160125883e699ac5", "max_stars_repo_licenses": [ "CC-BY-3.0", "OLDAP-2.2.1" ], "max_stars_repo_name": "BPA-CSIRO-Workshops/btp-worksop-mgn", "max_stars_repo_path": "template.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 408, "size": 1334 }
\section{Conclusion}
This report originates from the course entitled Open Collaboration and Peer-Production (i290m-ocpp) \cite{classweb2013} at the UC Berkeley I School, with the main goals of getting {\it hands-on} experience of joining and contributing to open collaboration projects, and of understanding the underpinning incentive mechanisms and contribution dynamics. Joining open source projects is not as simple as one might expect: multiple technical, incentive, social and emotional issues, as well as imperfect communication between communities and newbies, might prevent efficient joining. \\
\noindent The experience acquired in the class guided our field exploration of joining scripts and how they are influenced by organization structures. Acquiring and maintaining a critical mass of developers, as well as solving governance problems, are among the most critical organizational issues in open collaboration. Here, we have analyzed the problem from the viewpoint of our group, almost all of whom were newbies joining existing open source projects.\\
\noindent The choice of project and type of contribution was completely unconstrained, and although limited to twenty-four students, we could gather a large variety of experiences, ranging from hardcore open source software projects (e.g. bitcoin), to citizen science, to community wikis. From these heterogeneous experiences, first through qualitative reporting and then through a complementary survey, we could identify and characterize the most important issues that either enable or, on the contrary, raise barriers against efficient onboarding into open source projects.\\
\noindent Our collective experience highlights the need for human-to-human communication: personal contact is an efficient outreach method and helps significantly guide participants into their first contributions. However, many of the projects we have joined lacked proper documentation that would facilitate joining. We openly ask whether some communities really desire newcomers for their project, or at least whether appropriate governance structures are in place to welcome new contributors. Our results, and the experience of writing this report, highlighted the prevalence and importance of benevolent dictatorship as the most prominent model of management in open source projects. Besides onboarding documentation and a welcoming community structure, broad visibility of task ownership was seen as helpful in the joining process. We also analyzed business models and funding: our sample included a mix of projects funded by academic grants, donations or corporate sponsorship. These funding options have a clear impact on the choice of licensing: projects supported by academic grants and donations used more restrictive licenses, such as the GNU GPL and BSD, while more business-oriented projects most prominently used the Apache license. Contrary to previous studies \cite{belenzon2012}, and obviously with far fewer data points, we found that the license is not an important factor in project choice. We surmise, however, that our sample of 24 UC Berkeley students is highly biased, and poorly represents the broader variety of motivations for joining open source projects. However, it might give a reasonably reliable view of the intentions of graduate students when joining open source projects.\\
\noindent While limited, this field study suggests some interesting directions for future research. The experience of students who joined projects that were only a few years old was significantly different from that of students who joined more established projects.
We would like to see future research directed towards illuminating the differences in joining experiences between participants who take part in projects of different development stages. \\ \noindent Finally, this collective report also reflects the experience gained from a first attempt to teach open collaboration and peer-production at UC Berkeley in a way that respects as much as possible the spirit of open source. Pooling knowledge and resources for the sake of achieving such a collective work in a limited time has been a great challenge and an opportunity to learn about the somewhat frustrating misalignment of incentives that necessarily create tensions with the ultimate goal of achieving and delivering a collective work. We leave a report, which is less than perfect, but precisely strongly emphasizes on the current limitations of this experience. We hope that others will take on the challenge to reproduce and improve the class \cite{classweb2013}, and possibly build further knowledge on the intricate relationship between joining open source projects and the way communities manage onboarding of newbies. Accumulating practical knowledge on the onboarding process is critical for both the increasing interest by enthusiasts in joining projects, and for the long-term sustainability of open source communities.
{ "alphanum_fraction": 0.8327468433, "avg_line_length": 301.9375, "ext": "tex", "hexsha": "84c35e6a55de1d630e693750ed1c654399692589", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "02982821412ef169ea08eb32b1b12929501d4624", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "sbenthall/i290m-ocpp-site", "max_forks_repo_path": "report/chapters/070_conclusion.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "02982821412ef169ea08eb32b1b12929501d4624", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "sbenthall/i290m-ocpp-site", "max_issues_repo_path": "report/chapters/070_conclusion.tex", "max_line_length": 1700, "max_stars_count": 1, "max_stars_repo_head_hexsha": "02982821412ef169ea08eb32b1b12929501d4624", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "sbenthall/i290m-ocpp-site", "max_stars_repo_path": "report/chapters/070_conclusion.tex", "max_stars_repo_stars_event_max_datetime": "2017-02-25T01:00:07.000Z", "max_stars_repo_stars_event_min_datetime": "2017-02-25T01:00:07.000Z", "num_tokens": 837, "size": 4831 }
\section{Managing Groups of Workers \experimental}
\label{worker-groups}

When managing a set of workers, a central actor often dispatches requests to them. For this purpose, the class \lstinline^actor_pool^ implements a lightweight abstraction for managing a set of workers using a dispatching policy. Unlike groups, pools usually own their workers.

Pools are created using the static member function \lstinline^make^, which takes either one argument (the policy) or three (number of workers, factory function for workers, and dispatching policy). After construction, one can add new workers via messages of the form \texttt{('SYS', 'PUT', worker)}, remove workers with \texttt{('SYS', 'DELETE', worker)}, and retrieve the set of workers as \lstinline^vector<actor>^ via \texttt{('SYS', 'GET')}.

An actor pool takes ownership of its workers. When forced to quit, it sends an exit message to all of its workers, forcing them to quit as well. The pool also monitors all of its workers.

Pools do not cache messages, but enqueue them directly in a worker's mailbox. Consequently, a terminating worker loses all unprocessed messages. For more advanced caching strategies, such as reliable message delivery, users can implement their own dispatching policies.

\subsection{Dispatching Policies}

A dispatching policy is a functor with the following signature:

\begin{lstlisting}
using uplock = upgrade_lock<detail::shared_spinlock>;
using policy = std::function<void (actor_system& sys,
                                   uplock& guard,
                                   const actor_vec& workers,
                                   mailbox_element_ptr& ptr,
                                   execution_unit* host)>;
\end{lstlisting}

The argument \lstinline^guard^ is a shared lock that can be upgraded for unique access if the policy includes a critical section. The argument \lstinline^workers^ is a vector containing all workers managed by the pool. The argument \lstinline^ptr^ contains the full message as received by the pool. Finally, \lstinline^host^ is the current scheduler context that can be used to enqueue workers into the corresponding job queue.

For convenience, the actor pool class comes with a set of predefined policies, accessible via factory functions.

\begin{lstlisting}
actor_pool::policy actor_pool::round_robin();
\end{lstlisting}

This policy forwards incoming requests to the workers in a round-robin manner. There is no guarantee that messages are consumed, i.e., work items are lost if the worker exits before processing all of its messages.

\begin{lstlisting}
actor_pool::policy actor_pool::broadcast();
\end{lstlisting}

This policy forwards \emph{each} message to \emph{all} workers. Synchronous messages to the pool will be received by all workers, but the client will only recognize the first arriving response message---or error---and discard subsequent messages. Note that this is not caused by the policy itself, but is a consequence of forwarding synchronous messages to more than one actor.

\begin{lstlisting}
actor_pool::policy actor_pool::random();
\end{lstlisting}

This policy forwards incoming requests to one worker from the pool, chosen uniformly at random. Analogous to \lstinline^round_robin^, this policy does not cache or redispatch messages.
\begin{lstlisting}
using join = function<void (T&, message&)>;
using split = function<void (vector<pair<actor, message>>&, message&)>;
template <class T>
static policy split_join(join jf, split sf = ..., T init = T());
\end{lstlisting}

This policy models split/join or scatter/gather workflows, where a work item is split into as many tasks as there are available workers and the individual results are then joined together before sending the full result back to the client.

The join function is responsible for ``gluing'' all result messages together to create a single result. The function is called with the result object (initialized using \lstinline^init^) and the current result message from a worker.

The first argument of a split function is a mapping from actors (workers) to tasks (messages). The second argument is the input message. The default split function is a broadcast dispatch, sending each worker the original request.
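As an illustration of typical usage (a minimal sketch; the exact \lstinline^make^ overloads and atom names vary between CAF releases, so the calls below are indicative rather than normative), a pool of simple adder workers using the round-robin policy could be set up as follows:

\begin{lstlisting}
#include "caf/all.hpp"

using namespace caf;

// A worker that answers addition requests.
behavior adder() {
  return {
    [](int x, int y) {
      return x + y;
    }
  };
}

void pool_example(actor_system& sys) {
  auto factory = [&sys] { return sys.spawn(adder); };
  // Three-argument form: number of workers, worker factory, policy.
  auto pool = actor_pool::make(8, factory, actor_pool::round_robin());
  // Requests are dispatched to exactly one worker per message.
  anon_send(pool, 10, 20);
  // Add a ninth worker at runtime via a ('SYS', 'PUT', worker) message.
  anon_send(pool, sys_atom::value, put_atom::value, sys.spawn(adder));
  // Shutting down the pool forces all owned workers to quit as well.
  anon_send_exit(pool, exit_reason::user_shutdown);
}
\end{lstlisting}

Because the pool owns its workers, the final \lstinline^anon_send_exit^ also terminates the workers; any messages still waiting in their mailboxes are lost, as discussed above.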
{ "alphanum_fraction": 0.7654291795, "avg_line_length": 45.4731182796, "ext": "tex", "hexsha": "87423449751e629849d25413ee912310cc050b1f", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "bee96d84bbc95414df5084b2d65f4886ba731558", "max_forks_repo_licenses": [ "BSL-1.0", "BSD-3-Clause" ], "max_forks_repo_name": "dosuperuser/actor-framework", "max_forks_repo_path": "doc/tex/ManagingGroupsOfWorkers.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "bee96d84bbc95414df5084b2d65f4886ba731558", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "BSL-1.0", "BSD-3-Clause" ], "max_issues_repo_name": "dosuperuser/actor-framework", "max_issues_repo_path": "doc/tex/ManagingGroupsOfWorkers.tex", "max_line_length": 79, "max_stars_count": 1, "max_stars_repo_head_hexsha": "bee96d84bbc95414df5084b2d65f4886ba731558", "max_stars_repo_licenses": [ "BSL-1.0", "BSD-3-Clause" ], "max_stars_repo_name": "dosuperuser/actor-framework", "max_stars_repo_path": "doc/tex/ManagingGroupsOfWorkers.tex", "max_stars_repo_stars_event_max_datetime": "2021-03-06T19:51:07.000Z", "max_stars_repo_stars_event_min_datetime": "2021-03-06T19:51:07.000Z", "num_tokens": 922, "size": 4229 }
\section{Light Armor Training}\label{perk:lightArmorTraining}
\textbf{Cost:} 250CP\\
\textbf{Requirements:} Cloth Armor Training I\\
\textbf{Passive, Repeatable, Source(250 Gold)}\\
You are trained with light armor.\\
You add your level to Dodge and Evade checks while equipped with armor with the ``Light'' descriptor.\\
\\
Level Progression:\\
\\
\rowcolors{2}{lightgray}{white}
\begin{tabular}{l | l | l | l}
Level & CP Cost & Gold Cost & Effect\\
II & 550CP & 550 Gold & +1d4 Evade, +2 Dodge\\
III & 1,200CP & 1,200 Gold & +2d4 Evade, +4 Dodge\\
IV & 2,500CP & 2,500 Gold & +3d4 Evade, +6 Dodge\\
\end{tabular}
{ "alphanum_fraction": 0.6895424837, "avg_line_length": 34, "ext": "tex", "hexsha": "a5ab4f5034816439b3f3eb4a124fb78af1fb545e", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "73781f7cd7035b927a35199af56f9da2ad2c2e95", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "NTrixner/RaggedLandsPenAndPaper", "max_forks_repo_path": "perks/martial/armorTraining/lightarmortraining.tex", "max_issues_count": 155, "max_issues_repo_head_hexsha": "73781f7cd7035b927a35199af56f9da2ad2c2e95", "max_issues_repo_issues_event_max_datetime": "2022-03-03T13:49:05.000Z", "max_issues_repo_issues_event_min_datetime": "2018-03-18T13:19:57.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "NTrixner/RaggedLandsPenAndPaper", "max_issues_repo_path": "perks/martial/armorTraining/lightarmortraining.tex", "max_line_length": 101, "max_stars_count": 6, "max_stars_repo_head_hexsha": "73781f7cd7035b927a35199af56f9da2ad2c2e95", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "NTrixner/RaggedLandsPenAndPaper", "max_stars_repo_path": "perks/martial/armorTraining/lightarmortraining.tex", "max_stars_repo_stars_event_max_datetime": "2022-02-03T09:32:08.000Z", "max_stars_repo_stars_event_min_datetime": "2018-03-13T09:33:31.000Z", "num_tokens": 218, "size": 612 }
\documentclass[10pt,twocolumn]{article} \usepackage{cite} \usepackage{amsmath,amssymb,amsfonts,amsthm} \usepackage{multicol} \setlength{\columnsep}{0.75cm} \usepackage{caption} \usepackage{graphicx} \usepackage{csquotes} \usepackage{todonotes} \def\BibTeX{{\rm B\kern-.05em{\sc i\kern-.025em b}\kern-.08em T\kern-.1667em\lower.7ex\hbox{E}\kern-.125emX}} \usepackage{hyperref} \usepackage[margin=1.75cm]{geometry} \usepackage[hpos=0.72\paperwidth,vpos=0.97\paperheight,angle=0,scale=0.8]{draftwatermark} \SetWatermarkText{Preprint} \SetWatermarkLightness{0.5} \SetWatermarkText{Preprint of preliminary work} \newtheorem{theorem}{Theorem}[section] \newtheorem{corollary}{Corollary}[theorem] \newtheorem{lemma}[theorem]{Lemma} \title{Optimally Reliable \& Cheap Payment Flows on the Lightning Network} \author{Rene Pickhardt \& Stefan Richter\thanks{The authors have contributed to this work in equal measure. The order merely reflects the fact that this line of research was initiated by Rene Pickhardt.}} \begin{document} \maketitle \begin{abstract} Today, payment paths in Bitcoin's Lightning Network are found by searching for shortest paths on the fee graph. We enhance this approach in two dimensions. Firstly, we take into account the probability of a payment actually being possible due to the unknown balance distributions in the channels. Secondly, we use minimum cost flows as a proper generalization of shortest paths to multi-part payments (MPP). In particular we show that under plausible assumptions about the balance distributions we can find the most likely MPP for any given set of senders, recipients and amounts by solving for a (generalized) integer minimum cost flow with a separable and convex cost function. Polynomial time exact algorithms as well as approximations are known for this optimization problem. We present a round-based algorithm of min-cost flow computations for delivering large payment amounts over the Lightning Network. This algorithm works by updating the probability distributions with the information gained from both successful and unsuccessful paths on prior rounds. In all our experiments a single digit number of rounds sufficed to deliver payments of sizes that were close to the total local balance of the sender. Early experiments indicate that our approach increases the size of payments that can be reliably delivered by several orders of magnitude compared to the current state of the art. We observe that finding the cheapest multi-part payments is an NP-hard problem considering the current fee structure and propose dropping the base fee to make it a linear min-cost flow problem. Finally, we discuss possibilities for maximizing the probability while at the same time minimizing the fees of a flow. While this turns out to be a hard problem in general as well --- even in the single path case --- it appears to be surprisingly tractable in practice. \end{abstract} %========================================================================== \section{Introduction} The Lightning Network is a payment channel network using source-based onion routing to deliver payments from senders to recipients. A necessary condition for a single onion package to be delivered successfully is that the onion follows a path with sufficient liquidity. In this context sufficient liquidity does not just mean that the publicly known channel capacities of the channels on the path between sender and recipient are larger than the payment amount. 
Rather, every node along the path has to own enough of the channel capacity as its local balance to be able to forward the amount to the next hop. As broadcasting the balance values would hinder the scalability of the Lightning Network, they are generally kept private and thus unknown to other nodes. Currently the sender node mitigates this uncertainty by entering a trial-and-error loop for delivering payments. However, past experiments have demonstrated that payments often fail, in particular as the amounts to be delivered increase\cite{DBLP:journals/corr/abs-1911-09432,DBLP:journals/corr/abs-2006-14358,lange2021impact, pickhardt2021security}.

Current implementations largely find candidate paths for the trial-and-error loop by solving shortest path problems or generalizations like $k$-shortest paths on the weighted channel graph, where the weights correspond to the routing fees charged by nodes forwarding a payment along a channel.\footnote{We also observe a combination of the fee function with features like a penalty for longer CLTV values, prior experiences of using the channel and a bias against smaller channels.} That approach tries to find the cheapest payment path for the sender but does not systematically factor in success probabilities. As a consequence, the payment loop might try a large number of cheapest but unreliable paths before timing out instead of using slightly more expensive but vastly more reliable paths. It also does not produce an optimal split of the payment into multiple paths.

Software implementations of the Lightning Network protocol have mainly focused on three strategies for handling the uncertainty of sending a payment.
\begin{enumerate}
\item Incentivizing the path finding algorithm to favor larger channels.\footnote{\url{https://lists.ozlabs.org/pipermail/c-lightning/2021-May/000203.html}}%clightning dev
\item Ad-hoc splitting of large payment amounts into smaller ones after failed attempts using a technique called multi-part payments (MPP).
\item Using provenance scores of nodes and channels and other data collected during operation of a node to estimate which nodes and channels might be reliable.
\end{enumerate}

In this work we develop a general technique that achieves the effects of these rather ad-hoc strategies in a systematic fashion. This text largely builds upon and extends prior research which pointed out that the uncertainty of channel balance values can be mathematically modeled to arrive at a probabilistic path finding scheme\cite{pickhardt2021security}. While this earlier work demonstrated a significant reduction in failed attempts while delivering payments, it still mostly kept the perspective of a single path finding problem.

It has long been a folklore view that delivering payments on the Lightning Network can be modeled as a network flow problem. In what follows we show that the discovery of the most likely multi-path payment is equivalent to solving a (generalized) min-cost flow problem in which the negative logarithms of channel success probabilities are considered as the cost of using a channel. The channel success probabilities are priors that have to be estimated, through sampling for example. Under the assumption of an independent uniform balance distribution on each channel as the prior, finding the most probable multi-part payment for a given amount from a sender to a recipient can be usefully modeled as solving an integer min-cost flow problem with a separable convex cost function.
While in general min-cost flow problems are complex optimization problems, the above-mentioned subclass is known to have polynomial time solutions with a runtime complexity of $O(m \cdot \log(U) \cdot S(n,m))$ where $n$ is the number of nodes, $m$ is the number of edges in the network, $U$ is the amount to be delivered, and $S(n,m)$ is the time it takes to obtain a solution to the single source shortest path problem\cite{Minoux1986,ahuja1993network}. This is typically done using Dijkstra's Algorithm in time $O((m+n) \cdot \log(n))$, so that we arrive at a total runtime of $O(\log(U)\cdot(m^2+mn)\cdot\log(n))$.

If the balance values were known, the decision of whether a payment between two nodes can be conducted could be made by computing a max-flow / min-cut and comparing it to the amount to be delivered. Given the uncertainty of channel balances, the decision problem is much harder to solve and still involves a trial-and-error loop. We introduce a practical round-based algorithm that can either quickly deliver the payment or decide with a high probability that the min-cut is lower than the payment amount that is supposed to be delivered between sender and receiver. It starts by creating the most likely MPP split as the solution of the min-cost flow problem and sending out the partial payments. The algorithm reduces its uncertainty about the balance values by learning from the failures and successes. This is done by updating the prior probabilities after the failing onions have returned. Finally, it creates another candidate MPP for the residual amount by solving a min-cost flow on the updated graph.

\section{Payments as Integer Flows}
\label{flows}
Let $G = (V,E)$ be a directed graph and $u: E \longrightarrow \mathbb{N}$ a function assigning capacity values to all edges in the graph. For every node $v\in V$ let $b_v \in \mathbb{Z}$ denote its \emph{excess, supply or demand}. Typically $b_v$ will be $0$ except for the source node $s$ (with supply $b_v>0$) and the destination $d$ (with demand $b_v<0$). We call a function $f: E \longrightarrow \mathbb{N}_{0}$ a \emph{flow} if the following conditions hold:
\begin{enumerate}
\item \textbf{capacity constraint}: For every edge $e \in E$ we have:
$$0 \leq f(e) =: f_{e} \leq u_{e} := u(e).$$
\item \textbf{flow conservation}: For every node $i\in V$ we have:
$$\sum_{(i,j)\in E} f_{ij} - \sum_{(j,i)\in E} f_{ji} = b_i.$$
\end{enumerate}

\subsection{Flows on a Known Balance Graph}
Assuming the balance values of every payment channel of the Lightning Network were publicly known, one could create a flow network called \emph{balance graph} as follows: For any given payment channel between two nodes $i$ and $j$ with capacity $u$ we know that the balance $u_j$ of node $j$ plus the balance $u_i$ of node $i$ must be equal to the capacity $u$. On the balance graph we add two directed edges for the payment channel between the nodes $i,j$. First we add the edge $(i,j)$ with a capacity of $u(i,j) = u_i$ and then we add another edge in the opposing direction $j,i$ with a capacity of $u(j,i)=u_j$.\footnote{In reality, channel reserves would need to be accounted for by lowering the capacities to the spendable balances.} Observe that in this balance graph, a set of payment paths from $s$ to $d$ determines a flow simply by summing up the amount sent through any edge.\footnote{We abstract from the fact that the amount sent through a payment path diminishes along the path by the fees collected; we assume the total fees are included until the end.
See Section~\ref{generalized} for a discussion of the impact of this relaxation.} In fact, the converse is also true: It is easy to see that any flow can be decomposed into paths from $s$ to $d$ and cycles in linear time~\cite[p.~79ff]{ahuja1993network}. Since cycles do not change supply or demand at any node, we can safely ignore them. In this way, we can represent any MPP split as a flow and vice versa. We emphasize that the paths of the decomposed flow that lead to the MPP split do not need to be disjoint.

Given the balance values, we could decide the maximum possible amount that can be sent for any given node pair $s,d \in V$ by computing the minimal $s$--$d$-cut. Using, say, the Ford-Fulkerson algorithm\cite{ford_fulkerson_1956} one could compute a max-flow and dissect it into a series of paths. These paths could then be used to construct several onions for a multi-part payment.

In a preliminary test we use two different prior distributions to generate two static balance graphs and check on each how often the min-cut between arbitrarily chosen pairs of sender and receiver is actually determined by the minimum of the local outbound capacity of the sender and the local inbound capacity of the recipient. In Figure~\ref{fig:mincut} we can see that for both of these distributions --- which have been observed on the entire Lightning Network and an active core subset, respectively --- in only about $5\%$ of the payment pairs is the max-flow smaller than the maximum amount locally sendable by the payer and the maximum amount locally receivable by the target node.

\begin{figure}[htpb]
\center
\includegraphics[width=0.45\textwidth]{img/maxFlowFraction.png}
\caption{The percentage of payment pairs where the maximal payable amount is actually lower than the upper bound given by the local balance known to both sender and recipient.}
\label{fig:mincut}
\end{figure}

The fact that, with publicly known balance values, in $19$ out of $20$ payment pairs the amount that can be delivered is as high as the local limits of the payment pair is in stark contradiction to the currently observed and reported \cite{DBLP:journals/corr/abs-1911-09432,DBLP:journals/corr/abs-2006-14358,lange2021impact, pickhardt2021security} success rates. In fact, success rates decline sharply for amounts larger than $100,000$ satoshi (1 BTC = 100,000,000 sat), for which delivery should almost always be possible. We conjecture that this is due to the fact that in reality, the balance values are not publicly known. This forces us to take the total channel capacities $u$ as capacities on our flow network in both directions. Note that finding a max-flow on this network is not sufficient for deciding if a payment can be made. However, we can generalize the flow model to the case of uncertain balance values and will return to this question in Section~\ref{rounds}.

\subsection{Uncertainty Networks}
Earlier research~\cite{pickhardt2021security} has introduced a mathematical framework for handling uncertain balance values in payment channels with the goal of making path finding decisions that maximize the success probability of payment paths. We recall that --- given a prior belief about the balance uncertainty via a probability distribution $P_e$ and a random variable $X_e$ --- the channel success probability for a payment of size $f_e$ for a channel $e$ is expressed as $P_e(X_e\geq f_e)$.
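For illustration, this channel success probability can be evaluated in a few lines of Python under the uniform prior that is made explicit below. The snippet is only a sketch with our own naming and is not taken from any Lightning implementation.
\begin{verbatim}
def uniform_success_probability(a, u):
    """P(X >= a) for a channel balance X distributed uniformly
    on the integers {0, ..., u} (u is the channel capacity)."""
    if a <= 0:
        return 1.0
    if a > u:
        return 0.0
    return (u + 1 - a) / (u + 1)

# Sending 1 sat over a channel of capacity 4 succeeds
# with probability 4/5 under this prior.
assert abs(uniform_success_probability(1, 4) - 0.8) < 1e-12
\end{verbatim}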
Whereas in~\cite{pickhardt2021security} the goal was maximizing the path success probabilities, here we aim to maximize the success probability for the entire flow. Assuming the channel balances to be independently distributed, this combined success probability is simply the product of all channel success probabilities:
\[
P(f)=\prod_{e\in E}P_e\left(X_{e} \geq f_e\right)
\]
Any flow $f$ that maximizes the success probability $P(f)$ also minimizes $-\log\left(P(f)\right)$ and vice versa.
%The following sentence is a mathematical beauty and it would be really sad to have it removed (:
Using the fact that the logarithm is a group homomorphism from the multiplicative group of positive real numbers to the additive group of real numbers we can write:
\[
-\log\left(\prod_{e\in E}P_e(X_{e} \geq f_e)\right) = \sum_{e\in E}-\log\left(P_e(X_e \geq f_e)\right)
\]
The right-hand side of the equation has the form of a separable cost function $C$ for the flow $f$ from the theory of min-cost flows:
\[
C(f) := \sum_{e\in E}-\log\left(P_e(X_e \geq f_e)\right)
\]
Given fixed probability distributions for the channel balances, finding the most likely flow is therefore equivalent to finding a flow of minimum cost on this \emph{uncertainty network}. Any such flow can then be dissected into the most likely multi-path payment as in the previous section.

In general, finding optimal solutions to the min-cost flow problem with non-linear costs is NP-hard~\cite{guisewite1990minimum}. Fortunately, in the special case of integer flows and capacities together with a separable convex cost function a polynomial algorithm has been introduced by~\cite{Minoux1986}. Since our flows are integer-valued and the cost function is separable, we need to understand when a cost function arising from channel success probabilities is convex in order to be able to apply such an algorithm. Because the cost function $C$ is separable, we can test convexity independently for each channel $e$ and its cost function $c_e(f_e):=-\log\left(P_e(X_e\geq f_e)\right)$. After simplifying by writing $x:=f(e)$ and defining $p(x):=P_e(X_e\geq x)$ we get:
\[
c_e(x)=-\log(p(x))
\]
Assuming this function is twice differentiable, it is convex iff its second derivative is nonnegative on the entire domain. The first derivative is:
\[
c_e'(x) = -\frac{p'(x)}{p(x)}
\]
and the second derivative is:
\[
c_e''(x) = \frac{(p'(x))^2-p(x)\,p''(x)}{p^2(x)}
\]
In particular we see that the negative log probabilities result in a convex cost function iff the following inequality holds:
\[
(p'(x))^2 \geq p(x)p''(x)
\]
In the uniform case $p(x)=\frac{u+1-x}{u+1}$ (cf.~\cite{pickhardt2021security}) we have $p'(x)=\frac{-1}{u+1}$ and $p''(x)=0$, demonstrating that the resulting cost function $c_e(x)$ is convex on its entire domain. This indicates that the polynomial algorithm can be used to find the flow that minimizes:
\[
C(f)=\sum_{e\in E}-\log\left(\frac{u_{e}+1-f_e}{u_{e}+1}\right)
\]
Thus the most probable multi-part split for delivering the amount $U$ can be found by solving a min-cost flow problem and dissecting the flow into paths. We do not explicitly handle fees in this model, but observe that if we can find a flow that includes an upper bound on the total fees, the real payment success probability will be at least as high as the one predicted by this model, since the transported amount only decreases along the paths and our probability function is monotonic.
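For concreteness, the per-channel cost under the uniform prior and a discrete convexity check can be sketched in Python as follows. The names are ours, and the check uses second differences, which is the appropriate notion of convexity for integer-valued flows.
\begin{verbatim}
import math

def channel_cost(a, u):
    """c_e(a) = -log((u + 1 - a) / (u + 1)) for 0 <= a <= u,
    the negative log of the uniform-prior success probability."""
    return -math.log((u + 1 - a) / (u + 1))

def is_discretely_convex(cost, u):
    """Check cost(a+1) + cost(a-1) >= 2 * cost(a) for all
    interior integer points of the domain {0, ..., u}."""
    return all(cost(a + 1, u) + cost(a - 1, u) >= 2 * cost(a, u)
               for a in range(1, u))

# The negative log probabilities form a convex sequence,
# so the polynomial min-cost flow algorithm is applicable.
assert is_discretely_convex(channel_cost, 100)
\end{verbatim}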
\subsection{Maximizing Success Probabilities vs Minimizing Fees}
\label{fees}
The current routing fee function on the Lightning Network is a separable cost function depending only on the flow across each channel. However, it is easy to see that the function is not convex at the transition between flow $0$ (cost $0$) and flow $1$ (cost base fee plus unit flow cost), whenever the base fee is larger than the proportional unit flow cost. In fact, a cost function of this form is often called a \emph{concave fixed-charge cost} in the literature. Unfortunately, finding the flow that minimizes a cost function of this form is a strongly NP-hard problem as shown in~\cite{guisewite1990minimum} by reduction from 3-SAT to a min-cost flow problem with only fixed-charge costs.

On the other hand, if the Lightning Network community were to drop the base fee, the separable cost function would become linear in the flow value of each arc. Finding an MPP split that minimizes routing fees could easily be done by solving the linear min-cost flow problem using any of a number of algorithms~\cite{ahuja1993network}. However, we note that minimizing the routing fees alone tends to saturate the full capacity of cheap channels. Such paths are highly unlikely to succeed since they can only do so when the entire balance is on the right side of the channel (even ignoring channel reserves). In our opinion that makes optimizing purely for fees a poor choice in general. On the other hand, only maximizing the success probability might allow routing node operators and liquidity providers to game the algorithm and extract unlimited fees.

So in practice, it should be our goal to both minimize fees and maximize success probabilities. Naturally, these goals can be contradictory, since node operators can and will choose fees freely. Two ways of expressing this goal might be
\begin{enumerate}
\item to minimize fees with a side constraint of achieving a certain minimal probability bound, or
\item to maximize success probability with a side constraint of staying below a certain maximal fee bound.
\end{enumerate}
Unfortunately, both of these problems are weakly NP-hard via the following argument: First, observe that adding unit capacities to the problems and looking for a flow of size 1 makes them instances of the so-called constrained shortest-path problem. Then,~\cite[p.~798]{ahuja1993network} shows that this subclass of constrained min-cost flow problems is already NP-hard by reduction from the Knapsack problem.

Fortunately, the picture is not quite as bleak as it looks at first sight. First, the reduction only implies weak NP-hardness, meaning that we could find a polynomial algorithm whenever $U=O(n^k)$ for some $k$. Strictly speaking, the flow size $U$ is always bounded by a constant in our applications, since the total number of bitcoin is limited. Looking into the theory of Lagrangian relaxation~\cite[p.~598ff]{ahuja1993network} methods, however, gives us immediate practical results instead of just hope. In fact, the two cases enumerated above collapse into one when we try to find bounds for them by applying a simple one-dimensional Lagrange multiplier, that is, we try to minimize:
\[
\sum_{e\in E}\left[-\log\left(\frac{u_{e}+1-f_e}{u_{e}+1}\right)+\mu\cdot f_e\cdot fee(e)\right]
\]
the linear combination of both costs, with a suitable multiplicative constant $\mu$.
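In code, the per-channel term of this combined objective might look as follows. This is a minimal sketch under the uniform prior; \texttt{fee\_rate} stands for the proportional fee per unit of flow on the channel, the base fee is ignored as discussed above, and all names are ours.
\begin{verbatim}
import math

def combined_channel_cost(f, u, fee_rate, mu):
    """Per-channel term of the Lagrangian objective for a flow of
    f sat over a channel of capacity u (0 <= f <= u):
    -log((u + 1 - f) / (u + 1)) + mu * f * fee_rate."""
    reliability_cost = -math.log((u + 1 - f) / (u + 1))
    fee_cost = mu * f * fee_rate
    return reliability_cost + fee_cost

# mu = 0 optimizes purely for success probability, while a large
# mu optimizes almost exclusively for routing fees.
\end{verbatim}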
By calculating this combined min-cost flow (note that the linear combination of the two cost functions remains convex), not only do we get a feasible flow of size $U$, but because of the \emph{Lagrangian bounding principle}~\cite[p. 605f]{ahuja1993network}, whatever total fee $x$ and success probability $p$ we achieve, we are guaranteed that this combination is optimal on both sides. That is, there is no solution with cheaper total fees than $x$ and at least probability $p$, and there is no solution with higher success probability than $p$ that still only costs at most $x$. So in case we are not satisfied with the parameters of the solution we got, finding an adequate solution is reduced to either increasing $\mu$ and getting a potentially cheaper, but less probable solution, or decreasing $\mu$ and receiving a more reliable but also more expensive solution. \subsection{Generalized Flows with Losses} \label{generalized} So far we have ignored the fact that every node on every path takes some part of the payment as a fee, which means that the total amount of flow gets smaller towards the target. This observation is best described by a slightly more general model called \emph{generalized (minimum cost) flows with gains and losses}. In this formulation, the flow conservation condition for any node $i\in V$ is changed to $$\sum_{(i,j)\in E} f_{ij} - \sum_{(j,i)\in E} \gamma_{ji}f_{ji} = b_i.$$ Thus, when we send 1 unit of flow along an edge $(i,j)$, $\gamma_{ij}$ units of flow arrive at node $j$. The edge multipliers $\gamma_e$ are positive rational numbers\footnote{In the Lightning Network, these correspond to the proportional part of the fee. Again, including the base fee makes solving the problem infeasible, which is why we propose abolishing it.} and the edges are called \emph{gainy} ($\gamma_e > 1$) or \emph{lossy} ($\gamma_e < 1$) accordingly. Notice that until now, this formulation still depends on us knowing the exact supply/demand amounts at the source and destination nodes. This is especially troubling here, because we cannot just use an upper bound to the supply as before: there might not be a solution that uses the corresponding exact amount of fees. Therefore we introduce a cost-free high-capacity gainy (say, $\gamma_s=2$) self-loop at the source node and set the source excess to zero. This allows for introduction of an arbitrary amount of flow at the source. Then, we aim to minimize the convex cost function as before under the remaining flow and capacity constraints. The generalized flow problem is clearly a proper generalization of the min-cost flow problem outlined above. Unfortunately, it also appears to be harder to solve. The algorithm that we implemented for the min-cost flow problem does not seem to carry over to this more general problem. We did find pointers to some approaches~\cite{tsengbertsekas} that might be worth exploring. So far, we have been reluctant to invest too much effort in this direction, because in our application, the fees are generally expected to be a small fraction of the total flow. Thus it is doubtful if the greater computational effort will be worth the slightly more favorable probability/fee result. \section{Payment Algorithm} \label{rounds} Once we are able to efficiently compute minimum cost flows optimizing either for success probabilities, fees or both using the Lagrange relaxation we naturally arrive at a round-based payment algorithm that can be used by a node $s$ that wishes to send an amount $U$ to a destination $d$. 
For now, we assume a Lagrange-style combination of channel success probabilities and the linear fee rate function, as we believe this achieves the most useful results. The round-based algorithm is initialized by the sending node $s$ in the following way: It starts by creating a new uncertainty network model $N$ of the Lightning Network which encodes the initial uncertainty and the information $s$ gains about the balance distribution on the network during the rounds.
%TODO: put back in? Thus the network will be updated during the process of delivering the payment during the following rounds.
In order to deliver the full amount, the node $s$ will have to solve a minimum cost flow problem, send out onions and update the uncertainty network based on the successes and failures of the onions in each of the rounds.

The uncertainty network $N$ consists of all the nodes that are in the same connected component as $s$ in the channel graph. The edges of $N$ correspond to payment channels on the Lightning Network. If $s$ has no further knowledge about the channels, a directed arc for both directions of each payment channel is added to the uncertainty network. The capacities of the edges are set to the capacities of the payment channel (possibly deducting channel reserves). Notice for example that in the local channels of $s$ the balance values are known and there is no uncertainty. Thus the capacity for those channels is set to the local balance value $u$ as this is the most that can currently be sent on those channels. The probability is set to $1 = P(X\geq a | X=u)$ for any amount $a$ between $0$ and $u$. This results in a negative log probability of $0$ and thus makes it very cheap for the minimum cost flow computation to utilize the liquidity in this channel, in particular since the node $s$ also does not have to pay any fees to itself. Similarly, the receiving destination node $d$ could tell the sending node about the state of its local channels and this knowledge could also be incorporated into the graph by creating edges with a negative log probability of $0$ and decreased capacities.\footnote{Communicating this information in invoices is currently not part of the protocol but routing hints in the invoices might be extended to encode such information.} In Figure~\ref{fig:mincut} we have demonstrated that for about $95\%$ of all payment pairs the amount that can be delivered through the network is actually limited by the local outbound capacity of the sender and the local inbound capacity of the receiving node, which yields another motivation for this information to be shared.

After the setup phase the round-based phase starts. Here the algorithm iterates over the following steps until either the entire amount is delivered or the minimum cost flows become too unlikely or cannot be computed for the residual amount, which means the minimum $s$--$d$-cut has been discovered.\footnote{In such cases it seems reasonable that $s$ open a new payment channel with $d$ for at least the remaining amount.} The round starts with $s$ computing a minimum cost flow for the amount $U$ to $d$ according to the optimization goal. The flow is then decomposed into paths and cycles. Note that cycles cannot appear in our application as long as we do not allow negative cost edges. If they appeared, they would indicate profitable rebalancing options that may or may not be reachable for $s$. Since negative fees are not allowed in the BOLT standards, we can safely ignore cycles for now.
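The decomposition step can be sketched as a standard peel-off procedure in Python. This is our own simplified illustration: it assumes a cycle-free flow, which is justified by the argument above, and repeatedly extracts an $s$--$d$ path from the support of the flow together with its bottleneck amount.
\begin{verbatim}
def decompose_into_paths(flow, s, d):
    """flow: dict mapping directed edges (i, j) to positive integer
    amounts satisfying flow conservation between s and d, with no
    cycles in its support. Returns (path, amount) pairs whose
    superposition equals the input flow."""
    flow = {e: x for e, x in flow.items() if x > 0}
    paths = []
    while any(i == s for (i, j) in flow):
        # Walk from s to d along edges that still carry flow.
        path, node = [s], s
        while node != d:
            node = next(j for (i, j) in flow if i == node)
            path.append(node)
        edges = list(zip(path[:-1], path[1:]))
        amount = min(flow[e] for e in edges)
        for e in edges:
            flow[e] -= amount
            if flow[e] == 0:
                del flow[e]
        paths.append((path, amount))
    return paths

# A 2-sat flow split over two unit paths decomposes as expected:
f = {("s", "A"): 1, ("A", "B"): 1, ("B", "d"): 1,
     ("s", "X"): 1, ("X", "Y"): 1, ("Y", "d"): 1}
print(decompose_into_paths(f, "s", "d"))
\end{verbatim}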
The node then conducts a multi-part payment by concurrently sending out onions along the resulting paths. In practice one has to choose a decomposition of the flow into paths that does not create more HTLCs on a remote channel than the HTLC limit encoded into the protocol permits. This engineering challenge, as well as others like the question of channel reserves, is ignored here for simplicity of presentation.

Despite the fact that the most likely flow was used, some of the onions will not reach the target in most cases. So the sending node gathers information from the error codes of failed attempts as previously described in~\cite{tikhomirov2020probing} as well as information from the paths that have not returned an error to update the probabilities as described in~\cite{pickhardt2021security}. This step decreases the uncertainty of the channel balances and is crucial for the improvement and different results in the next round, which is why we explicitly explain how the knowledge is updated in several cases.
\begin{enumerate}
\item If an onion with the amount $h$ has not been returned, we assume it has arrived at the destination. Thus all channels along the path have now locked some liquidity into an HTLC.\@ In our uncertainty network we thus reduce the capacity $u$ of each involved channel by the amount $h$ that was sent along that channel on that path. This changes our future success probabilities for the amount $a$ to be $P(X\geq a+h|X\geq h)$, which corresponds to a change from $\frac{u+1-a}{u+1}$ to $\frac{(u-h)+1-a}{(u-h)+1}$ in the uniform case. For any value of $a$ and positive $h$ the second fraction is smaller than the first one. This leads to lower probabilities, which in turn yield higher costs for using those channels again in follow-up rounds.
\item If an onion of size $h$ fails we learn the following new information:
\begin{enumerate}
\item On every channel up to the failed channel there has been sufficient liquidity to forward the amount $h$. In future rounds we can use the conditional probability $P(X\geq a|X\geq h)$. For $a\leq h$ this term will be $1$, resulting in log costs of $0$. For $a>h$ the conditional probability is uniform again. It is important to notice that the conditional probability will again lead to a convex cost function.
\item Assuming the failure code is related to liquidity issues for the failed channel we know that the amount $h$ was not available. Thus we are able to reduce the capacity to $h-1$ and change the probability for the future rounds from $\frac{u+1-a}{u+1}$ to $\frac{h-a}{h}$ in the uniform case, or $P(X\geq a|X < h)$ in general. This probability decrease will result in higher costs for utilizing this channel.
\item If on the other hand the failure code is related to an issue with the next hop, for example a downtime, the node can update its local view by temporarily removing the failed node with all its channels.
\end{enumerate}
\end{enumerate}
Note that both successful and failing onions would also allow us to update the knowledge about the balances in the reverse direction. Once all the knowledge from the partial attempts of the round has been learned --- which is reflected by the update of the probabilities and cost functions --- the algorithm enters the next round. First, the sum of all failed onions is computed as the residual amount that still needs to be delivered. We enter the next round with that amount on the updated uncertainty network by computing a new minimum cost flow to generate the next optimal MPP split.
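These update rules translate directly into code. The following Python sketch, with our own naming and the uniform prior, returns the success probabilities that feed into the cost function of the next round.
\begin{verbatim}
def success_after_htlc_locked(a, u, h):
    """An onion of size h is assumed delivered, so h sat are locked
    in HTLCs and the usable capacity shrinks from u to u - h."""
    remaining = u - h
    if a > remaining:
        return 0.0
    return (remaining + 1 - a) / (remaining + 1)

def success_after_forwarding(a, u, h):
    """A channel before the failing hop forwarded h sat, so we
    learned X >= h and use P(X >= a | X >= h)."""
    if a <= h:
        return 1.0
    return (u + 1 - a) / (u + 1 - h)

def success_after_liquidity_failure(a, h):
    """The channel could not forward h sat, so X < h and
    P(X >= a | X < h) = (h - a) / h in the uniform case."""
    if a >= h:
        return 0.0
    return (h - a) / h
\end{verbatim}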
\section{Example}
Let us go through an example that illustrates why finding a min-cost flow is necessary instead of just adding optimal paths. It also serves to better understand the round-based algorithm. In order to simplify the example, we ignore fees and channel reserves and optimize purely for probability here. We also hide the nonintuitive negative log probabilities by writing down the corresponding max-probable flow with probabilities as edge weights. For the channel graph depicted in Figure~\ref{fig:counterexample},
\begin{figure}[htpb]
\center
\includegraphics[width=0.45\textwidth]{img/counterexample.png}
\caption{Example channel graph on which the $2$-flow with maximal probability is not an extension of the $1$-flow from $s$ to $d$ with maximal probability. The success probabilities for sending $i$ sat are depicted as $p_i$. The channel capacities are the bold numbers along the edges.}
\label{fig:counterexample}
\end{figure}
assuming uniform probability distributions we compute the following probabilities for the $1$-flows (paths delivering 1 sat):
\[
\begin{aligned}
p([s,A,B,d]) = & \frac{2}{3}\cdot\frac{2}{3}\cdot\frac{4}{5}=\frac{16}{45} &= 0.35\overline{5} \\
p([s,X,Y,d]) = & \frac{1}{2}\cdot\frac{7}{8}\cdot\frac{4}{5}=\frac{28}{80} &= 0.35 \\
p([s,X,B,d]) = & \frac{1}{2}\cdot\frac{9}{10}\cdot\frac{4}{5}=\frac{36}{100} &= 0.36 \\
\end{aligned}
\]
This indicates that $s,X,B,d$ is the minimum cost $1$-flow. The $(s,X)$ arc is obviously saturated so that a $2$-flow extending the $1$-flow would have to go via the $(s,A)$ arc. One can easily compute the probability of the resulting $2$-flow $f^2$, in which the next sat also uses the $(B,d)$ channel and is merged with the min-cost $1$-flow, as
\[
p(f^2) = \frac{1}{2}\cdot \frac{9}{10}\cdot \frac{2}{3}\cdot \frac{2}{3}\cdot \frac{3}{5} = \frac{3}{25}=0.12
\]
However, if we look at the $2$-flow $g^2$ that sends $1$ sat along $s,A,B,d$ and $1$ along $s,X,Y,d$ we get $p(g^2)=\frac{16}{45}\cdot\frac{28}{80} = 0.124\overline{4}$; this $g^2$ is in fact the min-cost $2$-flow in this graph. This example shows that finding a min-cost flow cannot in general be done by computing the most likely path for a single sat and extending it with the next most likely $1$-sat-paths.\footnote{This simple $+1$-algorithm could actually be rescued so that it would be able to compute the min-cost flow. However, both versions would also be quite slow as they would be linear in the amount that was to be sent --- which is exponential in the input size.}

Extending our example and assuming we want to send a total of 3 sat, we start again by computing the min-cost flow $f^3$ which can be dissected into two paths $l_1= s,X,Y,d$ with an amount of $1$ and another path $l_2=s,A,B,d$ with an amount of $2$. After sending out the onions we might have learned that the onion along $l_1$ has been successful, but the one with $2$ sat along $l_2$ has failed because $B$ did not have enough liquidity to forward the onion to $d$ on the $(B,d)$ channel. For the second phase of the algorithm we now compute the min-cost flow on a graph where we know that we can deliver 2 sat with perfect certainty to $B$. This updated uncertainty network is depicted in Figure~\ref{fig:round2}.
\begin{figure}[htpb]
\center
\includegraphics[width=0.45\textwidth]{img/round2.png}
\caption{The uncertainty network of $s$ after HTLCs of value $1$ are locked in along the path $s,X,Y,d$ and a 2-sat onion along $s,A,B,d$ failed because of missing liquidity on the $(B,d)$ channel. Saturated edges are removed.
The black labels on the edges depict the uncertainty range of the balance or a single number if the balance is known to $s$. The $p_i$ express the success probability for sending a further $i$ sat along an edge given the updated knowledge. } \label{fig:round2} \end{figure} The flow that sends the full residual amount of $2$ sat along $s,A,B,X,Y,d$ has a probability of $\frac{8}{10}\cdot\frac{5}{7}\cdot\frac{1}{2}=0.286...$ while the flow that sends $1$ sat along the $B,d$ channel has a probability of $\frac{1}{2}\cdot\frac{9}{10}\cdot\frac{6}{7}\cdot\frac{3}{4}=0.289...$ telling us that for the residual amount we should make another split payment sending one sat along each of the paths $q_1=s,A,B,d$ and $q_2=s,A,B,X,Y,d$. Finally, if the path $q_1$ locks in and $q_2$ returns an error at any of the channels $(B,X),(X,Y)$ or $(Y,d)$ we would know that we cannot deliver the full payment as the min-cut in the network on the balance graph had the value $2$. If however $q_2$ and $q_1$ both lock in we have successfully delivered the payment. If both $q_1$ and $q_2$ return an error we know the min-cut between $s$ and $d$ on the balance graph was $1$ (as that had been locked-in in the first round and no further HTLCs have locked in). Finally if $q_1$ returns an error and $q_2$ locks in we will have to enter the third round. In the third round there is only 1 sat to be delivered on a single possible path $q_2$ which, given our knowledge, has a success probability of $\frac{8}{9}\cdot\frac{5}{6}\cdot\frac{2}{3}=0.494\ldots$\footnote{the numerical similarity to the twitter account mentioned in the acknowledgements is completely coincidental as we used that graph even before we had the discussion with the individual.}. \section{Anecdotal Lab Report} \textbf{Disclaimer:} We stress that this document is a preprint. In particular this section cannot be considered a proper evaluation. For that we would also have to test the algorithms on the Lightning Network mainnet. In addition to the fundamental complications arising through the base fee\footnote{which should be easily avoidable at the moment by incorporating some buffer in the fee size, as long as it remains lightly used}, such a real-world test requires overcoming several engineering challenges that we did not have the time to address yet: A practical implementation needs to automatically answer questions that might arise in case of hanging HTLCs, if channels become inactive, or amounts reach HTLC limits of channels, for example. Also the implementation of the min-cost flow solver would have to be engineered to have a much faster runtime than our experimental version so that it would actually be feasible to use it on the real Lightning Network. However we felt the need to share some preliminary research progress after some test results from our simulated setting indicated an improvement of reliability of several orders of magnitude over the currently reported statistics for payment delivery. In particular, an anecdotal lab report --- while highly unusual --- seemed appropriate for this particular situation in order to inform the developer community as early as possible about the potential need for a protocol upgrade removing the base fee. \subsection{Simulation} Since the computation with base fees is not feasible\footnote{and the fees are also currently low enough to not impact the results too much}, we have ignored the base fees in all our computations. 
In fact, we started experimenting by not even optimizing for low fees at all but just for high probabilities. We took a recent snapshot of the channel graph of the Lightning Network that was observed via the gossip protocol on one of our long-running mainnet nodes. We then created a static balance graph instantiation of the simulated network by splitting the channel capacity into two balance values uniformly at random independently for each channel. We created a Python-based min-cost solver following the algorithm described in~\cite[p.~556ff]{ahuja1993network} and a Scala version later on that turned out to be faster by a factor of about $3$--$4$. While implementing this algorithm we made some mistakes early on that accidentally led to the discovery of a heuristic that, on our snapshot Lightning Network graph, reliably produced results with less than 1\% deviation from the optimal cost in less than 1\% of the runtime. Because the exact algorithm takes more than 30 minutes even in the faster Scala implementation, the following results have been mostly obtained with this heuristic, which typically takes about 6 seconds to run in Scala. So it is notable that the optimal results would be even better, although negligibly so.

We picked a medium-sized Lightning node that got randomly assigned a local balance of $0.42$ BTC and tried to send $0.4$ BTC to another node $3$ hops away using the round-based algorithm described in Section~\ref{rounds}. The remote node had a total capacity of roughly $1.5$ BTC and more than $0.4$ BTC inbound liquidity. Assuming no routing hints from the recipient, we started the first round by computing a flow that was dissected into several hundred small paths. Sending them out, we were able to deliver almost $75\%$ of the amount that we wanted to deliver at once. We updated the graph with the insights from the successes and failures and started the second round for the residual amount of roughly $0.1$ BTC. In this round the min-cost solver on the graph with less uncertainty suggested a split of about $100$ paths. After sending out the payments we observed that the residual amount was only about $0.009$ BTC. In the third round, again on the updated graph, the min-cost solver suggested sending about 15 concurrent payments, of which all but one were successful. We entered the fourth and final round with an amount of $30,000$ sat ($0.0003$ BTC) remaining. Owing to the learned data, the heuristic of our min-cost solver sent the full amount on a single path with $8$ hops, because it had already gained enough certainty for all but one channel (of size 1 BTC) along that path that it could forward $30,000$ sat. Thus, with a probability of $99.97\%$, the $30,000$ sat settled along the selected $8$-hop path and the payment was delivered in full. While sending out all the onions we tracked the total routing fees to be $814$ sat.

We also repeated the experiment with reduced initial uncertainty by assuming the recipient node had initially communicated to the sender in the invoice on which channels it could receive what amounts. In this case and on the same graph the algorithm delivered the final payment in the third round already. We repeated the experiment a couple of times with different amounts and different instantiations of the simulated balance graph, obtaining similar results with every run.
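The instantiation step just mentioned can be sketched as follows. This is our own minimal Python with toy capacities; the actual experiment used a gossip snapshot of the full channel graph.
\begin{verbatim}
import random

def instantiate_balances(capacities, seed=None):
    """capacities: dict mapping an undirected channel (i, j) to its
    public capacity in sat. Splits each capacity uniformly at random
    into the two directed spendable balances."""
    rng = random.Random(seed)
    balances = {}
    for (i, j), capacity in capacities.items():
        balance_i = rng.randint(0, capacity)
        balances[(i, j)] = balance_i             # spendable by i
        balances[(j, i)] = capacity - balance_i  # spendable by j
    return balances

print(instantiate_balances({("A", "B"): 10000, ("B", "C"): 25000}, seed=7))
\end{verbatim}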
We therefore believe it reasonable to expect that we would see very similar results on the actual Lightning Network even though the unknown balance graph of the mainnet is constantly changing (potentially making some of our learned knowledge invalid).

We also used the above combination of balance graph, source and destination node for some experiments with the Lagrangian relaxation. This time, we allowed multiple parallel channels between nodes, as they are actually observed in the Lightning Network. Also, instead of going through the rounds of the payment algorithm, we just looked at the results of a single min-cost flow calculation, with a payment amount of 9.2 million sat (0.092 BTC). In this setting\footnote{without using any knowledge about the source's or the destination's channels}, optimizing for reliability only ($\mu=0$) yields a probability of $P=0.16$, with total fees of 697 sat (excluding base fees). On the other hand, choosing $\mu=100$ means optimizing almost exclusively for fees. This brings the total fees down to $16$ sat, but, as expected from the arguments in Section~\ref{fees}, the success probability drops to $P=1.1\cdot10^{-11}$. Table~\ref{table:mu} shows that when we lower $\mu$ through multiple orders of magnitude, the success probability increases drastically, while the fees rise only moderately. Unfortunately, we increasingly observed numerical instabilities while decreasing $\mu$. This led to our algorithm not terminating beyond $\mu=0.01$.
\begin{table}
\begin{center}
\begin{tabular}[h]{l|l|r}
$\mu$ & $P$ & fee (sat) \\
\hline
100 & $1.1\cdot10^{-11}$ & 16 \\
10 & $2.3\cdot 10^{-5}$ & 16 \\
1 & $0.0097$ & 18 \\
0.1 & 0.044 & 24 \\
0.01 & 0.056 & 28 \\
0 & 0.16 & 697
\end{tabular}
\caption{Results of varying $\mu$ on a fixed payment pair}
\label{table:mu}
\end{center}
\end{table}
Again, all numbers are from the heuristic, but the exact algorithm performed nearly identically in all our samples, including the numerical instabilities.

\subsection{Source Code}
We could not find any preexisting open source software implementing the solution of the integer minimum cost flow problem for arbitrary separable convex cost functions. We therefore share the source code of the described algorithms and methods as well as the \LaTeX{} sources of this document under an open MIT license at: \url{https://github.com/renepickhardt/mpp-splitter}. This repository consists of Scala- and Python-based example implementations of the exact min-cost flow algorithm described in~\cite[p.~556ff]{ahuja1993network} for a separable convex cost function. It also includes a minimalistic simulation framework in Python to test the practicality of the round-based payment loop. The Scala version includes example code to demonstrate the usage of the Lagrangian relaxation.

\section{Advanced Applications}
\subsection{Multiple Senders and/or Receivers}
Notice that our definition of a flow in Section~\ref{flows} allows for an arbitrary number of both sources and sinks, that is, nodes with non-zero excess. This means that while a min-cost flow calculation might be computationally expensive, it can result in an optimized flow for multiple payments and/or channel balancing efforts at the same time. With respect to the runtime of the algorithm we have implemented\footnote{which relies on single source shortest path calculations}, a more complex flow will take longer to optimize in practice, even though it will still respect the same worst-case runtime bounds.
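To illustrate the bookkeeping for multiple payments, the following sketch with hypothetical names aggregates several payments into a single excess vector $b$ that one min-cost flow computation can then satisfy.
\begin{verbatim}
from collections import defaultdict

def excess_vector(payments):
    """payments: iterable of (sender, recipient, amount) triples.
    Returns the node excess b used in the flow conservation
    constraints; supplies are positive, demands negative."""
    b = defaultdict(int)
    for sender, recipient, amount in payments:
        b[sender] += amount
        b[recipient] -= amount
    assert sum(b.values()) == 0  # total supply equals total demand
    return dict(b)

# Three payments handled by a single flow computation:
print(excess_vector([("alice", "bob", 5000),
                     ("alice", "carol", 2500),
                     ("dave", "bob", 1000)]))
\end{verbatim}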
We expect that entities like Lightning Service Providers (LSP) or trampoline routers, who need to find paths for many payments, will find this aspect helpful. One could imagine a permanently running min-cost flow calculation loop that keeps learning about the network and sending out remainder amounts as in Section~\ref{rounds}, but can always add additional payments in the next round. It bears mentioning that in such multi-purpose rounds, the minimum cost is always optimized globally, which could lead to some payments being cheaper at the expense of others. This needs to be accounted for when, e.g., routing payments for multiple clients. \subsection{Optimal Channel Rebalancing} \label{rebalancing} It is well-known in the community that routing nodes can contribute to the overall payment reliability in the Lightning Network by using various channel rebalancing techniques. A recent survey paper~\cite{papadis2020blockchain} in particular describes channel rebalancing via off-chain circular onions. This can happen proactively or lazily at routing time via a technique called just in time routing (JIT-routing)\cite{Pickhardt2019}. To our knowledge, rebalancing has so far only been considered one channel pair at a time. We observe that a node $i$ might want to rebalance several channels at once by shifting excess balance from source channels to target channels where more liquidity would be demanded. In the uncertainty network, we can then assign the supply for rebalancing to the channel partner nodes of the corresponding outbound supply channels and remove these edges from the graph. Because we have to account for the fees on inbound channels, the construction is a little more involved for the channels that demand extra balance: For every incoming edge $(j,i)$ that demands a balance increase, we create a copy $(j,k)$ (with the same capacity and cost function) leading to a new node $k$ that is assigned the demand. Finally, we can compute a multi-source-multi-sink min-cost flow in order to shift the liquidity and conduct a multi-channel rebalancing. As rebalancing is rarely time critical, one might prefer a high value of $\mu$ in the min-cost flow computation that favors low fees over a high success rate. A node might even decide to stop the rebalancing operation before all of the onions have been successfully delivered: it has just engaged into a cheap opportunity for rebalancing; if delivering the remaining amounts turns out to be too expensive in the next min-cost flow calculation, it might prefer to stay with this improved but not perfect balance according to its own rebalancing strategy. At first sight, rebalancing seems most interesting for nodes that engage more in routing than sending or receiving payments. However, we want to stress that for LSPs who conduct several payments per second, it might be very reasonable to combine the rebalancing and payment aspect, and suggest two ideas. First, we recall the global uncertainty of a node is always decreasing while delivering payments, so learning this knowledge could help find opportunities for engaging in rebalancing operations. Second, an LSP might aim to keep its channels balanced according to a certain strategy. Instead of allowing itself to use all channels for making a payment, an LSP could restrict itself only to the channels where it has too much liquidity and exclude other channels from the computation. 
This min-cost flow might be more expensive and less likely, but it might increase the chances for the node to forward payments; potentially earning a fee and increasing its overall reliability for other nodes might make such a trade-off worthwhile. \section{Limitations} \begin{enumerate} \item As discussed in Section~\ref{fees}, the currently adopted base fees in the Lightning Network make computing a min-cost flow NP-hard whenever the cost function includes these fees. Thus we have inquired about the motivation for the inclusion of a base fee online and received a response from the developer who appears to be responsible for this decision.\footnote{Rusty Russell's answer to the question: Why was the base\_fee for the routing fee calculation of the Lightning Network included? \url{https://bitcoin.stackexchange.com/a/107341/81940}} We are under the impression that the base fee was a rather ad-hoc and arbitrary choice and is not of significant importance to the Lightning Network protocol. Even if it were too difficult to change the protocol we see clear incentives for node operators setting the base fee to zero: Nodes who want to conduct path finding might ignore base fees in their path computation in the future and thus ignore channels with a non-zero base fee. Furthermore, we note that our approach favors channels with large capacity, whose operators might therefore be able to demand higher fees. We conjecture that this will give rise to a more balanced fee market, which should be in the interest particularly of node operators who have invested significant liquidity. \item We made the crucial assumption that channel balances are uniformly distributed. While this was confirmed by prior research~\cite{pickhardt2021security}, the situation could evolve over time, making our priors less suitable. If, say, we assumed a normal distribution, the negative log probabilities would not be convex on the entire domain and the min-cost flow problem might not be solvable in polynomial time. However, we note for future research that there are practical solutions for this, like limiting the domain by limiting the allowed channel capacity. Moreover, in~\cite{pickhardt2021security} it has also been shown in the single path case that adopting a rebalancing protocol which changed the prior to a normal distribution but computing paths with a uniform prior still performed well. In fact, we conjecture that we cannot do better than assume uniformity unless we have knowledge of the parameters of the actual distribution. \item While the min-cost flow problem admits a polynomial time solution in the case of a convex cost function (without base fees), it is still computationally intensive, with a runtime that is quadratic in the number of channels. Remember also that we might have to solve several of these problems per payment round in order to find a suitable Lagrange multiplier in the trade-off between reliability and fees. Our prototype implementation is currently not optimized for speed; on the full Lightning Network graph, running times can easily reach 30 minutes and more, depending on the degree of precision. However, we have achieved preliminary experimental results on a promising heuristic that seems to very favorably trade reductions in runtime for only a slight deterioration from the optimal solution of the minimum cost flows. 
Using heuristics like this, algorithmic methods such as approximation and parallelization, as well as optimized code, we estimate that on the currently public Lightning Network it should be feasible to achieve sub-second runtimes on commodity hardware. However, we feel that this research is still too early for publication.
\end{enumerate}

\section{Future Work}
Beyond the optimization steps required for practical usability and extensive tests on the actual Lightning Network that we have hinted at above, we see some additional directions for further research:
\begin{enumerate}
\item Recall from Section~\ref{fees} that when optimizing for both fees and reliability one has to find a suitable value for $\mu$. We hope that improving on the current Lightning Network user experience will be possible with some experimentation, user interface design, and drawing from the extensive literature on Lagrangian relaxation.
\item In the round-based payment algorithm described in Section~\ref{rounds}, the optimal delay spent waiting for further responses after the first failed onion has returned and before entering the next round with updated knowledge remains an open question. In our experiments and simulation we have assumed waiting for the status of all onions to be resolved. We defer investigation of this question to practical experimentation but point out that it does not appear too critical since we can always incorporate information that arrives too late for one round in any later ones. In the extreme case, for every returned onion a new round could be entered.
\item It is conceptually straightforward to extend the probabilities with provenance scores from operating nodes on the Lightning Network. Instead of just looking at the channel balance distribution one could create a joint distribution by, e.g., estimating the nodes' uptime.
\item Large channels could become the equivalent of the autobahn and attract a lot of traffic. Given the limitations of concurrent payments on the Lightning Network, the need for congestion control mechanisms might arise.
\item The payment planning and execution algorithm accumulates knowledge about the actual channel success probability distributions. However, we know that in practice those distributions do not stay constant. In fact, they change with every payment that is conducted through a particular channel or via the rebalancing behavior of node operators. Further research into the dynamics of the money flow through the Lightning Network will help estimate how long to rely on the knowledge gained from previous payment attempts. This knowledge can then be adequately discounted or forgotten in future rounds.
\item The promising idea of adding redundant liquidity during the path finding phase of MPP splits was introduced by~\cite{DBLP:journals/corr/abs-1910-01834}. It would be very interesting to study whether we could find optimal redundant overpayments so that we can expect to finish within one round of sending out multi-part payments with high probability.
\item In spite of the arguments given in Section~\ref{generalized} that led to prioritizing other aspects first, we still believe generalized flows to be an interesting future research direction, especially in light of multiple-source-multiple-sink applications (cf. Section~\ref{rebalancing}).
In this setting, constructions like the self-loop we described might allow for more flexibility: one can specify exact excess requirements for some sources and destinations while leaving others open in order to give the optimization more leeway.
\end{enumerate}

\section{Acknowledgements}
This research was partially sponsored by the Norwegian University of Science and Technology (NTNU). We want to thank David Karger and MIT OpenCourseWare for sharing a lecture series about min-cost flows\footnote{\url{https://www.youtube.com/playlist?list=PLaRKlIqjjguDXlnJWG2T7U52iHZl8Edrc}} that has been very useful to us for creating our reference implementation and understanding the theory of min-cost flows. We are grateful to Ravindra Ahuja, Thomas Magnanti and James Orlin for their exceptional textbook~\cite{ahuja1993network}, which contains so much of the knowledge we have been adapting to our use case here. We also thank Twitter user \texttt{@four9four} for discussing our preliminary results after the Lightning hacksprint in April 2021. Thanks to GB for polishing the graphics. We thank Rene's co-authors Andreas M. Antonopoulos and Olaoluwa Osuntokun for accepting Rene's decision to temporarily prioritize this research over their Lightning Network book project. Finally, our gratitude goes to Christian Decker, with whom we had several discussions and who provided valuable feedback over the course of this research. If you like the idea of decentralized and independent research and development of the Lightning Network, feel free to support future work by contributing via \url{https://donate.ln.rene-pickhardt.de}.

\bibliography{mppSplitting}
\bibliographystyle{plain}

\begin{appendix}
\section{Twitter-based TL;DR}
While putting it all together, we actually shared the method and results in six tweets: \\\\
\textbf{March 17th 2021}\footnote{\url{https://twitter.com/renepickhardt/status/1372169686251626499}} \\
Over the last year I have been making quite some discoveries about \#bitcoin Payment Pathfinding on the \#LightningNetwork .\\A paper which introduces a probabilistic approach of modeling the uncertainty of remote channel balances is out and discussed on \url{https://lists.linuxfoundation.org/pipermail/lightning-dev/2021-March/002984.html}\\\\
\textbf{April 22nd 2021}\footnote{\url{https://twitter.com/renepickhardt/status/1385144337907044352}} \\
Multipathfinding
\begin{enumerate}
\item capacities are integers (Satoshis)
\item channel success probabilities -log(1-x/c) are convex functions
\item Solving integer minimum cost flows with separable convex cost objective polynomially: \url{https://link.springer.com/chapter/10.1007\%2FBFb0121104}
\end{enumerate}
Kudos @stefanwouldgo 4 digging this out \\\\
\textbf{May 26th 2021}\footnote{\url{https://twitter.com/renepickhardt/status/1397559345139888137}}\\
most likely MPP-split to deliver 92 mBTC from my lightning node (03efc...) to (022c6...). We split into 11 onions!
\begin{figure}[htpb]
\centering
\includegraphics[width=0.45\textwidth]{img/optflow.jpeg}
\label{fig:optflow}
\end{figure}\\
With knowledge of my own balance \& routing hints in the invoice the total likelihood to deliver all 11 onions is $64.84\%$ Note the 6 hop path with 4 WUMBO channels!
\\\\ \textbf{June 6th 2021}\footnote{\url{https://twitter.com/renepickhardt/status/1401514950984712198}}\\ Couldn't go to Miami so I coded up my algorithm!\\\\ In a simulated network with realistic balance values a node having 0.42 BTC could send 0.4 BTC to a remote node (that could receive up to 1.59 BTC) with no direct channel\\\\ It took just 4 attempts to deliver and 814 sat in fees\\\\ \textbf{June 8th 2021}\footnote{\url{https://twitter.com/renepickhardt/status/1402264479677693958}}\\ on other good news: \begin{enumerate} \item this yields another test vector for unit tests \item the previous / non optimal algorithm might for other reasons (that go beyond a tweet) actually be better suitable for the lightning network after all \item either way the issue seems fixable (: \end{enumerate} \textbf{July 5th 2021}\footnote{\url{https://twitter.com/stefanwouldgo/status/1412158904008646660}}\\ Lightning Routing IS NP hard, though. It's funny 'cause it's true. \end{appendix} \end {document}
{ "alphanum_fraction": 0.7888229889, "avg_line_length": 105.9380530973, "ext": "tex", "hexsha": "f864a53267a49b5bcd0cdd6c74653ecd7e8bb673", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "74b7634bb4c21e62c24865906a83fd391e4e06bf", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "drmartinberger/mpp-splitter", "max_forks_repo_path": "paper/mppSplitting.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "74b7634bb4c21e62c24865906a83fd391e4e06bf", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "drmartinberger/mpp-splitter", "max_issues_repo_path": "paper/mppSplitting.tex", "max_line_length": 1234, "max_stars_count": null, "max_stars_repo_head_hexsha": "74b7634bb4c21e62c24865906a83fd391e4e06bf", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "drmartinberger/mpp-splitter", "max_stars_repo_path": "paper/mppSplitting.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 13550, "size": 59855 }
\documentclass{article} %Need this.
\usepackage{amsmath,amsthm,amssymb}
\newtheorem*{thm}{Theorem}
\newtheorem*{cnj}{Conjecture}
\newtheorem*{lem}{Lemma}
\newtheorem*{cor}{Corollary}
\newtheorem*{prop}{Proposition}
\newcommand{\N}{\mathbb{N}}
\newcommand{\Z}{\mathbb{Z}}
\newcommand{\R}{\mathbb{R}}
\title{Practice}
\author{--Name --}
\date{}
\begin{document}
\maketitle %This will add the title, author, and date located above
\begin{thm}
If $x$ and $y$ are odd integers then $x+y$ is an even integer.
\end{thm}
\begin{proof}
We assume that $x$ and $y$ are odd integers and will prove that $x+y$ is an even integer. Since $x$ and $y$ are odd, there exist integers $m$ and $n$ such that $x=2m+1$ and $y=2n+1$. By substitution and algebra we obtain
\begin{align*}
x+y &= 2m+1 + 2n + 1\\
&= 2m+2n+2\\
&=2(m+n+1).
\end{align*}
Define $q=m+n+1$. Since $m$ and $n$ are integers and the integers are closed under addition, we conclude that $q$ is an integer. Since $x+y=2q$ for the integer $q$, we conclude that $x+y$ is an even integer.
\end{proof}
\vspace{.15in}
\section*{Challenge Typing}
Suppose that $f:(-1,1)\to \R$ and $f$ is differentiable at $0$. Let sequences $(\alpha_n)_{n\geq1}$ and $(\beta_n)_{n\geq1}$ satisfy $-1<\alpha_n<\beta_n<1$ for all $n\geq 1$ and $\displaystyle{\lim_{n\to\infty}} \alpha_n = \displaystyle{\lim_{n\to\infty}} \beta_n = 0$. Set
$$\lambda_n = \frac{f(\beta_n) -f(\alpha_n)}{\beta_n-\alpha_n} . $$
\newpage
\begin{thm}
The set $\{ x\in\Z : |x-2.5| = 2 \}$ is the empty set.
\end{thm}
\begin{proof}
Let $y$ be an integer such that $y \in \{ x\in\Z : |x-2.5| = 2 \}$. Then $y\in \Z$ and $|y-2.5| = 2$. Since $|y-2.5| = 2$, either $y = 4.5$ or $y=0.5$. In either case $y$ is not an integer. Therefore the set $\{ x\in\Z : |x-2.5| = 2 \}$ has no elements and
\[\{ x\in\Z : |x-2.5| = 2 \} = \emptyset.\]
\end{proof}
\begin{thm}
There exist two positive irrational numbers $s$ and $t$ such that $s^t$ is rational.
\end{thm}
\begin{proof}
We will consider two cases. For the first case, suppose that $\sqrt{2}^{\sqrt{2}}$ is rational. Then we may take $s= t = \sqrt{2}$. For the second case, suppose that $\sqrt{2}^{\sqrt{2}}$ is irrational. Let $s= \sqrt{2}^{\sqrt{2}}$ and $t=\sqrt{2}$. Then
$$\left(\sqrt{2}^{\sqrt{2}}\right)^{\sqrt{2}} = \left(\sqrt{2}\right)^2 = 2.$$
Since $2$ is rational, $s^t$ is rational. Therefore, there exist positive irrational numbers $s$ and $t$ such that $s^t$ is rational.
\end{proof}
\begin{thm}
Let $n$ be a natural number. Then
$$\sum_{k=1}^n k = \frac{n(n+1)}{2}.$$
\end{thm}
Consider the following matrix,
$$\left( \begin{array}{ccc}
1 &2 &3\\
4 &5 &6\\
7 &8 &9
\end{array}\right)$$
\end{document}
{ "alphanum_fraction": 0.6352853966, "avg_line_length": 32.119047619, "ext": "tex", "hexsha": "4a23ec232e47600cb2976d9ff706fee80a45ac6e", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "4038b6d102000f4eeb27adaa8d0fd2bde63c28ac", "max_forks_repo_licenses": [ "CC0-1.0" ], "max_forks_repo_name": "mkjanssen/discrete", "max_forks_repo_path": "from LDK/LaTeX/Class/ReproduceInClass.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "4038b6d102000f4eeb27adaa8d0fd2bde63c28ac", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "CC0-1.0" ], "max_issues_repo_name": "mkjanssen/discrete", "max_issues_repo_path": "from LDK/LaTeX/Class/ReproduceInClass.tex", "max_line_length": 276, "max_stars_count": null, "max_stars_repo_head_hexsha": "4038b6d102000f4eeb27adaa8d0fd2bde63c28ac", "max_stars_repo_licenses": [ "CC0-1.0" ], "max_stars_repo_name": "mkjanssen/discrete", "max_stars_repo_path": "from LDK/LaTeX/Class/ReproduceInClass.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1075, "size": 2698 }
\subsection*{MORR.Shared.Configuration}
\begin{interface}{IConfiguration}
\clsdiagram[scale = 1]{resources/Classes/Common/Shared/Configuration/IConfiguration.png}
\clsdcl{public interface IConfiguration}
\clsdsp{Encapsulates a self-contained unit of configuration.}
\begin{methods}
\begin{method}{void Parse(RawConfiguration configuration)}{Parses the configuration from the provided value.}
\begin{parameters}
\para{RawConfiguration configuration}{The raw configuration to parse from.}
\end{parameters}
\end{method}
\end{methods}
\end{interface}
\begin{class}{RawConfiguration}
\clsdiagram[scale = 1]{resources/Classes/Common/Shared/Configuration/RawConfiguration.png}
\clsdcl{public class RawConfiguration}
\clsdsp{Encapsulates a raw, not yet parsed configuration value.}
\begin{attributes}
\attribute{public string RawValue \{ get; \}}{The raw value of the configuration.}
\end{attributes}
\begin{constructors}
\begin{constructor}{public RawConfiguration(string rawValue)}{Creates a new instance of RawConfiguration with the provided raw value.}
\begin{parameters}
\para{string rawValue}{Value of the RawConfiguration.}
\end{parameters}
\end{constructor}
\end{constructors}
\end{class}
{ "alphanum_fraction": 0.7442434211, "avg_line_length": 30.4, "ext": "tex", "hexsha": "65471033d837315189e8f6850b43333bfba23d1b", "lang": "TeX", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2020-07-24T06:05:52.000Z", "max_forks_repo_forks_event_min_datetime": "2020-07-24T06:05:52.000Z", "max_forks_repo_head_hexsha": "0830f2155fb3b32dc127587e07cbd780deb0e118", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "prtest01/MORR", "max_forks_repo_path": "documents/sd/chapter/Classes/Common/Configuration/Configuration.tex", "max_issues_count": 110, "max_issues_repo_head_hexsha": "0830f2155fb3b32dc127587e07cbd780deb0e118", "max_issues_repo_issues_event_max_datetime": "2020-04-05T20:55:05.000Z", "max_issues_repo_issues_event_min_datetime": "2020-01-28T16:49:24.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "prtest01/MORR", "max_issues_repo_path": "documents/sd/chapter/Classes/Common/Configuration/Configuration.tex", "max_line_length": 134, "max_stars_count": 5, "max_stars_repo_head_hexsha": "0830f2155fb3b32dc127587e07cbd780deb0e118", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "insightmind/MORR", "max_stars_repo_path": "documents/sd/chapter/Classes/Common/Configuration/Configuration.tex", "max_stars_repo_stars_event_max_datetime": "2020-03-26T20:21:13.000Z", "max_stars_repo_stars_event_min_datetime": "2020-02-03T14:52:47.000Z", "num_tokens": 321, "size": 1216 }
\section*{Education}
\begin{tabular}{p{0.1\textwidth} p{0.9\textwidth}}
2018 & Ph.D. in Geophysics, University of Science and Technology of China, China \\
2012 & B.S. in Geophysics, University of Science and Technology of China, China
\end{tabular}
{ "alphanum_fraction": 0.7510040161, "avg_line_length": 41.5, "ext": "tex", "hexsha": "bc608a1571ab39ad0a095254868862f52c7e19ad", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "206a359ff7fcb036befa59fb9e1ce82f4406a9a0", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "core-man/cv", "max_forks_repo_path": "en/education.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "206a359ff7fcb036befa59fb9e1ce82f4406a9a0", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "core-man/cv", "max_issues_repo_path": "en/education.tex", "max_line_length": 82, "max_stars_count": null, "max_stars_repo_head_hexsha": "206a359ff7fcb036befa59fb9e1ce82f4406a9a0", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "core-man/cv", "max_stars_repo_path": "en/education.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 77, "size": 249 }
\author{Sebastien Arnold \break [email protected]}
\title{A Greedy Algorithm to Cluster Specialists}
\date{\today}
\documentclass[12pt]{article}
\input{\string~/tex_templates/core}

% Custom Title
%\def\maketitle{
%\centering
%\par\textbf{\LARGE\@title}
%\par\hfill
%\par{\@author, \@date}
%\par\hfill
%\par\hfill
%\rule{\textwidth}{3pt}
%}

\def\maketitle{
\begin{centering}
\par\rule{\textwidth}{2pt}
\par\hfill
\par\textbf{\LARGE\@title}
\par\hfill
\par{\textit{\@author}}
\par\hfill
\par{\@date}
\par\rule{\textwidth}{2pt}
\end{centering}
}

\begin{document}
\thispagestyle{empty}
\maketitle
\hfill

\abstract{
Several recent deep neural network experiments have leveraged the generalist-specialist paradigm for classification. However, no formal study has compared the performance of different clustering algorithms for class assignment. In this paper we perform such a study, suggest slight modifications to the clustering procedures, and propose a novel algorithm designed to optimize the performance of the specialist-generalist classification system. Our experiments on the CIFAR-10 and CIFAR-100 datasets allow us to investigate situations with a varying number of classes on similar data. We find that our \emph{greedy pairs} clustering algorithm consistently outperforms other alternatives, while the choice of the confusion matrix has little impact on the final performance.
}

\section{Introduction}\label{introduction}

Designing an efficient classification system using deep neural networks is a complicated task, which often involves a multitude of models arranged in ensembles \cite{galaxy}, \cite{vgg}. These ensembles often lead to state-of-the-art results on a wide range of different tasks such as image classification \cite{inception}, speech recognition \cite{deepspeech2}, and machine translation \cite{seq2seq}. The models are trained independently and in parallel, and different techniques can be used to merge their predictions.

\begin{figure}[b]
\centering
\includegraphics[width=\textwidth]{./figs/specialists.png}
\caption{An example of specialist architecture with three specialists.}
\label{fig:1}
\end{figure}

A more structured alternative to ensembling is the use of the specialist-generalist framework. As described by \cite{bochereau1990}, a natural analogy can be drawn from the medical field; a patient first consults a general practitioner who provides an initial diagnosis which is then refined by one or several specialists. In the case of classification, the doctors are replaced by neural networks and the final prediction is a combination of the specialists' outputs, and may or may not include the generalist's take.

In recent years, generalists and specialists have been studied under different circumstances. \cite{darkknowledge} used specialists to create an efficient image classifier for a large private dataset. The final predictions of the specialists were then used to train a reduced classifier that achieved performance similar to the whole ensemble. \cite{emonets} describe a multimodal approach for emotion recognition in videos, based on specialists. Perhaps closest to our work, \cite{wardefarley} added ``auxiliary heads'' (acting as specialists) to their baseline network, using the precomputed features for both classification and clustering. They also underlined one of the main advantages of using specialists: a relatively low (and parallelizable) additional computational cost for increased performance.
\section{Clustering Algorithms}\label{clustering-algorithms}

In the generalist-specialist framework, each class is assigned to one or more specialists. This assignment is usually done by clustering the classes into non-overlapping sets. The goal of the clustering procedure is to optimize the generalist vs specialist accuracy trade-off. In that sense, the algorithm must carefully balance the classification performance of the generalist over the set of clusters and the specialist performance within those clusters. In the case of overlapping clusters, an additional weighting parameter for specialists can be added at inference time.

In order to assign classes to the specialist networks, we compare several clustering algorithms on the confusion matrix of the outputs of the generalist. This confusion matrix is computed on a held-out partition of the dataset. Following previous work, we started by considering two baseline clustering algorithms, namely Lloyd's K-Means algorithm and spectral clustering, according to the formulation of \cite{spectral}. In addition to those baseline algorithms, we evaluate the performance of two novel procedures specifically designed to improve the generalist-specialist paradigm. Those algorithms are described in the following paragraphs, and pseudo code is given in the Appendix.

We also experimented with different ways of building the confusion matrix. Besides the usual way of constructing a confusion matrix by accumulating all predictions for each class (denoted here as \emph{standard}), we tried three alternatives:

\begin{itemize}
\itemsep1pt\parskip0pt\parsep0pt
\item
\emph{softsum}: for each prediction, we use the raw model output instead of the one-hot multi-class output,
\item
\emph{softsum pred}: just like \emph{softsum}, but we only add the prediction output to the confusion matrix if the class was correctly predicted,
\item
\emph{softsum not pred}: like \emph{softsum pred}, but only if the class was incorrectly predicted.
\end{itemize}

As discussed in later sections, the influence of the confusion matrix is minimal. Nonetheless, we include them for completeness.

Both of our clustering algorithms further modify the confusion matrix $A$ by computing $CM = \textbf{A}^\top + \textbf{A}$, which symmetrizes the matrix. We define the entries of this matrix to be the \emph{animosity score} between two classes; given classes \emph{a} and \emph{b}, their animosity score is found at $CM_{a, b}$. We then initialize each cluster with a non-overlapping pair of classes yielding a maximal animosity score. Finally, we greedily select the next classes to be added to the clusters, according to the following rules:

\begin{itemize}
\item
In the case of \emph{greedy single} clustering, a single class maximizing the overall animosity score is added to the cluster yielding the largest averaged sum of animosity towards this class. This partitions the classes into clusters, building on the intuition that classes that are hard to distinguish should be put together.
\item
In the case of \emph{greedy pairs} clustering, we follow the same strategy as in \emph{greedy single} clustering but act on pairs of classes instead of single classes. In this case we allow the clusters to overlap, and one prediction might include the opinion of several specialists.
\end{itemize}

This process is repeated until all classes have been assigned to at least one cluster.
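To make the procedure concrete, the following Python sketch implements \emph{greedy pairs} clustering directly from the description above; it is illustrative only and not the implementation released with this paper. In particular, the function name is our own, the assignment step follows the prose and the stated intuition by picking the cluster with the \emph{largest} accumulated animosity (the pseudo code in the Appendix writes $\mathrm{argmin}$), animosity towards a cluster is summed rather than averaged, and pairs whose classes are both already assigned are simply skipped.

\begin{verbatim}
import numpy as np

def greedy_pairs(confusion, n_clusters):
    # Animosity scores: symmetrize the confusion matrix (CM = A^T + A).
    M = (confusion + confusion.T).astype(float)
    n_classes = M.shape[0]

    # All class pairs (a, b) with a < b, sorted by decreasing animosity.
    pairs = sorted(((a, b) for a in range(n_classes)
                    for b in range(a + 1, n_classes)),
                   key=lambda p: M[p], reverse=True)

    # Seed each cluster with a non-overlapping pair of maximal animosity.
    clusters, assigned = [], set()
    for a, b in pairs:
        if len(clusters) == n_clusters:
            break
        if a not in assigned and b not in assigned:
            clusters.append({a, b})
            assigned |= {a, b}

    def animosity(cls, cluster):
        # Summed animosity of one class towards the members of a cluster.
        return sum(M[cls, other] for other in cluster if other != cls)

    # Greedily assign the remaining high-animosity pairs; clusters may overlap.
    for a, b in pairs:
        if len(assigned) == n_classes:
            break
        if a in assigned and b in assigned:
            continue
        best = max(clusters, key=lambda c: animosity(a, c) + animosity(b, c))
        best |= {a, b}
        assigned |= {a, b}
    return clusters
\end{verbatim}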
\section{Experiments}\label{experiments}

We investigate the performance of the aforementioned algorithms on the CIFAR-10 and CIFAR-100 datasets (\cite{cifar}). Both datasets contain similar images, partitioned into 45'000 train, 5'000 validation, and 10'000 test images. They contain 10 and 100 classes respectively. For both experiments we train the generalist network on the train set only, and use the validation set for clustering purposes. As we are interested in the clustering performance, we neither augment nor pre-process the images. Note that when trained on the horizontally flipped training and validation set, our baseline algorithm reaches 10.18\% and 32.22\% misclassification error respectively, which is competitive with the current state-of-the-art presented in \cite{allcnn}.

Following \cite{binaryconnect}, the baseline network is based on the conclusions of \cite{vgg} and uses three pairs of batch-normalized convolutional layers, each followed by a max-pooling layer, and two fully-connected layers. The same model is used for specialists, whose weights are initialized with the trained weights of the generalist.\footnote{The code for these experiments is freely available online at \url{http://www.github.com/seba-1511/specialists}.}

One major departure from the work of \cite{darkknowledge} is that our specialists predict over the same classes as the generalist, i.e.~we do not merge all classes outside of the cluster into a unique one. With regard to the generalist, a specialist is only biased towards a subset of the classes, since it has been fine-tuned to perform well on those.

\subsection{CIFAR-10}\label{cifar-10}

\begin{longtable}[c]{@{}lllll@{}}
\toprule\addlinespace
Clustering algorithm & standard & softsum & softsum pred & softsum not pred
\\\addlinespace
\midrule\endhead
spectral & (0.7046, 2) & (0.7719, 2) & (0.6989, 2) & (0.706, 2)
\\\addlinespace
greedy singles & (0.5873, 2) & (0.5049, 2) & (0.5139, 3) & (0.5873, 2)
\\\addlinespace
kmeans & (0.8202, 2) & (0.8202, 2) & (0.8202, 2) & (0.8202, 2)
\\\addlinespace
greedy pairs & (0.8835, 2) & (0.8835, 2) & (0.8727, 3) & (0.8835, 2)
\\\addlinespace
\bottomrule
\addlinespace
\caption{Experiment results for CIFAR-10: (accuracy, number of clusters) for each clustering algorithm and confusion matrix type.}
\end{longtable}

For the CIFAR-10 experiments, we considered up to five clusters, and all possible combinations of confusion matrix and clustering algorithms. The results for these experiments are reported in Table 1. For each clustering algorithm and confusion matrix type, we report first the obtained accuracy, and then the number of clusters needed to reach it.

%\begin{table}
%\caption{Experiment results for CIFAR-10}
%\label{tab:1} % Give a unique label
%\begin{tabular}{p{3.2cm}p{2.0cm}p{2.0cm}p{2.0cm}p{2.0cm}}
%\hline\noalign{\smallskip}
%Clustering Algorithm & standard & softsum & softsum pred & softsum not pred \\
%%\noalign{\smallskip}\svhline\noalign{\smallskip}
%spectral & (0.7046, 2) & (0.7719, 2) & (0.6989, 2) & (0.706, 2) \\
%greedy singles & (0.5873, 2) & (0.5049, 2) & (0.5139, 3) & (0.5873, 2) \\
%kmeans & (0.8202, 2) & (0.8202, 2) & (0.8202, 2) & (0.8202, 2) \\
%greedy pairs & (0.8835, 2) & (0.8835, 2) & (0.8727, 3) & (0.8835, 2) \\
%%\noalign{\smallskip}\hline\noalign{\smallskip}
%\end{tabular}
%\end{table}

Interestingly, the choice of confusion matrix has only a limited impact on the overall performance, indicating that the emphasis should be put on the clustering algorithm. We notice that clustering with greedy pairs consistently yields better scores.
However, none of the specialist experiments is able to improve on the baseline, suggesting that specialists might not be the framework of choice when dealing with a small number of classes.

\subsection{CIFAR-100}\label{cifar-100}

For CIFAR-100 we performed the exact same experiment as for CIFAR-10 but used more specialists, the largest experiments involving 28 clusters. The results are shown in Table 2. Again, we report the obtained accuracy and the number of clusters for each clustering algorithm and confusion matrix type.

%\begin{table}
%\caption{Experiment results for CIFAR-100}
%\label{tab:2} % Give a unique label
%\begin{tabular}{p{3.2cm}p{2.0cm}p{2.0cm}p{2.0cm}p{2.0cm}}
%\hline\noalign{\smallskip}
%Clustering Algorithm & standard & softsum & softsum pred & softsum not pred \\
%%\noalign{\smallskip}\svhline\noalign{\smallskip}
%spectral & (0.2632, 2) & (0.1769, 2) & (0.408, 4) & (0.2749, 2) \\
%greedy singles & (0.4112, 2) & (0.4356, 2) & (0.3851, 2) & (0.4308, 2) \\
%kmeans & (0.5123, 2) & (0.5123, 2) & (0.2058, 4) & (0.6141, 2) \\
%greedy pairs & (0.6407, 2) & (0.6375, 4) & (0.6408, 4) & (0.6413, 4) \\
%\noalign{\smallskip}\hline\noalign{\smallskip}
%\end{tabular}
%\end{table}

\begin{longtable}[c]{@{}lllll@{}}
\toprule\addlinespace
Clustering algorithm & standard & softsum & softsum pred & softsum not pred
\\\addlinespace
\midrule\endhead
spectral & (0.5828, 2) & (0.5713, 2) & (0.5755, 2) & (0.5795, 3)
\\\addlinespace
greedy singles & (0.3834, 2) & (0.3733, 2) & (0.3803, 2) & (0.3551, 2)
\\\addlinespace
kmeans & (0.5908, 2) & (0.5618, 2) & (0.5820, 3) & (0.5876, 2)
\\\addlinespace
greedy pairs & (0.6141, 6) & (0.5993, 6) & (0.6111, 6) & (0.607, 6)
\\\addlinespace
\bottomrule
\addlinespace
\caption{Experiment results for CIFAR-100: (accuracy, number of clusters) for each clustering algorithm and confusion matrix type.}
\end{longtable}

Similarly to CIFAR-10, we observe that greedy pairs clustering outperforms the other clustering techniques, and that the different types of confusion matrix have a limited influence on the final score. We also notice that fewer clusters tend to work better. Finally, and unlike the results for CIFAR-10, some of the specialists are able to improve upon the generalist, which confirms our intuition that specialists are better suited to problems involving numerous output classes.

We suggest the following explanation for the improved performance of greedy pairs. Allowing clusters to overlap leads to the assignment of difficult classes to multiple specialists. At inference time, more networks will influence the final prediction, which is analogous to building a larger ensemble for difficult classes.

\section{Conclusion and Future Work}\label{conclusion-and-future-work}

We introduce a novel clustering algorithm for the specialist-generalist framework, which is able to consistently outperform other techniques. We also provide a preliminary study of the different factors coming into play when dealing with specialists, and conclude that the choice of confusion matrix from our proposed set has little impact on the final classification outcome.

Despite our encouraging results with clustering techniques, none of our specialist-based experiments came close to competing with the generalist model trained on the entire train and validation set. This was a surprising outcome, and we suppose that this effect comes from the size of the datasets. In both cases, 5'000 images correspond to 10\% of the original training set, and removing that many training examples has a drastic effect on both generalists and specialists.
All the more so since we are not using any kind of data augmentation technique, which could have mitigated this downside. An obvious future step is to validate the presented ideas on a much larger dataset such as ImageNet \cite{imagenet}, where splitting the train set would not hurt the training score as much.

%\acknowledgement
\subsubsection{Acknowledgments}
We would like to thank Greg Ver Steeg, Gabriel Pereyra, and Pranav Rajpurkar for their comments and advice. We also thank Nervana Systems for providing GPUs as well as for their help with their deep learning framework.

\section{Appendix}\label{appendix}
%\subsection{Greedy Pairs Pseudo Code}\label{greedy-pairs-pseudo-code}

\begin{algorithm}[H]
\caption{Greedy Pairs Clustering}
\label{greedy_pairs}
\begin{algorithmic}[1] % The number tells where the line numbering should start
\Procedure{GreedyPairs}{$M,N$} \Comment{Confusion matrix M, number of clusters N}
\State $M\gets M + M^T$
\State Initialize N clusters with non-overlapping pairs maximizing the entries of M.
\While{every class has not been assigned}
\State Get the next pair $(a, b)$ maximizing the entry in M
\State cluster = $\underset{\text{c in clusters}}{\mathrm{argmin}}$(Animosity(a, c) + Animosity(b, c))
\State Assign(cluster, a, b)
\EndWhile\label{euclidendwhile}
\State \textbf{return} clusters
\EndProcedure
\end{algorithmic}
\end{algorithm}

%Note: A python implementation of both greedy pairs and greedy single can
%be found at \url{http://www.github.com/seba-1511/specialists}.

%\input{referenc}
%\begin{thebibliography}
%\bibliographystyle{apalike}
%\bibliographystyle{}
%\bibliography{biblio}

\begin{thebibliography}{99.}%
\bibitem{bochereau1990}Bochereau, Laurent, and Bourgine, Paul. A Generalist-Specialist Paradigm for Multilayer Neural Networks. Neural Networks, 1990.
\bibitem{binaryconnect}Courbariaux, Matthieu, Bengio, Yoshua, and David, Jean-Pierre. BinaryConnect: Training Deep Neural Networks with Binary Weights during Propagations. NIPS, 2015.
\bibitem{galaxy}Dieleman, Sander, Willett, Kyle W., and Dambre, Joni. Rotation-invariant convolutional neural networks for galaxy morphology prediction. Oxford Journals, 2015.
\bibitem{deepspeech2}Hannun, Awni, Case, Carl, Casper, Jared, Catanzaro, Bryan, Diamos, Greg, Elsen, Erich, Prenger, Ryan, Satheesh, Sanjeev, Sengupta, Shubho, Coates, Adam, and Ng, Andrew Y. Deep Speech: Scaling up end-to-end speech recognition. Arxiv Preprint, 2014.
\bibitem{darkknowledge}Hinton, Geoffrey E., Vinyals, Oriol, and Dean, Jeff. Distilling the Knowledge in a Neural Network. NIPS 2014 Deep Learning Workshop.
\bibitem{emonets}Kahou, Samira Ebrahimi, Bouthillier, Xavier, Lamblin, Pascal, Gulcehre, Caglar, Michalski, Vincent, Konda, Kishore, Jean, Sébastien, Froumenty, Pierre, Dauphin, Yann, Boulanger-Lewandowski, Nicolas, Ferrari, Raul Chandias, Mirza, Mehdi, Warde-Farley, David, Courville, Aaron, Vincent, Pascal, Memisevic, Roland, Pal, Christopher, and Bengio, Yoshua. EmoNets: Multimodal deep learning approaches for emotion recognition in video. Journal on Multimodal User Interfaces, 2015.
\bibitem{cifar}Krizhevsky, Alex. Learning Multiple Layers of Features from Tiny Images. 2009.
\bibitem{spectral}Ng, Andrew Y., Jordan, Michael I., Weiss, Yair. On spectral clustering: Analysis and an algorithm. NIPS 2002.
\bibitem{imagenet}Russakovsky, Olga, Deng, Jia, Su, Hao, Krause, Jonathan, Satheesh, Sanjeev, Ma, Sean, Huang, Zhiheng, Karpathy, Andrej, Khosla, Aditya, Bernstein, Michael, Berg, Alexander C., and Fei-Fei, Li.
ImageNet Large Scale Visual Recognition Challenge. International Journal of Computer Vision, 2015.
\bibitem{vgg}Simonyan, Karen and Zisserman, Andrew. Very Deep Convolutional Networks for Large-Scale Image Recognition. International Conference on Learning Representations, 2015.
\bibitem{allcnn}Springenberg, Jost Tobias, Dosovitskiy, Alexey, Brox, Thomas, and Riedmiller, Martin. Striving for Simplicity: The All Convolutional Net. International Conference on Learning Representations Workshop, 2015.
\bibitem{seq2seq}Sutskever, Ilya, Vinyals, Oriol, and Le, Quoc V. Sequence to Sequence Learning with Neural Networks. Arxiv Preprint, 2014.
\bibitem{inception}Szegedy, Christian, Liu, Wei, Jia, Yangqing, Sermanet, Pierre, Reed, Scott, Anguelov, Dragomir, Erhan, Dumitru, Vanhoucke, Vincent, and Rabinovich, Andrew. Going deeper with convolutions. Arxiv Preprint, 2014.
\bibitem{wardefarley}Warde-Farley, David, Rabinovich, Andrew, and Anguelov, Dragomir. Self-Informed Neural Networks Structure Learning. International Conference on Learning Representations, 2015.
\end{thebibliography}

%\end{multicols}

\end{document}
{ "alphanum_fraction": 0.7692716474, "avg_line_length": 45.4403409091, "ext": "tex", "hexsha": "3e50e902102ecfd349cfe86280e98ea31948b36b", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "9888e639707142db80aafe6ae7bf25f572d34505", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "seba-1511/specialists", "max_forks_repo_path": "paper/paper.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "9888e639707142db80aafe6ae7bf25f572d34505", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "seba-1511/specialists", "max_issues_repo_path": "paper/paper.tex", "max_line_length": 139, "max_stars_count": 1, "max_stars_repo_head_hexsha": "9888e639707142db80aafe6ae7bf25f572d34505", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "seba-1511/specialists", "max_stars_repo_path": "paper/paper.tex", "max_stars_repo_stars_event_max_datetime": "2016-05-31T07:54:31.000Z", "max_stars_repo_stars_event_min_datetime": "2016-05-31T07:54:31.000Z", "num_tokens": 8499, "size": 31990 }
%!TEX root = ../PatilM-[RnD-MT]Report.tex
\chapter{Methodology}
\paragraph{}This chapter aims to highlight both the hardware and the software setup of the developed system. The integration of the software libraries and packages has been implemented and tested extensively on the `KUKA youBot', the aforementioned hardware platform. The following sections briefly illustrate the applied hardware and explain the software packages and their integration with the QSRLib library, which allows us to express physical space qualitatively.

\section{Hardware Setup}
\paragraph{}The robot platform used in our application is the `KUKA youBot'. This is an omni-directional platform equipped with a 5 degree-of-freedom robotic arm that features a two-finger gripper. The robot base is equipped with two `Hokuyo URG-04LX' laser range finders situated at the front and at the back respectively; this placement allows for robust localization and map-based navigation in known indoor environments \cite{Roscoe2012}. Physically, the dimensions of the robot are as follows: length 58 cm, width 38 cm and height 14 cm, with a ground clearance of 2 cm and a minimum and maximum velocity of 0.01 m/s and 0.8 m/s respectively \cite{youbot}. The power for this platform comes from a 24 volt, 5 Ah lead-acid battery that has an approximate optimal runtime of 90 minutes, although this varies depending upon a multitude of factors such as the robot's velocity, the sensors used, etc.

\begin{figure}[h]
\centering
\includegraphics[scale=1]{images/youbot}
\caption{The youBot robot platform}
\label{fig:youbot}
\end{figure}

\paragraph{}The robotic arm hosts a camera mount which supports the `ASUS Xtion Pro Live' RGB-D camera, which, as the name suggests, perceives depth information in addition to the usual RGB or raw image data. This RGB-D camera has a detection range of 80 cm to 3.5 m with a field of view limited to $\ang{58}$, $\ang{45}$ and $\ang{70}$ horizontally, vertically and diagonally respectively \cite{swoboda2014comprehensive}. The image size for this camera is `640x480' at 30 frames per second in VGA format. Being highly compatible with the `OpenNI development framework' also ensures its easy integration with the ROS packages of the `b-it-bots RoboCup@Work' team, which has already developed scene segmentation and object detection applications centered around this sensor. The stock internal computer issued with the youBot has been replaced with an `Intel Core i5' processor, which allows computationally demanding perception tasks to be run directly on the platform \cite{Roscoe2012}.

\begin{figure}[h]
\centering
\includegraphics[scale=0.5]{images/arm_config_l}
\caption{Arm position used in the development and testing of the approach.}
\label{fig:youbotarm}
\end{figure}

\section{Software Framework}
The software framework underpinning the applied robot platform has been developed by the `b-it bots @Work' team and is based on ROS (Kinetic Kame), the Robot Operating System \cite{quigley2009ros}, which provides a modular and distributed structure for designing a functional system based on the developer's requirements. This modular architecture provides a rapid and robust communication infrastructure, based on actions, services and clients, to exchange data amongst the various functional software components of the platform. The framework provides an effective and efficient medium for interfacing various sensors and actuators such as laser scanners, cameras, etc.
\paragraph{}Another merit of the Robot Operating System is its provision of advanced tools for visualizing and testing various types of data and for troubleshooting the entire system in cases of failures or errors. One such heavily utilized tool was `rosbag', used in particular to capture data \cite{Hegger2012} and to evaluate the developed implementation with varying parameters, but always on the same set of captured data.

\section{Integration of Qualitative Representations}
\paragraph{}The current software framework set up on the robot consists of numerous packages that allow control of its base and arm actuators while also providing an effective interface to its various sensors. This modular approach facilitates the use of these small components to develop higher-level tasks. In the case of qualitative spatial representations, we need to access the raw RGB image from the camera to detect features or objects and further extract their approximate pose in order to build a qualitative relation with respect to the robot. The nature of this relation depends upon the type of qualitative calculus that is used to define these abstractions.

\paragraph{}As the objective of this project is to show a generalized and efficient utilization of the existing qualitative calculi, we do not develop any of these calculi from scratch; instead, we exploit an existing qualitative spatial representations library called `QSRLib' \cite{gatsoulis2016qsrlib}, which contains ROS-compatible Python implementations of the various qualitative calculi discussed in the previous chapter. Although this library can be used either as a standalone Python package or as a ROS catkin package, we use it primarily with ROS and hence focus on its installation and integration in that setting. The `qsr\_lib' package is the one used in our implementation and has system dependencies on `numpy' and `matplotlib'. Installing the library is straightforward: it involves cloning the repository (https://github.com/strands-project/strands\_qsr\_lib.git) from git and moving the `qsr\_lib' package into the `src' folder of our catkin workspace \cite{qsrlib}.

\paragraph{}The prerequisites for using any of the qualitative calculi implemented in the library \cite{qsrlib}, \cite{gatsoulis2016qsrlib} are input data such as distinctive object IDs or names for the various objects amongst which a qualitative relation is desired, a time series of the states of the perceived objects, and the Cartesian coordinates of each object at every instant of the time series. This information is packaged into a custom input data object that is the default input data format of the library. Additional information, such as the size of an object, may also be included in the input data object, but it is mandatory to include the name of the qualitative calculus for which the qualitative spatial relations are to be computed. This input data structure is sent to a QSRLib service in the form of a request message; the server then computes the relationships and sends the output in the form of a response message (following the ROS service-client architecture) that details the qualitative relations between the objects, constrained by their respective time stamps. The library comes with the necessary functions to convert raw data into the data structure format required by the library.
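\paragraph{}To illustrate this request--response flow, the following minimal Python snippet builds the input data object for two hypothetical objects and requests the relations of a single calculus. It follows the usage pattern documented for QSRLib, but the exact class and argument names, the chosen calculus (here `rcc8'), and of course the object names, sizes and coordinates are assumptions made for this sketch and should be checked against the installed version of the library.

\begin{verbatim}
from qsrlib.qsrlib import QSRlib, QSRlib_Request_Message
from qsrlib_io.world_trace import Object_State, World_Trace

# Build the time series of object states (names, timestamps, coordinates).
world = World_Trace()
world.add_object_state_series(
    [Object_State(name="robot", timestamp=t, x=float(t), y=0.0,
                  xsize=0.5, ysize=0.4) for t in range(3)])
world.add_object_state_series(
    [Object_State(name="marker", timestamp=t, x=4.0, y=1.0,
                  xsize=0.2, ysize=0.2) for t in range(3)])

# Request the qualitative relations for one calculus and inspect the reply.
qsrlib = QSRlib()
request = QSRlib_Request_Message(which_qsr="rcc8", input_data=world)
response = qsrlib.request_qsrs(req_msg=request)
for t in response.qsrs.get_sorted_timestamps():
    for objects, rel in response.qsrs.trace[t].qsrs.items():
        print(t, objects, rel.qsr)
\end{verbatim}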
The output of the QSRLib can then be further inspected and employed to make decisions regarding the movement or path of the applied mobile platform. \begin{figure}[h] \centering \includegraphics[scale=0.7]{images/qsrlib_flow} \caption{A flow chart detailing the inner workings of the QSRLib library \cite{qsrlib}, \cite{gatsoulis2016qsrlib}.} \label{fig:qsrlibflow} \end{figure} %\section{Use Cases} %\paragraph{} The main aim of this project is to evaluate the efficiency paradigm of qualitative spatial representations for navigation in mobile robots, especially in closed indoor spaces where quantitative representations of the physical space are considered excessive and unnecessary. Therefore keeping in mind these preconditions we define the following possible use cases. % %\subsubsection*{Use case: Navigating in a corridor environment} %\paragraph{} A mobile robot is tasked with navigating from point `A' to point `B' in a corridor environment such that it should avoid collision with the walls and any other static or dynamic obstacles if they exist in its path. Furthermore the robot's movement should be such that it avoids any sudden motions that may seem unsafe or unintuitive to an human agent who might interact with the robot. Also chiefly, the robot must achieve this path traversal in a manner that is as efficient as possible ,while dealing with imprecise information about the environment or in extreme cases lack of complete information about the environment. % %\subsubsection*{Resulting Requirements} %\paragraph{} Ideally it is desired that any new functionality that is being implemented must be highly generalizable, but keeping in mind the given problem it is unrealistic to expect a `one size fits all' implementation that works impeccably for all imaginable situations without any restrictions or compromises. Hence a list of requirements resulting from the problem statement and the above described use case is presented below: % %\begin{itemize} % \item The starting position should be irrelevant when navigating the corridor. % % \item The detection of the markers or features should be robust with respect to a reasonable speed of the robot. % % \item The number of markers or features should not adversely affect the robot's behavior. % % \item The camera used should have a reasonable field of view so that it can see both the walls and their respective features at any given point of time. % % \item The camera should be able to capture the features reasonably well, irrespective of the lighting conditions. % % \item The motion profile of the robot should be a smooth and not disruptive. % % \item It should be able to navigate any corridor irrespective of its size or the color of its walls. %\end{itemize}
{ "alphanum_fraction": 0.8095286885, "avg_line_length": 143.5294117647, "ext": "tex", "hexsha": "074e10dd971e06e0372e119cf53960ca3706f78f", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "da3c6b47cdd7ec9b211a33d107ec4a6b2a0ff4b3", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "MihirSPatil/Utilizing-qualitative-spatial-representations-for-control-of-mobile-robots-", "max_forks_repo_path": "Report/project-report-1.0.1/chapters/ch04_methodology.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "da3c6b47cdd7ec9b211a33d107ec4a6b2a0ff4b3", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "MihirSPatil/Utilizing-qualitative-spatial-representations-for-control-of-mobile-robots-", "max_issues_repo_path": "Report/project-report-1.0.1/chapters/ch04_methodology.tex", "max_line_length": 1380, "max_stars_count": null, "max_stars_repo_head_hexsha": "da3c6b47cdd7ec9b211a33d107ec4a6b2a0ff4b3", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "MihirSPatil/Utilizing-qualitative-spatial-representations-for-control-of-mobile-robots-", "max_stars_repo_path": "Report/project-report-1.0.1/chapters/ch04_methodology.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 2052, "size": 9760 }
\chapter{Background}\label{chap:background} Explain the math and notation. \input{figures/background/algBackpropagation} \input{figures/background/figTikz}
{ "alphanum_fraction": 0.8387096774, "avg_line_length": 38.75, "ext": "tex", "hexsha": "bc5a382794843bcf10d227dd43cf8c2cebbbbb77", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "ab93ae817cd027289191db906dca036a02691a73", "max_forks_repo_licenses": [ "CC0-1.0" ], "max_forks_repo_name": "PhilJd/thesis-template", "max_forks_repo_path": "chapters/3-background.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "ab93ae817cd027289191db906dca036a02691a73", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "CC0-1.0" ], "max_issues_repo_name": "PhilJd/thesis-template", "max_issues_repo_path": "chapters/3-background.tex", "max_line_length": 45, "max_stars_count": 1, "max_stars_repo_head_hexsha": "ab93ae817cd027289191db906dca036a02691a73", "max_stars_repo_licenses": [ "CC0-1.0" ], "max_stars_repo_name": "PhilJd/thesis-template", "max_stars_repo_path": "chapters/3-background.tex", "max_stars_repo_stars_event_max_datetime": "2018-08-03T10:40:03.000Z", "max_stars_repo_stars_event_min_datetime": "2018-08-03T10:40:03.000Z", "num_tokens": 40, "size": 155 }
\newcommand{\pto}{\to_{*}} \section{Notation} This specification features some changes to the notation used in previous specifications. \begin{description} \item[Maps and partial functions] We use the notation $f : A \pto B$ to denote a finitely supported partial function. If $B$ is a monoid, $f$ is a function such that $f a = 0$ for all but finitely many $a$. Otherwise it is a function $f : A \to B^?$ such that $f a = \Nothing$ for all but finitely many $a$. \item[Map operations] We use standard notation for restriction and corestriction of functions to operate on partial functions as well. \item[Working with partial values] We sometimes need a notation that bubbles up $\Nothing$. $t^?$ might be nice, but how does it behave for things like sets \& functions? Also, we sometimes need to do an equality check except in a $\Nothing$ case. Good notation for that would be nice. \item[Accessor functions] ??? \item[Record updates] ??? \end{description}
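As a purely illustrative example of the first item above (it is not itself part of the specification), consider a finitely supported map into a monoid versus one into a plain set:
\begin{displaymath}
  f : \{a, b, c\} \pto \mathbb{N}, \qquad f\,a = 5, \quad f\,b = 2, \quad f\,c = 0
\end{displaymath}
Since $\mathbb{N}$ is a monoid under addition, $f$ is represented as a total function that is $0$ outside its finite support $\{a, b\}$. If the codomain were instead a set $B$ with no monoid structure, the same map would be represented as a function $g : \{a, b, c\} \to B^?$ with $g\,c = \Nothing$.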
{ "alphanum_fraction": 0.7308868502, "avg_line_length": 40.875, "ext": "tex", "hexsha": "3bfd9ca30675147a728773ca59c3de69adcc9827", "lang": "TeX", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2022-03-14T16:56:46.000Z", "max_forks_repo_forks_event_min_datetime": "2022-03-14T16:56:46.000Z", "max_forks_repo_head_hexsha": "9c3b4737b13b30f71529e76c5330f403165e28a6", "max_forks_repo_licenses": [ "Apache-2.0" ], "max_forks_repo_name": "MELD-labs/cardano-ledger", "max_forks_repo_path": "eras/babbage/formal-spec/notation.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "9c3b4737b13b30f71529e76c5330f403165e28a6", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Apache-2.0" ], "max_issues_repo_name": "MELD-labs/cardano-ledger", "max_issues_repo_path": "eras/babbage/formal-spec/notation.tex", "max_line_length": 89, "max_stars_count": null, "max_stars_repo_head_hexsha": "9c3b4737b13b30f71529e76c5330f403165e28a6", "max_stars_repo_licenses": [ "Apache-2.0" ], "max_stars_repo_name": "MELD-labs/cardano-ledger", "max_stars_repo_path": "eras/babbage/formal-spec/notation.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 261, "size": 981 }
\subsection{$A$-sequence and $A$-matrix} \label{sec:back:to:the:basics:sequences} In this section we rework the concept of $A$-sequence introduced by \citeauthor{rogers:1977} and discussed in \autoref{sec:back:to:the:basics:rogers}, using the \emph{Riordan group} to characterize it for \emph{Riordan arrays}. Furthermore, we will see a generalization of this concept, introducing $A$-matrices and some applications are reported for the sake of clarity. \begin{theorem} $\mathcal{M}$ is a Riordan array over a matrix of coefficients $\lbrace m_{n,k}\rbrace_{n,k\in\mathbb{N}}$ if and only if there exists a sequence $\vect{a}=\lbrace a_{n}\rbrace_{n\in\mathbb{N}}$, with $a_{0}\neq0$, such that: \begin{displaymath} m_{n+1,k+1}=a_{0}m_{n,k}+a_{1}m_{n,k+1}+a_{2}m_{n,k+2}+\ldots+a_{j}m_{n,k+j} \end{displaymath} where $n,k,j\in\mathbb{N}$ and $k+j=n$. Sequence $\vect{a}$ is called the $A$-sequence of Riordan array $\mathcal{M}$. \label{thm:merlini:A:sequence:characterization} \end{theorem} \marginpar{The idea behind this proof comes from \cite{he:sprugnoli:2009}} \begin{proof}[Proof of ($\rightarrow$) direction] Let $\mathcal{M}=(d_{\mathcal{M}}(t),h_{\mathcal{M}}(t))$ be a Riordan array. Since it is requested to prove that such a sequence $\vect{a}$ \emph{exists}, we proceed to build a Riordan array $\mathcal{A}=(d_{\mathcal{A}}(t),h_{\mathcal{A}}(t))$ such that function $d_{\mathcal{A}}$ is the \ac{gf} for sequence $\vect{a}$, proving this direction. Riordan array $\mathcal{A}$ is defined as the solution of the following relation: \begin{displaymath} (d_{\mathcal{M}}(t),h_{\mathcal{M}}(t))\cdot(d_{\mathcal{A}}(t),h_{\mathcal{A}}(t)) = \left(d_{\mathcal{M}}(t)\frac{h_{\mathcal{M}}(t)}{t},h_{\mathcal{M}}(t)\right) \end{displaymath} by definition of group operator $\cdot$, the \ac{lhs} can be rewritten as: \begin{displaymath} (d_{\mathcal{M}}(t)d_{\mathcal{A}}(h_{\mathcal{M}}(t)),h_{\mathcal{A}}(h_{\mathcal{M}}(t))) = \left(d_{\mathcal{M}}(t)\frac{h_{\mathcal{M}}(t)}{t},h_{\mathcal{M}}(t)\right) \end{displaymath} Looking at second components, the following relation defines function $h_{\mathcal{A}}$: \begin{displaymath} \left[h_{\mathcal{A}}(y)=y \mid y = h_{\mathcal{M}}(t)\right] \end{displaymath} while looking at the first component, the following relation defines function $d_{\mathcal{A}}$: \marginpar{a \ac{gf} over $A$-sequence $\vect{a}$ for Riordan arrays} \begin{displaymath} \left[d_{\mathcal{A}}(y)=\frac{y}{\hat{h}_{\mathcal{M}}(y)} \mid y = h_{\mathcal{M}}(t)\right] \end{displaymath} therefore it is possible to write the complete definition for array $\mathcal{A}$: \begin{displaymath} \mathcal{A}=(d_{\mathcal{A}}(y),h_{\mathcal{A}}(y)) =\left(\frac{y}{\hat{h}_{\mathcal{M}}(y)}, y \right) \end{displaymath} \marginpar{although we are in the Riordan group, here we go back to matrix theory} Assume function $d_{\mathcal{A}}$ can be expanded as \ac{fps} over a sequence $\vect{a}=\lbrace a_{n}\rbrace_{n\in\mathbb{N}}$, therefore expanding array $\mathcal{A}$ as a matrix $\lbrace a_{nk}\rbrace_{n,k\in\mathbb{N}}$ yield: \begin{displaymath} \mathcal{A} = \left[ \begin{array}{cccccc} a_{0} & & & & &\\ a_{1} & a_{0} & & & &\\ a_{2} & a_{1}& a_{0}& & &\\ a_{3} & a_{2}& a_{1}& a_{0}& &\\ a_{4} & a_{3}& a_{2}& a_{1}& a_{0} &\\ \vdots & \vdots& \vdots& \vdots& \vdots & \ddots\\ \end{array} \right] \end{displaymath} so doing the \emph{matrix} product of array $\mathcal{M}$ with array $\mathcal{A}$ yield: \begin{displaymath} \mathcal{M}\mathcal{A} = \left[ \begin{array}{cccccc} m_{00} & & & & &\\ m_{10} & 
m_{11} & & & &\\ m_{20} & m_{21}& m_{22}& & &\\ m_{30} & m_{31}& m_{32}& m_{33}& &\\ m_{40} & m_{41}& m_{42}& m_{43}& m_{44} &\\ \vdots & \vdots& \vdots& \vdots& \vdots & \ddots\\ \end{array} \right] \left[ \begin{array}{cccccc} a_{0} & & & & &\\ a_{1} & a_{0} & & & &\\ a_{2} & a_{1}& a_{0}& & &\\ a_{3} & a_{2}& a_{1}& a_{0}& &\\ a_{4} & a_{3}& a_{2}& a_{1}& a_{0} &\\ \vdots & \vdots& \vdots& \vdots& \vdots & \ddots\\ \end{array} \right] \end{displaymath}
Consider the coefficient at row $n$ and column $k$ of $\mathcal{M}\mathcal{A}$; by definition of \emph{matrix} product:
\begin{displaymath} \left(\mathcal{M}\mathcal{A}\right)_{nk}= \sum_{j=0}^{n}{m_{nj}{a_{jk}}} =\sum_{j=0}^{n-k}{m_{n,k+j}{a_{k+j,k}}} =\sum_{j=0}^{n-k}{m_{n,k+j}{a_{j}}} \end{displaymath}
Do the same for the coefficient at row $n$ and column $k$ of $\left(d_{\mathcal{M}}(t)\frac{h_{\mathcal{M}}(t)}{t},h_{\mathcal{M}}(t)\right)$:
\begin{displaymath} [t^{n+1}]d_{\mathcal{M}}(t)\,h_{\mathcal{M}}(t)^{k+1} = m_{n+1,k+1} \end{displaymath}
so equating this with the previous result yields:
\begin{displaymath} m_{n+1,k+1}=\sum_{j=0}^{n}{m_{n,k+j}{a_{j}}} \end{displaymath}
as required. \end{proof}
The other direction is less interesting since it is not a \emph{constructive} one, so we leave it as an exercise. \\\\
The above proof leaves room for another result. Start again from the following equation:
\begin{displaymath} (d_{\mathcal{M}}(t)d_{\mathcal{A}}(h_{\mathcal{M}}(t)),h_{\mathcal{M}}(t)) = \left(d_{\mathcal{M}}(t)\frac{h_{\mathcal{M}}(t)}{t},h_{\mathcal{M}}(t)\right) \end{displaymath}
and consider the coefficient at row $n$ and column $k$ in both arrays, as done before:
\begin{displaymath} [t^{n}]d_{\mathcal{A}}(h_{\mathcal{M}}(t))\left(d_{\mathcal{M}}(t)\,h_{\mathcal{M}}(t)^{k}\right) = [t^{n+1}]d_{\mathcal{M}}(t)\,h_{\mathcal{M}}(t)^{k+1} \end{displaymath}
\marginpar{sequence $\vect{\hat{a}}$, with \ac{gf} $\hat{A}(t)$, is yet another characterization} on the \ac{lhs} appears a convolution of function $\hat{A}(t)=d_{\mathcal{A}}(h_{\mathcal{M}}(t))$, which can be written as a \ac{fps} over sequence $\vect{\hat{a}}=\lbrace\hat{a}_{i}\rbrace_{i\in\mathbb{N}}$, with function $m_{k}(t)=d_{\mathcal{M}}(t)\,h_{\mathcal{M}}(t)^{k}$. Therefore the following identity holds as well:
\begin{displaymath} \sum_{j=0}^{n}{\hat{a}_{j}\,m_{n-j,k}} = m_{n+1,k+1} \end{displaymath}
Combining this result with the one stated in the above theorem, the following holds:
\begin{displaymath} \sum_{j=0}^{n}{\hat{a}_{j}\,m_{n-j,k}} =\sum_{j=0}^{n}{m_{n,k+j}{a_{j}}} \end{displaymath}
Yet another characterization for a coefficient $m_{nk}$ in a Riordan array $\mathcal{M}$ can be stated and is closely related to $\mathcal{M}$'s $A$-sequence.
\begin{theorem} Let $\mathcal{M}$ be a Riordan array over a matrix of coefficients $\lbrace m_{n,k}\rbrace_{n,k\in\mathbb{N}}$, then there exists a sequence $\vect{\omega}=\lbrace \omega_{n}\rbrace_{n\in\mathbb{N}}$ such that:
\begin{displaymath} m_{nk}=\sum_{j=0}^{n-k}{\omega_{j}\,m_{n+1,k+1+j}} \end{displaymath}
moreover, denoting with $\Omega$ a \ac{gf} over sequence $\vect{\omega}$ and with $A$ the \ac{gf} for $\mathcal{M}$'s $A$-sequence, then:
\begin{displaymath} A(t)\,\Omega(t)=1 \end{displaymath}
\label{thm:characterization:next:row} \end{theorem}
\begin{proof} Assume not, therefore for \emph{each} sequence $\vect{\omega}=\lbrace \omega_{n}\rbrace_{n\in\mathbb{N}}$, from:
\begin{displaymath} m_{nk}\neq\sum_{j=0}^{n-k}{\omega_{j}\,m_{n+1,k+1+j}} \end{displaymath}
we have to show a contradiction.
By the $A$-sequence characterization, there exists a sequence $\vect{a}=\lbrace a_{n}\rbrace_{n\in\mathbb{N}}$ which allows us to rewrite the terms $m_{n+1,k+1+j}$ within the sum in the \ac{rhs}:
\begin{displaymath} \begin{split} m_{nk} &\neq \omega_{0}\left(a_{0}m_{n,k}+a_{1}m_{n,k+1}+a_{2}m_{n,k+2}+a_{3}m_{n,k+3}+\ldots+a_{j_{0}}m_{n,k+j_{0}}\right)\\ &+ \omega_{1}\left(a_{0}m_{n,k+1}+a_{1}m_{n,k+2}+a_{2}m_{n,k+3}+a_{3}m_{n,k+4}+\ldots+a_{j_{1}}m_{n,k+j_{1}}\right)\\ &+ \omega_{2}\left(a_{0}m_{n,k+2}+a_{1}m_{n,k+3}+a_{2}m_{n,k+4}+a_{3}m_{n,k+5}+\ldots+a_{j_{2}}m_{n,k+j_{2}}\right)\\ &+ \omega_{3}\left(a_{0}m_{n,k+3}+a_{1}m_{n,k+4}+a_{2}m_{n,k+5}+a_{3}m_{n,k+6}+\ldots+a_{j_{3}}m_{n,k+j_{3}}\right)\\ &\ldots\\ &+ \omega_{n-k-1}\left(a_{0}m_{n,n-1}+a_{1}m_{n,n}\right)\\ &+ \omega_{n-k}\left(a_{0}m_{n,n}\right)\\ \end{split} \end{displaymath}
factoring coefficients $m_{nk}$, the \ac{rhs} can be rewritten as:
\begin{displaymath} \begin{split} m_{nk}&\neq \omega_{0}a_{0}m_{n,k} + \left(\sum_{i_{1}+i_{2}=1}{\omega_{i_{1}}a_{i_{2}}}\right)m_{n,k+1} + \left(\sum_{i_{1}+i_{2}=2}{\omega_{i_{1}}a_{i_{2}}}\right)m_{n,k+2}\\ &+ \left(\sum_{i_{1}+i_{2}=3}{\omega_{i_{1}}a_{i_{2}}}\right)m_{n,k+3} + \ldots + \left(\sum_{i_{1}+i_{2}=n-k}{\omega_{i_{1}}a_{i_{2}}}\right)m_{n,n} \end{split} \end{displaymath}
which can be written more compactly as:
\begin{displaymath} m_{nk} \neq \omega_{0}a_{0}m_{n,k} + \sum_{j=1}^{n-k}{\left(\sum_{i_{1}+i_{2}=j}{\omega_{i_{1}}a_{i_{2}}}\right)m_{n,k+j}} \end{displaymath}
but if we take sequence $\vect{\omega}$ to be the \emph{inverse} sequence of $\mathcal{M}$'s $A$-sequence $\vect{a}$, then the previous inequality cannot hold: since $\omega_{0}a_{0}=1$ and $\sum_{i_{1}+i_{2}=j}{\omega_{i_{1}}a_{i_{2}}}=0$ for each $j\in\lbrace1,\ldots,n-k\rbrace$, equality in fact \emph{holds}. A contradiction occurs, as required. \end{proof}
\subsubsection{$A$-matrix}
We are now ready to tackle the most general characterization for a coefficient $m_{nk}$ in a Riordan array $\mathcal{M}$, the concept of $A$-matrix. We will see only one formulation here, although two further, even more general ones exist. The following theorem and the proof idea come from \cite{merlini:some:alternative:characterizations:1997}, where additional formulations of the $A$-matrix concept can be found.
\begin{theorem} $\mathcal{M}$ is a Riordan array, over a matrix of coefficients $\lbrace m_{n,k}\rbrace_{n,k\in\mathbb{N}}$, if there exists a matrix $\lbrace \sigma_{nk}\rbrace_{n,k\in\mathbb{N}}$ such that:
\begin{displaymath} m_{n+1,k+1}=\sum_{i\in\mathbb{N}}{\sum_{j\in\mathbb{N}}{\sigma_{ij}m_{n-i,k+j}}} \end{displaymath}
\end{theorem}
\begin{proof} The idea underlying this proof is to show that a sequence $\vect{a}=\lbrace a_{n}\rbrace_{n\in\mathbb{N}}$ can be built from matrix $\lbrace \sigma_{nk}\rbrace_{n,k\in\mathbb{N}}$, such that $\vect{a}$ can be used to combine elements in matrix $\lbrace m_{n,k}\rbrace_{n,k\in\mathbb{N}}$ according to:
\begin{displaymath} m_{n+1,k+1}=a_{0}m_{nk}+a_{1}m_{n,k+1}+\ldots+a_{j}m_{n,k+j} \end{displaymath}
where $k+j=n$, as usual. If we succeed, then sequence $\vect{a}$ is the $A$-sequence for $\lbrace m_{n,k}\rbrace_{n,k\in\mathbb{N}}$, therefore $\mathcal{M}$ is a Riordan array. \\\\
Choose $n\in\mathbb{N}$, then proceed by natural induction on $n-k$: \begin{itemize} \item base case, $n-k=0$.
By assumption the following holds: \begin{displaymath} m_{n+1,n+1}=\sigma_{00}\,m_{nn} \end{displaymath} which suggests to set $a_{0}=\sigma_{00}$; \item although not required, consider case $n-k=1$. By assumption the following holds: \begin{displaymath} m_{n+1,n}=\sigma_{00}m_{n,n-1}+\sigma_{01}m_{n,n}+\sigma_{10}m_{n-1,n-1} \end{displaymath} by \autoref{thm:characterization:next:row}, there exist a sequence $\vect{\omega}= \lbrace \omega_{n}\rbrace_{n\in\mathbb{N}}$ which allow us to rewrite $m_{n-1,n-1}$ as a combination of $m_{nn}$: \begin{displaymath} m_{n+1,n}=\sigma_{00}m_{n,n-1}+\left(\sigma_{01}+\sigma_{10}\omega_{0}\right)m_{nn} \end{displaymath} which reinforces $a_{0}=\sigma_{00}$ and suggests to set $a_{1}=\sigma_{01}+\sigma_{10}\omega_{0}$; moreover, using sequence $\vect{\omega}$ to rewrite terms in the \ac{rhs}: \begin{displaymath} m_{n+1,n}=\sigma_{00}\left(\omega_{0}m_{n+1,n}+\omega_{1}m_{n+1,n+1}\right)+ \left(\frac{\sigma_{01}+\sigma_{10}\omega_{0}}{\sigma_{00}}\right)m_{n+1,n+1} \end{displaymath} manipulating: \begin{displaymath} m_{n+1,n}= \left(\frac{\sigma_{00}^{2}\omega_{1}+\sigma_{01}+\sigma_{10}\omega_{0}} {(1-\sigma_{00}\omega_{0})\sigma_{00}}\right)m_{n+1,n+1} \end{displaymath} \item \emph{induction hp} assume that, for $n-k=n-1$, ie. $k=1$, if: \begin{displaymath} m_{n+1,2}=\sum_{i\in\mathbb{N}}{\sum_{j\in\mathbb{N}}{\sigma_{ij}m_{n-i,1+j}}} \end{displaymath} then there exists a sequence $\vect{\hat{a}}$ such that: \begin{displaymath} m_{n+1,2}=\hat{a}_{0}m_{n1}+\hat{a}_{1}m_{n,2}+\ldots+\hat{a}_{j}m_{n,1+j} \end{displaymath} where $1+j=n$; \item \emph{induction step} show that, for $n-k=n$, ie. $k=0$, if: \begin{displaymath} m_{n+1,1}=\sum_{i\in\mathbb{N}}{\sum_{j\in\mathbb{N}}{\sigma_{ij}m_{n-i,j}}} \end{displaymath} then there exists a sequence $\vect{a}$ such that: \begin{displaymath} m_{n+1,1}=a_{0}m_{n0}+a_{1}m_{n,1}+\ldots+a_{n}m_{n,n} \end{displaymath} Begin by expanding the assumption: \begin{displaymath} \hspace{-2cm} \begin{split} m_{n+1,1}&=\sigma_{00}m_{n0}+\sigma_{01}m_{n1}+\sigma_{02}m_{n2}+\ldots +\sigma_{0,n-2}m_{n,n-2}+\sigma_{0,n-1}m_{n,n-1}+\sigma_{0n}m_{nn}\\ &+\sigma_{10}m_{n-1,0}+\sigma_{11}m_{n-1,1}+\sigma_{12}m_{n-1,2}+\ldots +\sigma_{1,n-2}m_{n-1,n-2}+\sigma_{1,n-1}m_{n-1,n-1}\\ &+\sigma_{20}m_{n-2,0}+\sigma_{21}m_{n-2,1}+\sigma_{22}m_{n-2,2}+\ldots+\sigma_{2,n-2}m_{n-2,n-2}\\ &\ldots\\ &+\sigma_{n-1,0}m_{10}+\sigma_{n-1,1}m_{11}\\ &+\sigma_{n0}m_{00}\\ \end{split} \end{displaymath} from the bottom line of the previous sum expansion, keep applying \autoref{thm:characterization:next:row} to every coefficient $m_{rc}$, for $r\in\lbrace0,\ldots,n-1\rbrace$ and, consequently, $c\in\lbrace0,\ldots,r\rbrace$. When every coefficient $m_{n-1,c}$, for $c\in\lbrace0,\ldots,n-1\rbrace$, has been expanded, coefficient $m_{n+1,1}$ is a combination of coefficients $\lbrace m_{n0}\rbrace\cup\lbrace m_{n,1},m_{n,2}\ldots,m_{n,n}\rbrace$ using a sequence $\vect{\beta}$, namely: \begin{displaymath} m_{n+1,1}=\beta_{0}m_{n0}+\beta_{1}m_{n1}+\ldots+\beta_{n}m_{nn} \end{displaymath} By induction hypothesis, there exists a sequence $\vect{\hat{a}}$ such that: \begin{displaymath} m_{n+1,2}=\hat{a}_{0}m_{n1}+\hat{a}_{1}m_{n2}+\ldots+\hat{a}_{n-1}m_{nn} \end{displaymath} so, we can build a sequence $\vect{a}$ as follows: \begin{displaymath} \vect{a}=\left(\hat{a}_{0},\hat{a}_{1},\hat{a}_{2},\ldots,\hat{a}_{n-1},\beta_{n}\right) \end{displaymath} where $\beta_{n}$ can be possibly different from $\hat{a}_{n-1}$. 
Sequence $\vect{a}$ is the $A$-sequence for matrix $\lbrace m_{nk}\rbrace_{n,k\in\mathbb{N}}$, therefore $\mathcal{M}$ is a Riordan array, as required. \end{itemize} \end{proof}
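To close this section we give a small worked example of \autoref{thm:merlini:A:sequence:characterization}; the example is ours and is not taken from the cited sources. Consider the Pascal triangle, which is the Riordan array
\begin{displaymath}
\mathcal{P}=\left(\frac{1}{1-t},\frac{t}{1-t}\right)
\end{displaymath}
whose coefficients are the binomial coefficients $p_{nk}=\binom{n}{k}$. Here $h_{\mathcal{P}}(t)=\frac{t}{1-t}$ has compositional inverse $\hat{h}_{\mathcal{P}}(y)=\frac{y}{1+y}$, so the construction used in the proof gives
\begin{displaymath}
d_{\mathcal{A}}(y)=\frac{y}{\hat{h}_{\mathcal{P}}(y)}=1+y
\end{displaymath}
that is, the $A$-sequence is $\vect{a}=(1,1,0,0,\ldots)$ and the characterization reduces to the familiar recurrence
\begin{displaymath}
p_{n+1,k+1}=p_{n,k}+p_{n,k+1}
\end{displaymath}
which is nothing but the usual rule $\binom{n+1}{k+1}=\binom{n}{k}+\binom{n}{k+1}$ for building the triangle.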
{ "alphanum_fraction": 0.5650272569, "avg_line_length": 49.0970588235, "ext": "tex", "hexsha": "6a75a0553567020a26061303ac944941dca751a2", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "0d82bfcc82c92512d0795f286256a19f39b9b1f9", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "massimo-nocentini/master-thesis", "max_forks_repo_path": "classicthesis/Chapters/back-to-the-basics/sequences.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "0d82bfcc82c92512d0795f286256a19f39b9b1f9", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "massimo-nocentini/master-thesis", "max_issues_repo_path": "classicthesis/Chapters/back-to-the-basics/sequences.tex", "max_line_length": 137, "max_stars_count": null, "max_stars_repo_head_hexsha": "0d82bfcc82c92512d0795f286256a19f39b9b1f9", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "massimo-nocentini/master-thesis", "max_stars_repo_path": "classicthesis/Chapters/back-to-the-basics/sequences.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 6078, "size": 16693 }
\documentclass{article} \usepackage{color} \usepackage{bussproofs} \begin{document} \paragraph{Exercise 3.3.4} \color{red} TODO \color{black} \paragraph{Exercise 3.5.5} Proof 3.3.4 uses structural induction. \paragraph{Exercise 3.5.10} \AxiomC{$t \longrightarrow t'$} \UnaryInfC{$t \longrightarrow^* t'$} \DisplayProof \hspace{3em} \AxiomC{$t \longrightarrow^* t'$} \AxiomC{$t' \longrightarrow^* t''$} \BinaryInfC{$t \longrightarrow^* t''$} \DisplayProof \end{document}
{ "alphanum_fraction": 0.723628692, "avg_line_length": 20.6086956522, "ext": "tex", "hexsha": "494227003dcf4451f5796b8d38d84c2f33445359", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "236c73ff32ce81191691ed7a7945401be7047c35", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "ayberkt/TAPL", "max_forks_repo_path": "exercises/chapter-3.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "236c73ff32ce81191691ed7a7945401be7047c35", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "ayberkt/TAPL", "max_issues_repo_path": "exercises/chapter-3.tex", "max_line_length": 38, "max_stars_count": 1, "max_stars_repo_head_hexsha": "236c73ff32ce81191691ed7a7945401be7047c35", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "ayberkt/TAPL", "max_stars_repo_path": "exercises/chapter-3.tex", "max_stars_repo_stars_event_max_datetime": "2017-02-26T06:13:17.000Z", "max_stars_repo_stars_event_min_datetime": "2017-02-26T06:13:17.000Z", "num_tokens": 170, "size": 474 }
\chapter{Introduction} Here is the example of contents. \section{Sample of Section} Integral imaging has been studied in various research fields \see{ii}. Figure \ref{fig:logo} shows the logo of HKNU. \begin{figure}[htbp] \centering \includegraphics[width=5cm]{hknu.png} \caption{Logo of HKNU} \label{fig:logo} \end{figure} \subsection{Sample of SubSection} Table \ref{tab:comp_algorithm} shows the comparison of the proposed algorithms. \begin{table}[htbp] \centering \caption{Comparison of our algorithms} \begin{tabular}{ll} \toprule \textbf{Algorithm} & \textbf{Result} \\ \midrule A & Good \\ B & Bad \\ \bottomrule \end{tabular}% \label{tab:comp_algorithm}% \end{table}% \subsubsection{Sample of SubSubSection} Writing manuscripts would be a long long journey. God Bless.
{ "alphanum_fraction": 0.6946386946, "avg_line_length": 27.6774193548, "ext": "tex", "hexsha": "a11ae12af11c3603c47a031535a55b5e26b5911d", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "5f28102e1051f64e4d81314cfd20c27d29bd6cc5", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "kotaro-inoue/hknu_latex_template", "max_forks_repo_path": "src/introduction.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "5f28102e1051f64e4d81314cfd20c27d29bd6cc5", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "kotaro-inoue/hknu_latex_template", "max_issues_repo_path": "src/introduction.tex", "max_line_length": 79, "max_stars_count": null, "max_stars_repo_head_hexsha": "5f28102e1051f64e4d81314cfd20c27d29bd6cc5", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "kotaro-inoue/hknu_latex_template", "max_stars_repo_path": "src/introduction.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 247, "size": 858 }
\section*{Chinese Translations} \begin{etaremune} \item E. Vance (2018), \href{https://huanqiukexue.com/a/zazhi/2018/2018/1130/28180.html}{Earthquakes in the sky}, 11, \textit{Huanqiukexue} (Chinese version of \textit{Scientific American}). \end{etaremune}
{ "alphanum_fraction": 0.7368421053, "avg_line_length": 33.25, "ext": "tex", "hexsha": "31ad68cdaed5c288dbcd58805ee671b3b7f249d1", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "206a359ff7fcb036befa59fb9e1ce82f4406a9a0", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "core-man/cv", "max_forks_repo_path": "en/translations.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "206a359ff7fcb036befa59fb9e1ce82f4406a9a0", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "core-man/cv", "max_issues_repo_path": "en/translations.tex", "max_line_length": 115, "max_stars_count": null, "max_stars_repo_head_hexsha": "206a359ff7fcb036befa59fb9e1ce82f4406a9a0", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "core-man/cv", "max_stars_repo_path": "en/translations.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 98, "size": 266 }
\section{Eukaryotic cells}
{ "alphanum_fraction": 0.7586206897, "avg_line_length": 7.25, "ext": "tex", "hexsha": "30f57b614e60a11f5fb955f8339729378e6fac87", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "adamdboult/nodeHomePage", "max_forks_repo_path": "src/pug/theory/biology/singleCell/06-00-Eukaryotic.tex", "max_issues_count": 6, "max_issues_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93", "max_issues_repo_issues_event_max_datetime": "2022-01-01T22:16:09.000Z", "max_issues_repo_issues_event_min_datetime": "2021-03-03T12:36:56.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "adamdboult/nodeHomePage", "max_issues_repo_path": "src/pug/theory/biology/singleCell/06-00-Eukaryotic.tex", "max_line_length": 26, "max_stars_count": null, "max_stars_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "adamdboult/nodeHomePage", "max_stars_repo_path": "src/pug/theory/biology/singleCell/06-00-Eukaryotic.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 10, "size": 29 }
\section{Additional notes}
\subsection{Language to Number Translation Tasks}
\label{sec:appendix:lanauge-to-number}
In the NALU paper, they include a ``Language to Number Translation Tasks'' experiment that appears to perform multiplication, by applying a recurrent NALU. However, after contacting the authors this turns out not to be the case.
\begin{itemize} \item Question: In 4.3, how big is the embedding layer output dimensionality?\\ Answer: From memory - i think it was 256. \item Question: In 4.3, how big is the LSTM layer output dimensionality?\\ Answer: From memory - I think it was 10 (small) \item Question: In 4.3, what output and input activation does the LSTM layer have, tanh? \\ Answer: It uses the standard LSTM construction - default in Tensorflow. \item Question: In 4.3, is the Linear/NAC/NALU layer only used for the final element in the sequence? See diagram.\\ Answer: Correct. \end{itemize}
\begin{figure}[h] \centering \includegraphics[scale=0.5]{graphics/language-to-numbers.png} \caption{Diagram included in the email.} \end{figure}
Based on these answers, we do not believe that ``Language to Number Translation Tasks'' tests multiplication in any meaningful way. There is nothing inherently wrong with that, as \citet{trask-nalu} do not state this explicitly either; we just wish to clarify it. Our reasoning is that an embedding of size 256 is more than enough to fully describe every single token; keep in mind that there are only 29 tokens in total. As such, there is no reason why an LSTM layer would not be able to solve this task on its own (a simplified LSTM solving it is shown in \eqref{eq:langauge-to-numbers-lstm}). However, because of the non-linear activations, which are undesired in this case, the LSTM would need to downscale the values 0-1000 to lie in the linear range of $\tanh(\cdot)$, and there would need to be a final layer that upscales back to 0-1000. We believe that the NALU layer serves this purpose and thus does not have any value in terms of arithmetic.
\begin{equation} \begin{aligned} h_t &= &h_{t-1}\ \cdot\ &f_t &&+ &\tilde{h}_{t}\ \ \cdot\ &i_t \\ h_t &= &\begin{bmatrix} h_{t-1} \\ 0 \\ 0 \end{bmatrix}^T &\left(I(x_t = \texttt{100})\begin{bmatrix}100 \\ 1 \\ 1\end{bmatrix}\right)\ &&+ &\begin{bmatrix} 0 \\ h_{t-1} \\ x_t \end{bmatrix}^T &\left(I(x_t \not= \texttt{100})\begin{bmatrix}100 \\ 1 \\ 1\end{bmatrix}\right) \end{aligned} \label{eq:langauge-to-numbers-lstm} \end{equation}
{ "alphanum_fraction": 0.7424928013, "avg_line_length": 62.3333333333, "ext": "tex", "hexsha": "fb33a135d997fab5d3175ddba80f68ff5ee25e1c", "lang": "TeX", "max_forks_count": 19, "max_forks_repo_forks_event_max_datetime": "2021-09-03T08:32:38.000Z", "max_forks_repo_forks_event_min_datetime": "2019-12-21T15:58:44.000Z", "max_forks_repo_head_hexsha": "f9de9d004bb2dc2ee28577cd1760d0a00c185836", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "wlm2019/Neural-Arithmetic-Units", "max_forks_repo_path": "paper/appendix/nalu-author-comments.tex", "max_issues_count": 1, "max_issues_repo_head_hexsha": "f9de9d004bb2dc2ee28577cd1760d0a00c185836", "max_issues_repo_issues_event_max_datetime": "2019-12-03T12:40:21.000Z", "max_issues_repo_issues_event_min_datetime": "2019-12-03T12:40:21.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "wlm2019/Neural-Arithmetic-Units", "max_issues_repo_path": "paper/appendix/nalu-author-comments.tex", "max_line_length": 669, "max_stars_count": 147, "max_stars_repo_head_hexsha": "f9de9d004bb2dc2ee28577cd1760d0a00c185836", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "wlm2019/Neural-Arithmetic-Units", "max_stars_repo_path": "paper/appendix/nalu-author-comments.tex", "max_stars_repo_stars_event_max_datetime": "2021-11-16T02:51:18.000Z", "max_stars_repo_stars_event_min_datetime": "2019-10-07T11:01:54.000Z", "num_tokens": 696, "size": 2431 }
\documentclass{article}
\usepackage[utf8]{inputenc}
\usepackage{graphicx}
\usepackage{fancyvrb}
\graphicspath{{./images}}
\title{COS30031 Games Programming\\Research Project Report}
\author{Daniel Coady 102084174}
\date{17/11/2021}
\begin{document}
\maketitle
\pagebreak
\tableofcontents
\pagebreak
\begin{abstract}
Game architecture design is not a problem with a one-size-fits-all solution. There are many factors that dictate how "good" a design is, but most of them are entirely subjective and/or situational. What might work for one person or problem may not work for another. As such, it's important to assess the advantages and disadvantages of each design so that you may utilise the ones that best apply to your situation.
\end{abstract}
\section{Introduction}
\subsection{Background}
Game architecture is an art, one that is truly hard to learn and master. In fact, there are many experts and experienced developers who disagree frequently on the topic of how the backbone of games should be structured. This happens for a variety of reasons, but more often than not we will see two aspects of various architectures compared: the usability or maintainability, and the performance. When we talk about the usability or maintainability of something, we refer to how easy it is to work with. Of interest to us is the ergonomics of developing with and extending a given architecture as the needs of a solution expand in scope. Performance on the other hand is far more straightforward--how fast is it? Both aspects are incredibly important when weighing up which architecture is most appropriate to you, your project, and your needs.
\subsection{Purpose of This Research}
Using the metrics outlined earlier, I aim to compare and contrast four different architectures. This will involve deep dives into every architecture, dissecting each of them in a variety of ways. My hope is that, through the research presented in this report, the reader may be able to:
\begin{itemize}
    \item Understand what each architecture is and how it works
    \item Understand the purpose of each architecture
    \item Understand the use case for each architecture
    \item Come to their own conclusions regarding choices in game architecture
\end{itemize}
\section{Methodology}
In order to test the various aspects of each of these architectures, I have had to formulate a series of tests and establish a common testing environment for each of these tests to be run within.
\subsection{Tests}
All tests will be done with a simple set of entities. These entities will be represented by squares in a graphical window, and they will move with a fixed velocity. When an entity reaches the edge of the window, it will then loop back around to the other side of the window. To add an extra layer of complexity to the processing of the entities, a random number of entities will also colour shift while moving. Data will be tracked in the form of average cycles per second and time to complete in seconds. To ensure we collect as much useful data as possible, I will extend this basis for testing in three key ways:
\subsubsection{Static}
The test will be run with 250,000 entities in the simulation over the course of one minute. The purpose of this test is to understand at a high level how compiler optimisation level affects the speed of each architecture. Additionally, the test will be run through valgrind's cachegrind tool in order to obtain cache profiling information for each architecture.
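To make the measurement procedure concrete, the function below is a minimal sketch of the kind of timing harness assumed by these tests; the template parameter and the function name are illustrative and are not taken from the project's actual code.
\bigskip
\begin{BVerbatim}
// hypothetical benchmark loop: counts engine update cycles over a
// fixed wall-clock duration and reports the average cycles per second
#include <chrono>
#include <cstdio>

template <typename Engine>
void run_static_benchmark( Engine& engine, double duration_seconds ) {
    using clock = std::chrono::steady_clock;
    const auto start = clock::now();
    unsigned long cycles = 0;

    while ( std::chrono::duration<double>( clock::now() - start ).count()
            < duration_seconds ) {
        engine.update();   // advance every entity once per cycle
        ++cycles;
    }

    const double elapsed =
        std::chrono::duration<double>( clock::now() - start ).count();
    std::printf( "cycles: %lu, avg cycles/sec: %f\n",
                 cycles, cycles / elapsed );
}
\end{BVerbatim}
\bigskip
The ramp-up style tests follow the same loop shape, with entities added (and, in the dynamic variant, later removed) between cycles.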
\subsubsection{Ramp-up}
There will be multiple tests run for each architecture, starting at 100,000 entities and adding 100,000 with each subsequent test until 500,000 entities are reached. Each test will be run over the course of one minute. The purpose of this test is to understand at a high level how a given architecture scales as the number of entities grows.
\subsubsection{Dynamic Ramp-up}
A single test will be run for each architecture that starts with 0 entities. For each cycle performed, an entity will be added to the architecture until a limit of 100,000 entities is hit. Once this limit has been hit, entities will start being removed from the architecture until there are none left, and the time taken to execute will be measured. The purpose of this test is to understand at a high level how the overhead introduced by the creation and removal of entities from an architecture affects the execution time of an application.
\subsection{Environment}
In order to test each of these architectures, a common environment for them to run in has been established. This will take the form of a simple front-end abstraction I have written on top of the OpenGL 4.3 Core API, using GLFW for windowing and other miscellaneous functionality. All code used for the environment and development of tests will be written in C++, targeting the C++17 standard at a maximum. Something to note here is that I have elected to use OpenGL 4.3 Core. This is because it is the first version of the OpenGL specification to add compute shaders to the core profile, which will become important later on for one of our architectures.
\section{Introduction to Tested Architectures}
This research has chosen to focus on four key architectures. The rationale behind this is that, while it might not be exhaustive, it will provide meaningful data points to inform how different styles and implementations of architecture can affect the usability/maintainability and performance of your game. All code mentioned will be available on GitHub\footnote{https://github.com/pondodev/research-project} under the Unlicense License.
\subsection{Architecture A--Pure Object Oriented}
Taking a more traditional and plain object oriented approach, we see what you might expect from a more standard application's codebase. Since it is very pure as far as object oriented architecture goes, many of the same goals of the paradigm carry over to this architecture--that is, we want to maximise code cohesion, minimise code coupling, and reduce overall code duplication. Most of this is to ensure that, at a high level, the codebase is as maintainable as possible. There are a few notable parts that make up this architecture:
\begin{itemize}
    \item Engine
    \item[] The brains of the operation. Manages entities and their associated memory.
    \item Entity
    \item[] A base class for all entities in the architecture to inherit from.
    \item Colour Shift Entity
    \item[] A class which derives from the entity base class to add colour shifting functionality.
\end{itemize}
\begin{figure}
    \centering
    \begin{BVerbatim}
class Entity {
public:
    Entity( unsigned int _id,
            glm::vec2 _position,
            glm::vec2 _velocity,
            glm::vec3 _color );
    virtual void update();
    unsigned int get_id();
    glm::vec2 get_position();
    glm::vec3 get_color();

protected:
    glm::vec3 color;

private:
    unsigned int id;
    glm::vec2 position;
    glm::vec2 velocity;
};
    \end{BVerbatim}
    \caption{Architecture A Entity class declaration}
    \label{arch_a_entity_header}
\end{figure}
\begin{figure}
    \centering
    \begin{BVerbatim}
class ColorShiftEntity : public Entity {
public:
    ColorShiftEntity( unsigned int _id,
                      glm::vec2 _position,
                      glm::vec2 _velocity,
                      glm::vec3 _color,
                      glm::vec3 _color_velocity );
    void update() override;

private:
    glm::vec3 color_velocity;
};
    \end{BVerbatim}
    \caption{Architecture A ColorShiftEntity class declaration}
    \label{arch_a_color_shift_entity_header}
\end{figure}
\begin{figure}
    \centering
    \begin{BVerbatim}
class Engine {
public:
    ~Engine();
    unsigned int add_entity( glm::vec2 _position,
                             glm::vec2 _velocity,
                             glm::vec3 _color );
    unsigned int add_entity( glm::vec2 _position,
                             glm::vec2 _velocity,
                             glm::vec3 _color,
                             glm::vec3 _color_velocity );
    void remove_entity( unsigned int id );
    void pop_entity();
    std::vector<Entity*> get_entities();
    void update();

private:
    unsigned int current_entity_id = 0;
    std::vector<Entity*> entities;
};
    \end{BVerbatim}
    \caption{Architecture A Engine class declaration}
    \label{arch_a_engine_header}
\end{figure}
Starting with the Entity class in figure \ref{arch_a_entity_header}, it contains some basic information required to render a given entity to the provided render target--a 2D position, a 2D velocity, and a colour with RGB components. We also have some simple getter methods for acquiring the private and protected members of the class, and an update method which will simply move the entity's position by the given velocity vector.
The ColorShiftEntity class in figure \ref{arch_a_color_shift_entity_header} then inherits from the Entity class and adds a new member to store colour velocity. This colour velocity is also an RGB value which dictates how the colour of an entity should shift with every update. It's for this reason that we have an override for the update method, which will perform the same update functionality as the base Entity class but will also apply the colour velocity.
Finally, there is the Engine class in figure \ref{arch_a_engine_header}, which uses a factory-like pattern. The engine is what a programmer would primarily be interfacing with in order to operate the architecture. This is because it is where one would create, manage, and update entities. It provides a method with two overloads to create an entity--one overload for regular entities and one for colour shifting entities. There is also a method to update every single entity managed by the engine.
\subsection{Architecture B--Object Oriented Component Pattern}
This is an architecture many might be familiar with from general purpose engines such as Unity or Unreal Engine 4. The core idea at play is that pure object oriented approaches to game architecture aren't particularly conducive to how games are generally programmed. In particular, there is the idea of an entity having a series of "traits" which apply to it, which in a purely object oriented architecture would normally be implemented by means of inheritance. However, this can be prone to many different types of issues such as deep, complicated inheritance trees or ambiguity introduced through diamond inheritance.
To add to this, it becomes significantly less feasible to implement this kind of architecture with a language such as C\# which does not allow for multiple inheritance and would instead require you to use interfaces. This is the core rationale behind the component pattern in object oriented game architecture design--reduce the amount of inheritance required by storing a collection of components which define traits of an entity, inside of an entity. This also means that in languages like C\# which do not support multiple inheritance, we can still implement this pattern in a clean and maintainable manner. My implementation has 4 key parts: \begin{itemize} \item Engine \item[] Much like the pure object oriented architecture, manages the creation, removal, and updating of entities. \item Entity \item[] A very bare bones class that has an id and collection of components. \item Component Base \item[] The base class from which all components that can define traits or behaviour of an entity will inherit from. \item Components \item[] Child classes of the component base class that will define specific traits or behaviour for entities it is applied to. \end{itemize} \begin{figure} \centering \begin{BVerbatim} class Entity { public: Entity(); ~Entity(); unsigned int get_id(); void add_component( Component* c ); void remove_component( unsigned int id ); template <typename T> std::optional<T*> get_component() { std::optional<T*> to_return; for ( auto c : components ) { if ( typeid(*c) == typeid(T) ) { to_return = (T*)c; break; } } return to_return; } private: static inline unsigned int next_id; unsigned int id; std::vector<Component*> components; }; \end{BVerbatim} \caption{Architecture B Entity class declaration} \label{arch_b_entity_header} \end{figure} \begin{figure} \centering \begin{BVerbatim} class Component { public: Component(); virtual unsigned int get_id(); // virtual so RTTI works private: static inline unsigned int next_id; unsigned int id; }; \end{BVerbatim} \caption{Architecture B ComponentBase class declaration} \label{arch_b_component_base_header} \end{figure} \begin{figure} \centering \begin{BVerbatim} class MovementComponent : public Component { public: MovementComponent( glm::vec2 _pos, glm::vec2 _vel ); void move(); glm::vec2 pos; private: glm::vec2 vel; }; class ColorComponent : public Component { public: ColorComponent( glm::vec3 _value ); void apply_velocity( glm::vec3 vel ); glm::vec3 value; }; class ColorVelocityComponent : public Component { public: ColorVelocityComponent( glm::vec3 _value ); glm::vec3 value; }; \end{BVerbatim} \caption{Architecture B Component class declarations} \label{arch_b_components_header} \end{figure} \begin{figure} \centering \begin{BVerbatim} class Engine { public: ~Engine(); void add_entity( Entity* entity ); void remove_entity( unsigned int id ); void pop_entity(); std::optional<Entity*> get_entity( unsigned int id ); std::vector<Entity*> get_all_entities(); void update(); private: std::vector<Entity*> entities; }; \end{BVerbatim} \caption{Architecture B Engine class declaration} \label{arch_b_engine_header} \end{figure} The Entity class is incredibly simple for the most part: an id, a vector of components belonging to this entity, and methods to add/remove components from an entity. There is one rather complex thing, however, and that is the template method for getting a component from an entity. This method uses run time type information (RTTI for short) to get an arbitrary component of a given type from the vector of components. 
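To show how this API reads from the calling side, here is a brief usage sketch; it relies only on the declarations in the figures above and is not part of the benchmark code itself.
\bigskip
\begin{BVerbatim}
// usage sketch only -- assumes the Entity and component declarations
// from the figures above are in scope
void example_usage() {
    Entity* e = new Entity();
    e->add_component( new MovementComponent( glm::vec2( 0.f, 0.f ),
                                             glm::vec2( 1.f, 0.5f ) ) );
    e->add_component( new ColorComponent( glm::vec3( 1.f, 0.f, 0.f ) ) );

    // get_component returns std::optional, so a missing component
    // shows up as an empty optional rather than a dangling pointer
    auto mov = e->get_component<MovementComponent>();
    if ( mov.has_value() ) mov.value()->move();

    // this entity was never given a ColorVelocityComponent, so the
    // returned optional will be empty and has_value() is false
    auto col_vel = e->get_component<ColorVelocityComponent>();

    delete e;   // the Entity destructor is assumed to free its components
}
\end{BVerbatim}
\bigskip
The \texttt{std::optional} return value forces the caller to handle the case where a component is absent, which is safer than handing back a raw, possibly null pointer.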
The ComponentBase class in figure \ref{arch_b_component_base_header}, as previously mentioned, will be the base class that all components inherit from. It's simple for the most part, only really providing one getter method for the component's id. However, one curiosity here is that the method to get the id is virtual, but we never override it in any of the classes that derive from it. This is due to a quirk of C++'s design, since type information isn't available during runtime in the same way we might expect it to be in something like C\# with its reflection feature. To get around this, there is RTTI, which can give us partial information on types during runtime, but it requires us to do two things: provide a pointer, and implement at least one virtual method on the base class to be inherited from.
Components, as seen in figure \ref{arch_b_components_header}, all inherit from the ComponentBase class. This allows us to polymorphically store them in a collection (which is exactly what we do in the Entity class) while still providing unique functionality per component.
Finally there is the Engine class in figure \ref{arch_b_engine_header}. Much like the pure object oriented architecture, the engine will manage all entities and their associated memory appropriately. However, a key difference is that creation of the entity is now a responsibility external to the engine, and a programmer would then need to register the created entity with the engine. This is something I would classify as a design flaw in the implementation which, if used in the real world, should be remedied. However, for the purposes of these tests it will be more than adequate to collect the necessary data.
\subsection{Architecture C--Entity Component System}
While by no means a new approach to game architecture, with uses of it dating back to 2001-2003\footnote{http://t-machine.org/index.php/2007/09/03/entity-systems-are-the-future-of-mmog-development-part-1/}, it is only in recent times that we have seen it truly come into its own. Entity component systems, often shortened to ECS, are a performance-first architecture design which foregoes object oriented design in favour of a data oriented design. The key difference between the two paradigms is that while object oriented design prioritises concepts such as ownership through means of abstraction and encapsulation, data oriented design chooses to separate data entirely from its functionality. This is done through the 3 parts of an ECS implementation:
\begin{itemize}
    \item Entities
    \item[] Abstract identifiers that are used to denote ownership over a registered component in the architecture.
    \item Components
    \item[] Simple, tightly packed data packets that can be considered a "state" belonging to an entity.
    \item Systems
    \item[] Functionality which operates over a collection of entities and their selected components that are registered to a given system, applying logic and functionality to the components.
\end{itemize}
\begin{figure}[h]
    \centering
    \begin{BVerbatim}
typedef uint32_t Entity;

typedef enum {
    Movable = 0b100,
    Color = 0b010,
    ColorVelocity = 0b001
} ComponentFlag;
    \end{BVerbatim}
    \caption{Architecture C typedefs}
    \label{arch_c_typedefs}
\end{figure}
\begin{figure}
    \centering
    \begin{BVerbatim}
struct MovableComponent {
    float pos_x;
    float pos_y;
    float vel_x;
    float vel_y;
};

struct ColorComponent {
    float r;
    float g;
    float b;
};

struct ColorVelocityComponent {
    float r;
    float g;
    float b;
};
    \end{BVerbatim}
    \caption{Architecture C component struct declarations}
    \label{arch_c_components_header}
\end{figure}
\begin{figure}
    \centering
    \begin{BVerbatim}
class Engine {
public:
    Engine();
    ~Engine();
    std::optional<Entity> add_entity();
    void remove_entity( Entity id );
    int entity_has_component( Entity id, ComponentFlag component );
    void movement_system();
    void color_shift_system();
    MovableComponent* add_movable_component( Entity id );
    ColorComponent* add_color_component( Entity id );
    ColorVelocityComponent* add_color_velocity_component( Entity id );
    MovableComponent* get_movable_component( Entity id );
    ColorComponent* get_color_component( Entity id );
    ColorVelocityComponent* get_color_velocity_component( Entity id );

private:
    std::queue<Entity> available_entities;
    std::vector<Entity> movement_system_entities;
    std::vector<Entity> color_shift_system_entities;
    ComponentFlag* entity_component_flags;
    MovableComponent* movable_components;
    ColorComponent* color_components;
    ColorVelocityComponent* color_velocity_components;
};
    \end{BVerbatim}
    \caption{Architecture C Engine class declaration}
    \label{arch_c_engine_header}
\end{figure}
The first things of note are a couple of simple typedefs that have been created, as seen in figure \ref{arch_c_typedefs}. The first is for entities, which defines their id as an unsigned 32-bit integer. The second is an enum flag set which we can bitwise-or together to indicate what components have been set on an entity.
Components, as mentioned before, are incredibly simple. Figure \ref{arch_c_components_header} shows that each component is nothing more than a struct with primitive data within.
The Engine class in figure \ref{arch_c_engine_header} is actually somewhat of a simplification of what you might expect in a more general purpose ECS implementation. This is because there are normally separate manager classes for each of the parts of an ECS; however, for the purposes of this research I have elected to simplify things down for the sake of demonstration. Within this Engine class we have some methods for adding and removing entities, functionality normally delegated to an entity manager. We also have parts of what might be expected from a component manager in the form of methods to add/get components for an entity and tightly packed arrays which contain the components. The final piece of the puzzle is the systems, which come in two parts. First are the two system methods which perform the actual functionality of a given system, and second are the vectors which contain all the entity ids which are registered to be worked on by a given system.
As a final note, I'll mention that I owe a great deal to Austin Morlan, as their ECS implementation\footnote{https://austinmorlan.com/posts/entity\_component\_system/} inspired much of my own.
\subsection{Architecture D--Entity Component System With\\Compute Shaders}
This is identical to Architecture C with one key difference: we use the GPU to run the systems in the ECS implementation.
In theory this should provide incredible performance benefits to our architecture since GPUs are incredibly fast at parallel floating point computations. However, much to my own disappointment, I was not able to implement it fully into an ECS implementation for reasons that will be mentioned later. I have, however, successfully written compute shaders in OpenGL\footnote{https://github.com/pondodev/opengl\_compute} so I will be able to comment on other aspects of it. This does unfortunately mean that I have been unable to collect any data on it and, as such, will not be able to report on its performance.
\clearpage
\section{Data}
The following sections display the data visualisations for each test run.
\subsection{Architecture A}
\begin{figure}[!h]
    \centering
    \includegraphics[scale=0.5]{Architecture A Static (AMD Ryzen 5 3600).png}
    \caption{Architecture A Static Test (AMD Ryzen 5 3600)}
    \label{arch_a_static_pc}
\end{figure}
\begin{figure}[!h]
    \centering
    \includegraphics[scale=0.5]{Architecture A Static (Intel i5 8250U).png}
    \caption{Architecture A Static Test (Intel i5 8250U)}
    \label{arch_a_static_laptop}
\end{figure}
\begin{figure}[!h]
    \centering
    \includegraphics[scale=0.5]{Architecture A Ramp Up (AMD Ryzen 5 3600).png}
    \caption{Architecture A Ramp Up Test (AMD Ryzen 5 3600)}
    \label{arch_a_ramp_up_pc}
\end{figure}
\begin{figure}[!h]
    \centering
    \includegraphics[scale=0.5]{Architecture A Ramp Up (Intel i5 8250U).png}
    \caption{Architecture A Ramp Up Test (Intel i5 8250U)}
    \label{arch_a_ramp_up_laptop}
\end{figure}
\begin{figure}[!h]
    \centering
    \includegraphics[scale=0.5]{Architecture A Dynamic Ramp Up (AMD Ryzen 5 3600).png}
    \caption{Architecture A Dynamic Ramp Up Test (AMD Ryzen 5 3600)}
    \label{arch_a_dynamic_ramp_up_pc}
\end{figure}
\begin{figure}[!h]
    \centering
    \includegraphics[scale=0.5]{Architecture A Dynamic Ramp Up (Intel i5 8250U).png}
    \caption{Architecture A Dynamic Ramp Up Test (Intel i5 8250U)}
    \label{arch_a_dynamic_ramp_up_laptop}
\end{figure}
\clearpage
\subsection{Architecture B}
\begin{figure}[!h]
    \centering
    \includegraphics[scale=0.5]{Architecture B Static (AMD Ryzen 5 3600).png}
    \caption{Architecture B Static Test (AMD Ryzen 5 3600)}
    \label{arch_b_static_pc}
\end{figure}
\begin{figure}[!h]
    \centering
    \includegraphics[scale=0.5]{Architecture B Static (Intel i5 8250U).png}
    \caption{Architecture B Static Test (Intel i5 8250U)}
    \label{arch_b_static_laptop}
\end{figure}
\begin{figure}[!h]
    \centering
    \includegraphics[scale=0.5]{Architecture B Ramp Up (AMD Ryzen 5 3600).png}
    \caption{Architecture B Ramp Up Test (AMD Ryzen 5 3600)}
    \label{arch_b_ramp_up_pc}
\end{figure}
\begin{figure}[!h]
    \centering
    \includegraphics[scale=0.5]{Architecture B Ramp Up (Intel i5 8250U).png}
    \caption{Architecture B Ramp Up Test (Intel i5 8250U)}
    \label{arch_b_ramp_up_laptop}
\end{figure}
\begin{figure}[!h]
    \centering
    \includegraphics[scale=0.5]{Architecture B Dynamic Ramp Up (AMD Ryzen 5 3600).png}
    \caption{Architecture B Dynamic Ramp Up Test (AMD Ryzen 5 3600)}
    \label{arch_b_dynamic_ramp_up_pc}
\end{figure}
\begin{figure}[!h]
    \centering
    \includegraphics[scale=0.5]{Architecture B Dynamic Ramp Up (Intel i5 8250U).png}
    \caption{Architecture B Dynamic Ramp Up Test (Intel i5 8250U)}
    \label{arch_b_dynamic_ramp_up_laptop}
\end{figure}
\clearpage
\subsection{Architecture C}
\begin{figure}[!h]
    \centering
    \includegraphics[scale=0.5]{Architecture C Static (AMD Ryzen 5 3600).png}
    \caption{Architecture C Static Test (AMD Ryzen 5 3600)}
    \label{arch_c_static_pc}
\end{figure}
\begin{figure}[!h]
    \centering
\includegraphics[scale=0.5]{Architecture C Static (Intel i5 8250U).png} \caption{Architecture C Static Test (Intel i5 8250U)} \label{arch_c_static_laptop} \end{figure} \begin{figure}[!h] \centering \includegraphics[scale=0.5]{Architecture C Ramp Up (AMD Ryzen 5 3600).png} \caption{Architecture C Ramp Up Test (AMD Ryzen 5 3600)} \label{arch_c_ramp_up_pc} \end{figure} \begin{figure}[!h] \centering \includegraphics[scale=0.5]{Architecture C Ramp Up (Intel i5 8250U).png} \caption{Architecture C Ramp Up Test (Intel i5 8250U)} \label{arch_c_ramp_up_laptop} \end{figure} \begin{figure}[!h] \centering \includegraphics[scale=0.5]{Architecture C Dynamic Ramp Up (AMD Ryzen 5 3600).png} \caption{Architecture C Dynamic Ramp Up Test (AMD Ryzen 5 3600)} \label{arch_c_dynamic_ramp_up_pc} \end{figure} \begin{figure}[!h] \centering \includegraphics[scale=0.5]{Architecture C Dynamic Ramp Up (Intel i5 8250U).png} \caption{Architecture C Dynamic Ramp Up Test (Intel i5 8250U)} \label{arch_c_dynamic_ramp_up_laptop} \end{figure} \clearpage \subsection{Architecture Comparisons} \label{arch_comparison} \begin{figure}[!h] \centering \includegraphics[scale=0.5]{O3 Static Benchmarks (AMD Ryzen 5 3600).png} \caption{Static Tests (AMD Ryzen 5 3600)} \label{pc_static_tests} \end{figure} \begin{figure}[!h] \centering \includegraphics[scale=0.5]{O3 Static Benchmarks (Intel i5 8250U).png} \caption{Static Tests (Intel i5 8250U)} \label{laptop_static_tests} \end{figure} \begin{figure}[!h] \centering \includegraphics[scale=0.5]{O3 Ramp Up Benchmarks (AMD Ryzen 5 3600).png} \caption{Ramp Up Tests (AMD Ryzen 5 3600)} \label{pc_ramp_up_tests} \end{figure} \begin{figure}[!h] \centering \includegraphics[scale=0.5]{O3 Ramp Up Benchmarks (Intel i5 8250U).png} \caption{Ramp Up Tests (Intel i5 8250U)} \label{laptop_ramp_up_tests} \end{figure} \begin{figure}[!h] \centering \includegraphics[scale=0.5]{O3 Dynamic Ramp Up Benchmarks (AMD Ryzen 5 3600).png} \caption{Dynamic Ramp Up Tests (AMD Ryzen 5 3600)} \label{pc_dynamic_ramp_up_tests} \end{figure} \begin{figure}[!h] \centering \includegraphics[scale=0.5]{O3 Dynamic Ramp Up Benchmarks (Intel i5 8250U).png} \caption{Dynamic Ramp Up Tests (Intel i5 8250U)} \label{laptop_dynamic_ramp_up_tests} \end{figure} \clearpage \subsection{Cachegrind Output} \begin{figure}[!h] \centering \begin{BVerbatim} I refs: 15,279,334,589 I1 misses: 768,022 LLi misses: 574,626 I1 miss rate: 0.01% LLi miss rate: 0.00% D refs: 5,911,147,698 (4,233,875,171 rd + 1,677,272,527 wr) D1 misses: 230,427,009 ( 229,725,605 rd + 701,404 wr) LLd misses: 65,787,658 ( 65,326,385 rd + 461,273 wr) D1 miss rate: 3.9% ( 5.4% + 0.0% ) LLd miss rate: 1.1% ( 1.5% + 0.0% ) LL refs: 231,195,031 ( 230,493,627 rd + 701,404 wr) LL misses: 66,362,284 ( 65,901,011 rd + 461,273 wr) LL miss rate: 0.3% ( 0.3% + 0.0% ) \end{BVerbatim} \caption{Architecture A cachegrind output (AMD Ryzen 5 3600)} \label{arch_a_cachegrind_pc} \end{figure} \begin{figure}[!h] \centering \begin{BVerbatim} I refs: 11,673,979,882 I1 misses: 108,417 LLi misses: 45,870 I1 miss rate: 0.00% LLi miss rate: 0.00% D refs: 4,506,264,005 (3,233,764,491 rd + 1,272,499,514 wr) D1 misses: 174,092,870 ( 173,478,074 rd + 614,796 wr) LLd misses: 172,471,117 ( 171,946,373 rd + 524,744 wr) D1 miss rate: 3.9% ( 5.4% + 0.0% ) LLd miss rate: 3.8% ( 5.3% + 0.0% ) LL refs: 174,201,287 ( 173,586,491 rd + 614,796 wr) LL misses: 172,516,987 ( 171,992,243 rd + 524,744 wr) LL miss rate: 1.1% ( 1.2% + 0.0% ) \end{BVerbatim} \caption{Architecture A cachegrind output (Intel i5 8250U)} 
\label{arch_a_cachegrind_laptop} \end{figure} \begin{figure}[!h] \centering \begin{BVerbatim} I refs: 17,093,466,994 I1 misses: 285,064 LLi misses: 238,882 I1 miss rate: 0.00% LLi miss rate: 0.00% D refs: 6,048,221,487 (5,025,865,701 rd + 1,022,355,786 wr) D1 misses: 221,749,747 ( 220,647,937 rd + 1,101,810 wr) LLd misses: 124,021,475 ( 123,094,901 rd + 926,574 wr) D1 miss rate: 3.7% ( 4.4% + 0.1% ) LLd miss rate: 2.1% ( 2.4% + 0.1% ) LL refs: 222,034,811 ( 220,933,001 rd + 1,101,810 wr) LL misses: 124,260,357 ( 123,333,783 rd + 926,574 wr) LL miss rate: 0.5% ( 0.6% + 0.1% ) \end{BVerbatim} \caption{Architecture B cachegrind output (AMD Ryzen 5 3600)} \label{arch_b_cachegrind_pc} \end{figure} \begin{figure}[!h] \centering \begin{BVerbatim} I refs: 14,178,150,149 I1 misses: 110,258 LLi misses: 47,501 I1 miss rate: 0.00% LLi miss rate: 0.00% D refs: 5,006,378,567 (4,162,894,790 rd + 843,483,777 wr) D1 misses: 181,408,674 ( 180,305,799 rd + 1,102,875 wr) LLd misses: 179,883,519 ( 178,870,475 rd + 1,013,044 wr) D1 miss rate: 3.6% ( 4.3% + 0.1% ) LLd miss rate: 3.6% ( 4.3% + 0.1% ) LL refs: 181,518,932 ( 180,416,057 rd + 1,102,875 wr) LL misses: 179,931,020 ( 178,917,976 rd + 1,013,044 wr) LL miss rate: 0.9% ( 1.0% + 0.1% ) \end{BVerbatim} \caption{Architecture B cachegrind output (Intel i5 8250U)} \label{arch_b_cachegrind_laptop} \end{figure} \begin{figure}[!h] \centering \begin{BVerbatim} I refs: 20,674,747,170 I1 misses: 1,059,201 LLi misses: 252,983 I1 miss rate: 0.01% LLi miss rate: 0.00% D refs: 6,047,394,193 (4,392,478,824 rd + 1,654,915,369 wr) D1 misses: 227,381,637 ( 226,639,670 rd + 741,967 wr) LLd misses: 826,630 ( 419,239 rd + 407,391 wr) D1 miss rate: 3.8% ( 5.2% + 0.0% ) LLd miss rate: 0.0% ( 0.0% + 0.0% ) LL refs: 228,440,838 ( 227,698,871 rd + 741,967 wr) LL misses: 1,079,613 ( 672,222 rd + 407,391 wr) LL miss rate: 0.0% ( 0.0% + 0.0% ) \end{BVerbatim} \caption{Architecture C cachegrind output (AMD Ryzen 5 3600)} \label{arch_c_cachegrind_pc} \end{figure} \begin{figure}[!h] \centering \begin{BVerbatim} I refs: 13,928,271,322 I1 misses: 108,626 LLi misses: 45,786 I1 miss rate: 0.00% LLi miss rate: 0.00% D refs: 4,087,370,472 (2,971,977,825 rd + 1,115,392,647 wr) D1 misses: 152,070,396 ( 151,456,130 rd + 614,266 wr) LLd misses: 150,530,736 ( 149,987,229 rd + 543,507 wr) D1 miss rate: 3.7% ( 5.1% + 0.1% ) LLd miss rate: 3.7% ( 5.0% + 0.0% ) LL refs: 152,179,022 ( 151,564,756 rd + 614,266 wr) LL misses: 150,576,522 ( 150,033,015 rd + 543,507 wr) LL miss rate: 0.8% ( 0.9% + 0.0% ) \end{BVerbatim} \caption{Architecture C cachegrind output (Intel i5 8250U)} \label{arch_c_cachegrind_laptop} \end{figure} \clearpage \section{Analysis} \subsection{Architecture A} \subsubsection{Performance} It would appear through the data collected and presented in section \ref{arch_comparison} that Architecture A sits squarely in the middle of the road between all of the architectures being compared. No doubt, a part of this lies within the relative simplicity of the implementation compared to the others. Most notably, within the engine every entity is stored inside of the same vector. Since vectors are, according to the C++ standard, tightly packed collections, in theory the data contained within each entity could be dispatched to the cache for faster access. I must stress though that this is entirely in theory, since cache performance is not only CPU architecture dependant, but also manufacturer dependant. 
Differences in how cache pre-fetching occurs will massively affect whether a specific collection of data ends up being dispatched to the cache or not, which in turn can have a large impact on performance.
There's also very little in the way of algorithmic complexity, since all differences in functionality between the entities are handled by means of polymorphism. This allows us to simply perform a single loop over the collection of entities to achieve the desired functionality in O(n) time.
\subsubsection{Usability and Maintainability}
This is perhaps the largest drawback to a purely object oriented approach to game architecture. The problem is that the paradigm doesn't lend itself entirely to how games are generally structured, with what might be considered a many-to-many relationship between entities and traits. That is, many entities may have many traits, and many traits may be relevant to many entities. While this makes sense in more specific applications, such as our entities that can either move or move and colour shift, as soon as you start scaling up to larger and larger scopes it quickly becomes unfeasible to model everything this way.
To be clear, I don't think this makes it an entirely useless idea to keep in mind while designing game architecture--quite the contrary, actually. One of the core goals of object oriented programming is to reduce code repetition in order to increase the maintainability of a given codebase. It's for this reason that, when designing the larger, encapsulating aspects of an architecture, this way of thinking often becomes indispensable. In fact, it's something we see frequently in real world applications of component patterns or ECS. This is because of how well it lends itself not to specific parts of an architecture, but rather to the larger picture. Add to this that it seems like it can still maintain reasonable performance, and it makes for an exceptional candidate for the backbone of any other core architecture used for managing entities in a game.
\subsection{Architecture B}
\subsubsection{Performance}
Again referring to section \ref{arch_comparison}, it's incredibly clear just how much worse this architecture performs. It reliably comes last in every test, and by incredibly large margins. Analysing figures \ref{arch_b_cachegrind_pc} and \ref{arch_b_cachegrind_laptop} we can see that there are a few cache misses which no doubt have an impact on performance, but I posit that the biggest impact on performance is actually the algorithmic complexity of the implementation. Consider the following implementation of the Engine class' update method:
\bigskip
\begin{BVerbatim}
void Engine::update() {
    for ( auto e : entities ) {
        auto mov     = e->get_component<MovementComponent>();
        auto col     = e->get_component<ColorComponent>();
        auto col_vel = e->get_component<ColorVelocityComponent>();
        if ( mov.has_value() ) mov.value()->move();
        if ( col.has_value() && col_vel.has_value() ) {
            col.value()->apply_velocity( col_vel.value()->value );
        }
    }
}
\end{BVerbatim}
\bigskip
This is a simple loop in O(n) time over a collection of entities, which in and of itself is not unusual--every implementation does this for all of its entities as well. The more interesting part for us, however, is the get\_component calls, of which there are three.
If we have a look at the implementation of this template method then we would find the following:
\bigskip
\begin{BVerbatim}
template <typename T>
std::optional<T*> get_component() {
    std::optional<T*> to_return;
    for ( auto c : components ) {
        if ( typeid(*c) == typeid(T) ) {
            to_return = (T*)c;
            break;
        }
    }
    return to_return;
}
\end{BVerbatim}
\bigskip
This is another algorithm which in this case has a worst case time complexity of O(n). This means that our update method now has a worst case time complexity of O(n(x+y+z)), where x, y and z are the costs of the three component searches, which is, to put it bluntly, really bad. This is only compounded by the fact that not every entity will have a ColorVelocityComponent, which means that the search time for it on those entities will always be O(n). There are absolutely improvements that can be made to this, such as a flag system similar to my ECS implementation's, which would add a very fast check to see if a component exists on an entity. This gets complicated quickly however, and can affect extensibility due to a need to register components somewhere in the architecture. This is absolutely possible, but still not entirely ideal. As an aside, this is interesting to note when thinking in the context of a general purpose engine such as Unity. Unity uses a component pattern for its GameObjects, and it's suggested in many development communities that the GetComponent method's return value be cached in a variable if needed multiple times due to how expensive it is. This exploration into a potential implementation of the method shows how and why it can be so expensive to call, and should serve as a reminder to do such caching if you end up implementing a component pattern in your own game architecture.
\subsubsection{Usability and Maintainability}
This is what I would consider the ``middle ground'' of the two extremes discussed in this report. It is still at its core object oriented, defining an entity as an object that has a set of responsibilities. The key difference between this and a pure object oriented approach such as what has been seen in Architecture A is that the responsibilities are now contained within objects belonging to the entity. This still maintains much of what we set out to do with object oriented design, but now removes the need for deep inheritance trees for more complex entity behaviour. This alone makes the maintainability of the architecture far easier, since now there are fewer opportunities for bugs that may occur through deep inheritance trees or multiple inheritance. Add to this that the extensibility is incredibly simple, due to realistically only needing to add components to introduce new behaviour (a small sketch of this is given below). This all makes for an incredibly sensible architecture for the way most developers might think about a game's logic, in turn making it easy for many to work with. To once again analyse this in the context of general purpose engines, it makes sense why this is a pattern so often seen in their designs. While it is most certainly not the most performant (though as previously mentioned, there are absolutely ways in the real world to optimise this), it allows for faster development, which means faster iteration times. Often this can outweigh the performance cost, especially in situations where money is at stake, since you want to ensure you can iterate quicker to get your product completed sooner.
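As a concrete illustration of the extensibility point above, adding a brand new behaviour might look something like the following. This is only a sketch: the component name is made up for the example rather than taken from the actual implementation, and it assumes a common Component base class, which the typeid-based lookup shown earlier implies.
\bigskip
\begin{BVerbatim}
// Hypothetical example component -- not from the real project.
class ScaleComponent : public Component {
public:
    float factor = 1.01f;
    void grow() { /* scale the entity's size by factor */ }
};
// Inside the update loop, one extra lookup per entity is enough:
auto scale = e->get_component<ScaleComponent>();
if ( scale.has_value() ) scale.value()->grow();
\end{BVerbatim}
\bigskip
Nothing else in the engine has to change for this to work, which is a large part of why the pattern is so approachable to extend.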
The component pattern is also what I would describe as being the most general purpose, being excellent for smaller sets of entities or even one-off entities, but still having the ability to scale up to manage larger sets of entities.
\subsection{Architecture C}
\subsubsection{Performance}
Once again referring to section \ref{arch_comparison}, we are able to see just how much faster ECS is than any other architecture. In fact not only is it faster, but it's \textit{significantly} faster than the next fastest architecture. The reasoning behind this is rather interesting, because of how low level this ends up getting. For starters we have our two systems, which are always guaranteed to complete in O(n) time. Since they're both linear and have no bearing on each other, this makes the overall time complexity of the update algorithm O(n). So this would make it the same time complexity as Architecture A, right? This is where things start to get complicated when talking about time complexity measurements, and also serves as a good example of why big O notation shouldn't be taken as gospel and instead needs to be understood in context. You see, in Architecture A we iterate over every entity and perform a simple operation on each of them, making the execution time O(n). In this architecture we perform two separate O(n) time operations one after another, which will then simplify down to completing in O(n) time due to both being linear. The difference between the two however is what n actually is. In Architecture A, n is the number of entities, while in this architecture the work is governed by the number of entities registered for the movement system plus the number of entities registered for the colour shift system. The reason this is important is that this combined count is almost certainly going to be bigger than n in Architecture A given the same data set. This is due to the two systems potentially working on the same entities multiple times for different things. So this architecture performs more operations in absolute terms, and yet it's faster than Architecture A. Why is this? This has already been alluded to a few times earlier in this report, but cache is important here. Since all levels of cache are significantly faster to access than RAM, this makes it a prime area for optimisation with data intensive tasks. So how can we improve performance with the cache? The simple answer is that we want to make sure our data is sent to the cache by the CPU, and that as much of it as possible is sent. That way, when the time comes to actually perform these operations on the data, reads and writes on the data will be significantly faster, which in turn makes for a faster overall algorithm. There are a few things which can influence the sending of data to the cache and the amount that is sent, many of which ECS at its core exploits. One such thing is the tessellation of data in the cache, which simply means how tightly packed the data can be. This is why we always ensure that the data we use in an ECS implementation is simple, small, and in tightly packed arrays. This increases the tessellation ability of a given set of data, allowing us to send more of it to the cache at the same time. This leads nicely onto a problem that we aim to avoid when using an ECS--cache misses. There are two types of cache misses that we care about: data cache misses and instruction cache misses. In both cases a miss happens when something is requested from the cache but it isn't there yet. Before looking at each kind of miss in turn, the short sketch below shows the sort of tightly packed component data described above.
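This is a deliberately simplified, hypothetical illustration--the struct and vector names are made up and it is not the storage scheme from my actual implementation--but it captures the idea of keeping component data small, plain, and contiguous so that a system can walk it in a single linear pass.
\bigskip
\begin{BVerbatim}
#include <vector>
// Hypothetical, simplified component storage -- not the real code.
struct MovementData { float x, y, vx, vy; };  // small, plain data
struct ColorData    { float r, g, b; };
std::vector<MovementData> movement_data;  // contiguous, tightly packed
std::vector<ColorData>    color_data;
// The movement system is then just one linear pass over one array.
void movement_system_update() {
    for ( auto& m : movement_data ) {
        m.x += m.vx;
        m.y += m.vy;
    }
}
\end{BVerbatim}
\bigskip
Because each system walks one contiguous array from start to finish, consecutive iterations read neighbouring memory, which is exactly the access pattern that caches and pre-fetchers reward.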
Avoiding misses on instructions generally means that you want to ensure that the systems you write are as simple as possible. This ensures that as much of the code related to the system is sent into cache as possible, which makes reading what actions the CPU must perform next far quicker. Data misses would generally speaking be less impactful than instruction misses, but when the data set scales to larger and larger sizes we start to see increasing performance impacts due to data misses. Avoiding such misses is potentially more complicated, for a variety of reasons. There are simpler things we can address, such as how much data can be sent to the cache in a given call, which is why we try to ensure data can be packed together as tightly as possible. There's a much more complex and important problem we face however, and that's cache pre-fetching\footnote{This is something that I'm still not entirely clear on. This lecture provided a good starting point however: http://home.eng.iastate.edu/~zzhang/cpre581/lectures/Lecture17-1p.pdf}. The reason this gets so complicated is the variance between CPUs. You see, normally we might talk about how broader architectures differ, such as comparing x86\_64 to the ARM family of architectures. However, when we start talking about aspects of CPUs that are as low level as how we load data into the cache, we then need to start discussing differences between CPU manufacturers. While the specs of two CPUs, one from Intel and one from AMD, might look the same on paper, they can perform wildly differently in different scenarios. This comes down to the implementation details of a CPU, decided by the engineers who designed it. Of interest to us is the method of cache pre-fetching, because this indicates how we might want to write our code in order to make sure that data is reliably dispatched to the cache. Admittedly, this does get into the realm of crazy low level optimisation, so most programmers will not need to think about this. However, when your cache optimisations don't behave as you expect them to, this is something that you should bear in mind as a potential reason for the unexpected behaviour. A final thing I'll mention is that ECS lends itself particularly well to a compiler optimisation called inlining. The idea behind inlining is simple: move the contents of a function to where the function would normally be called from. I'm of course glossing over this massively, but this is the general idea behind it, and it lets us understand the kind of performance benefits that it can bring. At a low level, functions don't exist in the way that we think of them as existing. This is because assembly, a higher level abstraction of raw machine code, generally only has a very bare bones selection of instructions at its disposal. This allows it to do everything that we expect our computers to do, but does mean writing code can be difficult due to how verbose and difficult to maintain it can be (which is why languages such as C first came to be). So functions don't really exist in assembly, but we do have ways around this so that we can still reuse generic code for multiple purposes. This is done with a structure implemented on the hardware (though sometimes you find software implementations too) called a stack, which is pretty much the same as something like the stack collection found in the C++ standard library. At a high level, when we wish to call a function we will push the current memory address found at the program counter onto the stack.
Then we will jump to the address of the function, execute the code, and then upon returning we will pop the address off the stack and set the program counter to that address. This works really well (and might I add, is kinda cool) but comes with associated overhead. If you're constantly calling functions or methods then you run into the issue of constantly jumping to arbitrary memory addresses, pushing and popping off the stack, and some other things I've not mentioned for the sake of simplicity. So by inlining a function call, we end up increasing the speed by virtue of removing the overhead associated with calling a function. There are, of course, caveats. Inlining isn't some panacea that can solve your performance issues. In fact, it's generally a fairly niche use case where inlining can improve performance by a significant amount. This is compounded by the fact that inlining manually, if you're not careful, can have knock-on effects which negatively affect your code. Generally speaking, programmers will simply allow the compiler to automatically inline what it deems reasonable (and indeed, compilers are all pretty good at that these days). This all does come at a cost, and that cost is space. Due to the literal duplication of code, you'll very likely see some increase in the size of your compiled binary. This generally might not be an issue on modern PCs, but is all the same something to consider depending on the application you're writing for (e.g.\ if you're writing a demo for some niche hardware then size will become very important).
\subsubsection{Usability and Maintainability}
At a high level, it can look like there are many parallels with Architecture B when it comes to usability and maintainability. If we wish to add new functionality, then all we really need to do is add new components and systems to act on them--not dissimilar from how we add functionality through a component pattern based architecture. And indeed, compared to something like Architecture A I would argue that it is easier to add functionality to, especially at scale. The problem, however, lies in the fact that ECS is really designed for large scale architectures. The entire idea behind ECS is performance at scale, which is something we absolutely see when compared directly against all other architectures discussed in this report. When applied to smaller scale applications it still absolutely is performant, but it becomes less reasonable to maintain. This is because before you are able to create game logic using ECS, you must implement the supporting architecture. Not only this, but since the key advantage it has over its contemporaries is performance, you need to be constantly vigilant of how you've implemented features. An excellent example of this lies within an early attempt at optimising my implementation using Austin Morlan's solution for ensuring all data is tightly packed. In their implementation, they created a template class that could hold a generic collection of any type and would always ensure tight packing of data\footnote{I won't go into detail here as to how it works for the sake of brevity. If you're interested, however, you can find my interpretation at https://github.com/pondodev/research-project/blob/a0a75d5856a007baaeec22d9ce82fcad918fb5ee/program/architecture\_c/component\_container.h}. In theory this would be an excellent boost to speed since we can now always ensure that the data within the collection is tightly packed, allowing us to send more data during pre-fetching.
However, in my testing this ended up making my ECS implementation slower than every single other architecture tested, and by a very significant margin too. I've some thoughts on why this might have happened, but I ultimately think the largest hit to performance is the indexing of an unordered map. While it is absolutely possible that we've aided the pre-fetching prediction algorithms (though there are some arguments to the contrary), there is now added computation time from attempting to index an unordered map. In the pursuit of optimising cache usage, we have added an extra burden on the CPU which ultimately outweighed whatever benefits were gained. It's this kind of thought and care that is required when writing your own ECS implementation that can ultimately make it a hard architecture to write and use, at least initially. In theory most of this should be a non-issue once robust implementations are in place, but getting to that point is rather difficult.
\subsection{Architecture D}
\subsubsection{Performance}
No performance was measured for this architecture.
\subsubsection{Usability and Maintainability}
Since this is more or less the same as Architecture C, many of the same points apply here. There is one very key drawback to this though, and that is the complexity introduced through interacting with low level graphics APIs. Being as low level as it is, there's already a level of verbosity and technicality that makes it difficult to work with unless you have a good deal of prior knowledge. For example, you have to understand how the OpenGL state machine operates, how textures are created/used in the state machine, how to dispatch compute programs, how to appropriately designate work group sizes, how to batch dispatches, etc. This is all already rather difficult, but to make matters worse you also get very little in the way of feedback from your shader programs bar the input and output. It makes for not only a complex architecture, but one that is incredibly hard to debug due to the nature of what backs it. This also feeds into another issue with the usability and maintainability of this architecture once we add compute shaders to it. You see, previously we had spoken about the kind of challenges one would face when creating and improving upon an ECS implementation, and of course the same challenges apply here. There is a new challenge that arises however, and it is actually what stopped me from being able to complete the architecture for this report. There is a long and complex set of tasks you need to complete in order to prepare OpenGL to process arbitrary data through compute shaders, but of note to us is the preparation of input/output buffers and the scaling of work group sizes during runtime. There are a variety of methods that one can use to send and retrieve data from a compute shader, but for the sake of simplicity I have opted to use textures. This is mostly because there are already many abstractions that exist within OpenGL to work with textures, which makes writing and blitting to and from them fairly trivial. Textures do however have a maximum size (my running theory is that this is because of 16-bit addressing/indexing, but I can't be sure), which means that you can only send so much data at a time to and from the GPU. To add to this, compute shaders also suffer from the same problem (again, likely because of 16-bit addressing/indexing) when defining work group sizes.
This does at least mean that neither one of these aspects limits the other, which is some cold comfort, but it does mean we now need to implement dynamic batch dispatching. The idea behind this is simple: dispatch the data and the program in subsections of the larger data set so that everything can be processed without extending past the technical limits of the hardware and software used. Add to this that you need to be incredibly smart about what you do and don't do, because almost any operation in the chain that makes up a compute shader dispatch carries a heavy amount of overhead. Because of this, if you're not careful then you may end up with worse performance, therefore defeating the purpose of implementing compute shaders in the first place. So in summary, the theory tells us that compute shaders should increase performance by a significant amount over a traditional CPU implementation. Whether or not this is the case, unfortunately, I have been unable to test. What can be said for certain though is that by introducing compute shaders you increase architectural complexity by a very, very significant amount. It's for this reason that I see it as a very nuclear solution--that is, it should not be your first option. Turn to this if you find that it is the only option left, because if you are to delve into this rabbit hole then know that there be dragons ahead.
\section{Conclusion}
Perhaps this is not the most exciting conclusion, especially for those who feel most passionate about any one of these architectures. I do however strongly believe that there is no single ``best'' architecture design, and my findings only further reflect this. Not one of these designs casts a wide enough net over each use case and problem to allow me to point at it and say ``this is the one''. Instead, I would posit that these designs complement each other. Each can account for another's shortcomings, and some may even serve as a way to assist the design of others. So ultimately, it depends. Refer to the analyses of each architecture design to understand what may fit your needs best, and whether you may need to instead combine two or more together to create a solution that works best for you.
\end{document}
{ "alphanum_fraction": 0.755694211, "avg_line_length": 44.2166007905, "ext": "tex", "hexsha": "ff62cab1bbf1b3820cfae7db9f7e72853f7821f6", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "ab012904825e67b58944d45251cea40e57bae80f", "max_forks_repo_licenses": [ "Unlicense" ], "max_forks_repo_name": "pondodev/research-project", "max_forks_repo_path": "report/main.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "ab012904825e67b58944d45251cea40e57bae80f", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "Unlicense" ], "max_issues_repo_name": "pondodev/research-project", "max_issues_repo_path": "report/main.tex", "max_line_length": 202, "max_stars_count": null, "max_stars_repo_head_hexsha": "ab012904825e67b58944d45251cea40e57bae80f", "max_stars_repo_licenses": [ "Unlicense" ], "max_stars_repo_name": "pondodev/research-project", "max_stars_repo_path": "report/main.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 13721, "size": 55934 }
\section{Introduction}
\begin{frame}
\frametitle{How to use this template}
\begin{block}{Preparation}
\begin{enumerate}
\item \emph{Fork} this repository
\item Install \LaTeX{} and \emph{latexmk}
\end{enumerate}
\end{block}
\begin{block}{Build pdf}
Run \raise1pt\hbox{\fbox{\lstinline[language = sh]|make|}}
\begin{itemize}
\thusitem slide.pdf should be generated
\end{itemize}
\end{block}
\end{frame}
\section{Some Templates}
\begin{frame}[fragile]
\frametitle{Your first slide}
\begin{columns}
\begin{column}{0.55\textwidth}
\lstinputlisting[ language = tex, linewidth = \textwidth ]{./fig/sample-02.tex}
\end{column}
\begin{column}{0.45\textwidth}
\begin{itemize}
\item You can set the font size{\footnotesize (Now it's 10 pt)}
\item Start comments with \alert{\%}
\item Each page is called a \emph{frame} in Beamer
\end{itemize}
\end{column}
\end{columns}
\end{frame}
\begin{frame}[fragile]
\frametitle{Simple List}
\begin{columns}
\begin{column}{0.51\textwidth}
\lstinputlisting[ language = tex, linewidth = \textwidth ]{./fig/sample-03.tex}
\end{column}
\begin{column}{0.49\textwidth}
Use \emph{itemize} to list things
\begin{itemize}
\item This is the first item.
\item This is the second item.
\item This is the third item.
\end{itemize}
\end{column}
\end{columns}
\begin{columns}
\begin{column}{0.51\textwidth}
\lstinputlisting[ language = tex, linewidth = \textwidth ]{./fig/sample-04.tex}
\end{column}
\begin{column}{0.49\textwidth}
Use \emph{enumerate} if the order matters
\begin{enumerate}
\item Do this first
\item And then do that
\end{enumerate}
\end{column}
\end{columns}
\end{frame}
\begin{frame}[fragile]
\frametitle{Bored with bullet points?}
\begin{columns}
\begin{column}{0.54\textwidth}
\lstinputlisting[ language = tex, linewidth = \textwidth ]{./fig/sample-05.tex}
\end{column}
\begin{column}{0.46\textwidth}
\begin{itemize}
\ngitem Bullet points cannot show the relation between the items
\thusitem You may find it good to use some extra symbols
\end{itemize}
\end{column}
\end{columns}
\begin{mybox}{Custom \texttt{\textbackslash item} Macros}
\begin{itemize}
\okitem \verb|\okitem|: OK!
\ngitem \verb|\ngitem|: NG
\thusitem \verb|\thusitem|: Thus, \dots
\butitem \verb|\butitem|: But, \dots
\egitem \verb|\egitem|: For example, \dots
\end{itemize}
\end{mybox}
\end{frame}
\begin{frame}[fragile]
\frametitle{Emphasize your key points!}
\begin{enumerate}
\item Make sure to \verb|\emph{Emphasize}| your \emph{key points} \footnote{\insertfootnotemark I set them to be bold}
\item \verb|\Emph{Use this}| to emphasize a \Emph{very important point} \footnote{\insertfootnotemark a command I defined myself}
\item We have \verb|\EMPH{this command}| for the \EMPH{very very very important notes}%
\footnote{\insertfootnotemark a command I defined myself}
\end{enumerate}
\end{frame}
\begin{frame}[fragile]
\frametitle{Block, Definition, Examples}
\begin{columns}
\begin{column}{0.52\textwidth}
\lstinputlisting[ language = tex ]{./fig/sample-06.tex}
\end{column}
\begin{column}{0.48\textwidth}
\begin{block}{Title of the block}
Here are the body sentences of the block. Notice foo is not bar!
\end{block}
\end{column}
\end{columns}
\begin{columns}
\begin{column}{0.52\textwidth}
\lstinputlisting[ language = tex ]{./fig/sample-07.tex}
\end{column}
\begin{column}{0.48\textwidth}
\begin{definition}
A prime number is \dots
\end{definition}
\begin{example}
\begin{itemize}
\okitem 2 is a prime number.
\okitem 3 is also a prime number.
\ngitem 4 is not a prime number.
\end{itemize}
\end{example}
\end{column}
\end{columns}
\end{frame}
\begin{frame}[fragile]{Source codes}
\lstinputlisting[ language = tex, linewidth = 0.7\textwidth ]{./fig/sample-10.tex}
\begin{itemize}
\item You need to pass \Emph{fragile} as an option to the frame
\item There are a lot of options in the listings package
\begin{itemize}
\egitem language, linewidth, $\dots$
\end{itemize}
\end{itemize}
\end{frame}
\begin{frame}[fragile]
\frametitle{Inserting images}
\begin{columns}
\begin{column}{0.67\textwidth}
\lstinputlisting[ language = tex, linewidth = \textwidth ]{./fig/sample-11.tex}
\end{column}
\begin{column}{0.33\textwidth}
\includegraphics[width = 3cm]{./fig/logo}
\end{column}
\end{columns}
\end{frame}
\begin{frame}{Equation}
\begin{equation}
x = a_0 + \cfrac{1}{a_1 + \cfrac{1}{a_2 + \cfrac{1}{a_3 + \cfrac{1}{a_4} } } }
\end{equation}
\[ \sqrt[n]{1+x+x^2+x^3+\dots+x^n} \]
\end{frame}
{ "alphanum_fraction": 0.6138405659, "avg_line_length": 24.3302325581, "ext": "tex", "hexsha": "58afc95cffc2eff1c35f23925173a9cd9ee0e0ee", "lang": "TeX", "max_forks_count": 5, "max_forks_repo_forks_event_max_datetime": "2021-11-06T06:15:32.000Z", "max_forks_repo_forks_event_min_datetime": "2021-06-05T00:07:40.000Z", "max_forks_repo_head_hexsha": "e503e4b561b93dbd21fcf7efc7cba4ea2045b779", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "sano-jin/express-beamer", "max_forks_repo_path": "tex/02-introduction.tex", "max_issues_count": 5, "max_issues_repo_head_hexsha": "e503e4b561b93dbd21fcf7efc7cba4ea2045b779", "max_issues_repo_issues_event_max_datetime": "2021-11-29T12:26:13.000Z", "max_issues_repo_issues_event_min_datetime": "2021-08-23T05:38:04.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "TeXtw/express-beamer", "max_issues_repo_path": "tex/02-introduction.tex", "max_line_length": 85, "max_stars_count": 16, "max_stars_repo_head_hexsha": "e503e4b561b93dbd21fcf7efc7cba4ea2045b779", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "TeXtw/express-beamer", "max_stars_repo_path": "tex/02-introduction.tex", "max_stars_repo_stars_event_max_datetime": "2022-02-26T03:38:21.000Z", "max_stars_repo_stars_event_min_datetime": "2021-06-05T03:24:48.000Z", "num_tokens": 1663, "size": 5231 }
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % Short Sectioned Assignment % LaTeX Template % Version 1.0 (5/5/12) % % This template has been downloaded from: % http://www.LaTeXTemplates.com % % Original author: % Frits Wenneker (http://www.howtotex.com) % % License: % CC BY-NC-SA 3.0 (http://creativecommons.org/licenses/by-nc-sa/3.0/) % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %---------------------------------------------------------------------------------------- % PACKAGES AND OTHER DOCUMENT CONFIGURATIONS %---------------------------------------------------------------------------------------- \documentclass[paper=a4, fontsize=11pt]{scrartcl} % A4 paper and 11pt font size \usepackage[T1]{fontenc} % Use 8-bit encoding that has 256 glyphs \usepackage{fourier} % Use the Adobe Utopia font for the document - comment this line to return to the LaTeX default \usepackage[english]{babel} % English language/hyphenation \usepackage{amsmath,amsfonts,amsthm} % Math packages \usepackage{lipsum} % Used for inserting dummy 'Lorem ipsum' text into the template \usepackage{sectsty} % Allows customizing section commands \allsectionsfont{\centering \normalfont\scshape} % Make all sections centered, the default font and small caps \usepackage{fancyhdr} % Custom headers and footers \pagestyle{fancyplain} % Makes all pages in the document conform to the custom headers and footers \fancyhead{} % No page header - if you want one, create it in the same way as the footers below \fancyfoot[L]{} % Empty left footer \fancyfoot[C]{} % Empty center footer \fancyfoot[R]{\thepage} % Page numbering for right footer \renewcommand{\headrulewidth}{0pt} % Remove header underlines \renewcommand{\footrulewidth}{0pt} % Remove footer underlines \setlength{\headheight}{13.6pt} % Customize the height of the header \numberwithin{equation}{section} % Number equations within sections (i.e. 1.1, 1.2, 2.1, 2.2 instead of 1, 2, 3, 4) \numberwithin{figure}{section} % Number figures within sections (i.e. 1.1, 1.2, 2.1, 2.2 instead of 1, 2, 3, 4) \numberwithin{table}{section} % Number tables within sections (i.e. 1.1, 1.2, 2.1, 2.2 instead of 1, 2, 3, 4) \setlength\parindent{0pt} % Removes all indentation from paragraphs - comment this line for an assignment with lots of text %---------------------------------------------------------------------------------------- % TITLE SECTION %---------------------------------------------------------------------------------------- \newcommand{\horrule}[1]{\rule{\linewidth}{#1}} % Create horizontal rule command with 1 argument of height \title{ \normalfont \normalsize \textsc{How to Learn to Code} \\ [25pt] % Your university, school and/or department name(s) \horrule{0.5pt} \\[0.4cm] % Thin top horizontal rule \huge R Syllabus \\ % The assignment title \horrule{2pt} \\[0.5cm] % Thick bottom horizontal rule } \author{Amy Pomeroy} % ADD YOUR NAME HERE IF YOU CONTRIBUTE!! \date{\normalsize\today} % Today's date or a custom date \begin{document} \maketitle % Print the title %---------------------------------------------------------------------------------------- % FIRST CLASS %---------------------------------------------------------------------------------------- \section{First Class - The Basics} The goal of this class is to introduce the basics of R and get students comfortable working in RStudio. It also serves as a good time to make sure that all students have R and RStudio up and running on their computers. 
%------------------------------------------------
\subsection{Class expectations}
\begin{enumerate}
\item Use the basic math operators (+, -, *, /)
\item Understand the assignment operator and how to use it (<-)
\item Understand what a function is, how to use a function, and understand some basic functions
\item Understand the three most common data classes (character, numeric, logical)
\item Apply the basic comparison operators (>, <, ==, >=, <=)
\item Compare objects, and predict the data classes and how they change when comparing objects
\end{enumerate}
%----------------------------------------------------------------------------------------
%	SECOND CLASS
%----------------------------------------------------------------------------------------
\section{Second Class - Data Structures}
Be sure to review the information from the previous class (5-10 minutes). Then go over the four basic data structures. Be sure to emphasize the similarities and differences between the data structures. Finally, discuss how to subset each structure, again emphasizing similarities and differences.
%------------------------------------------------
\subsection{Class expectations}
\begin{enumerate}
\item Understand the basic R data structures (vector, matrix, list, data frame)
\item Subset the four basic data structures
\end{enumerate}
%----------------------------------------------------------------------------------------
%	THIRD CLASS
%----------------------------------------------------------------------------------------
\section{Third Class - Plotting Data}
Start this class by introducing how to import data from a csv file. Then review subsetting by using examples from the imported data, as understanding how to subset the data will make plotting much easier. Then go over the arguments of the basic plot function. It would be good if you made a lesson plan for this yourself with data that you find interesting. Please write it up in the same format as the other documents and save it to the GitHub so others can use it.
%------------------------------------------------
\subsection{Class expectations}
\begin{enumerate}
\item Import data from a csv file format
\item Use the arguments of the plot function
\item Make basic plots
\end{enumerate}
%----------------------------------------------------------------------------------------
%	FOURTH CLASS
%----------------------------------------------------------------------------------------
\section{Fourth Class - Control Statements}
This is typically the most challenging class for a lot of students. This class does not require a review of plotting to be successful. Make sure to start with very simple examples and only build complexity as the students' understanding grows. This is a really important concept and takes some patience to teach well.
%------------------------------------------------
\subsection{Class expectations}
\begin{enumerate}
\item Implement the three basic control statements in R (for-loops, if/else statements, and while statements)
\item Learn the and/or operators for combining logical statements
\end{enumerate}
%----------------------------------------------------------------------------------------
%	FIFTH CLASS
%----------------------------------------------------------------------------------------
\section{Fifth Class - Functions}
If your students are struggling with control loops it would be good to do more control loop practice today and push this lesson back a day. Today's goal is to teach how to write and use functions in R.
Be sure to emphasize why they would want to know how to write functions and how functions can help in their research.
%------------------------------------------------
\subsection{Class expectations}
\begin{enumerate}
\item Write and run a basic function in R
\item Understand function environments and how functions find things
\item Understand the ``do not repeat yourself'' (DRY) principle
\end{enumerate}
%----------------------------------------------------------------------------------------
%	SIXTH CLASS
%----------------------------------------------------------------------------------------
\section{Sixth Class - Packages}
You may not reach this lesson if your students struggled with control loops, and that's okay. You can always hand out the lecture notes to those students that are interested. The focus of this lecture is on doing reproducible coding (something we can all work on).
%------------------------------------------------
\subsection{Class expectations}
\begin{enumerate}
\item Install and load R packages
\item Consider some principles of reproducible research
\item Know the basic components of an R package
\item Create a simple R package using RStudio and roxygen2
\end{enumerate}
%----------------------------------------------------------------------------------------
%	SEVENTH AND EIGHTH CLASSES
%----------------------------------------------------------------------------------------
\section{Seventh and Eighth Classes - Final Projects}
Devote the last two classes to working on a final project of your choosing. This can be done individually or in groups. Some sample projects will be provided.
%----------------------------------------------------------------------------------------
\end{document}
{ "alphanum_fraction": 0.5855263158, "avg_line_length": 45.6787564767, "ext": "tex", "hexsha": "f4da915603d8c9640e6ac209529a6e28349493c3", "lang": "TeX", "max_forks_count": 2, "max_forks_repo_forks_event_max_datetime": "2019-07-03T03:37:31.000Z", "max_forks_repo_forks_event_min_datetime": "2018-06-07T16:11:47.000Z", "max_forks_repo_head_hexsha": "36ad284f3faaec6a0582b3e0df23725cfa00a375", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "How-to-Learn-to-Code/rclass", "max_forks_repo_path": "inst/other/syllabus.tex", "max_issues_count": 42, "max_issues_repo_head_hexsha": "36ad284f3faaec6a0582b3e0df23725cfa00a375", "max_issues_repo_issues_event_max_datetime": "2020-06-08T21:07:21.000Z", "max_issues_repo_issues_event_min_datetime": "2018-06-01T14:56:27.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "How-to-Learn-to-Code/rclass", "max_issues_repo_path": "inst/other/syllabus.tex", "max_line_length": 332, "max_stars_count": null, "max_stars_repo_head_hexsha": "36ad284f3faaec6a0582b3e0df23725cfa00a375", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "How-to-Learn-to-Code/rclass", "max_stars_repo_path": "inst/other/syllabus.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1779, "size": 8816 }
% Tables from https://docs.google.com/spreadsheets/d/1DiFTjsC4dP8XyOV7-uF0zwkl0r0jMuW9U9uELejpmn8/edit#gid=0
\section{Introduction}
This document presents the simplified sizing model for Rubin Observatory data management in \secref{sec:sizemodel}, based on the detailed sizing presented in \secref{sec:sizeinputs}. \secref{sec:cost} presents a very high level budget summary for DM hardware, which was used for \href{https://project.lsst.org/groups/ccb/node/3889}{LCR-2148}. More interesting now is the build-up at the USDF in pre-operations, which is shown in \secref{sec:preopscost}, and the full operations estimates in \secref{sec:opscost}. This version is in agreement with SLAC on the parameters for CPU and disk price fall as well as CPU cost etc.
\section{Construction budget}\label{sec:cost}
A high level bottom line is given in \tabref{tab:Summary}. The remainder of the document is all the details that went into that.
\input{Summary}
We have applied a modest cost reduction assuming that processors and disks get a little cheaper - that percentage is given in \tabref{tab:Inputs} along with many other parameters. \tabref{tab:Inputs} also contains the number of nodes we assume we need for Qserv. Specific costs for storage are detailed in \tabref{tab:Storage} and for compute in \tabref{tab:Machines}; based on those prices, the following budgets can be considered. The detailed annual purchasing based on those prices is given for storage in \tabref{tab:StorageCost} and for compute in \tabref{tab:Rome}.
\input{preOperations}
\input{ops}
\section{Cost details}
The summary table (\tabref{tab:Summary}) uses Xeon pricing for compute, as shown in \tabref{tab:Xeon}.
\input{Xeon}
An alternative architecture would be Rome - SLAC have chosen this for the Ops pricing. \tabref{tab:Rome} gives the price of compute based on Rome - small and large. Rome large is used in the operations calculations.
\input{Rome}
\tabref{tab:StorageCost} gives the price of storage using all the types that we need. This would be needed regardless of the compute chosen.
\input{StorageCost}
\tabref{tab:overheadCost} gives the annual cost of hosting compute at NCSA for construction. This includes purchasing racks to house new nodes etc.
\input{overheadCost}
\input{opsdetails}
\section{Models}\label{sec:model}
\subsection{Sizing model}\label{sec:sizemodel}
An exhaustive and detailed model is provided in \citedsp{LDM-138,LDM-144} - here we concentrate on the needs for the final years of construction. We explore the compute and storage needed to get us through commissioning, and suggest a 2023 purchase for DR1,2 processing which could be pushed to operations. \tabref{tab:Inputs} gives the annual requirements for the next few years.
\input{Inputs}
\subsection{Compute and storage}\label{sec:csmodel}
We wish to base our budget on reasonably well-known machines for which we have well-known prices. \tabref{tab:Machines} gives an outline of a few standard machines we use and a price. This table also gives a FLOP estimate for those machines. \tabref{tab:Storage} gives costs for different types of storage - we will require various latencies for different tasks and those have varying costs. These tables are used as lookups for the cost models in \secref{sec:cost}.
\input{Storage}
In \tabref{tab:Storage}, the NVMe price is per TB including file system servers: two DDN NVMe boxes with GPFS servers. The price is based on the top performer with the best price. The Normal price is per TB with file system disks and servers locally attached to production resources.
The latency and high latency prices apply only at NCSA and are per TB, including file systems and all people/services. The latency tier is a complete service that is not usually directly attached (S3 bucket type); it can be mounted if needed, but not at production-worthy speeds. The high latency tier is a complete service with data flowing to tape using policies.
\input{Machines}
There is also an associated running cost for machines, included in the total cost of ownership. These overheads are listed in \tabref{tab:overheads}.
\input{overheads}
{ "alphanum_fraction": 0.7924993832, "avg_line_length": 46.0568181818, "ext": "tex", "hexsha": "e672693b58d23460a88b999da2f859b9d9602db9", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "8510efddd1bd83ec3198ab835e6db9efbd0a111d", "max_forks_repo_licenses": [ "CC-BY-4.0" ], "max_forks_repo_name": "lsst-dm/dmtn-135", "max_forks_repo_path": "body.tex", "max_issues_count": 1, "max_issues_repo_head_hexsha": "8510efddd1bd83ec3198ab835e6db9efbd0a111d", "max_issues_repo_issues_event_max_datetime": "2021-02-04T21:55:46.000Z", "max_issues_repo_issues_event_min_datetime": "2021-02-04T05:46:53.000Z", "max_issues_repo_licenses": [ "CC-BY-4.0" ], "max_issues_repo_name": "lsst-dm/dmtn-135", "max_issues_repo_path": "body.tex", "max_line_length": 188, "max_stars_count": null, "max_stars_repo_head_hexsha": "8510efddd1bd83ec3198ab835e6db9efbd0a111d", "max_stars_repo_licenses": [ "CC-BY-4.0" ], "max_stars_repo_name": "lsst-dm/dmtn-135", "max_stars_repo_path": "body.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1006, "size": 4053 }
\subsection{Unified Cross-Validation and Training, Validation, and Test Sets} \label{unified_cv} The standard $k$-fold CV, which assumes no structure in the individual features of the samples, as shown in $\mat{X}$ above, is adapted to the ordinal character of time series data: A model must be evaluated on observations that occurred strictly after the ones used for training as, otherwise, the model knows about the future. Furthermore, some models predict only a single to a few time steps before being retrained, while others predict an entire day without retraining (cf., Sub-section \ref{ml_models}). Consequently, we must use a unified time interval wherein all forecasts are made first before the entire interval is evaluated. As whole days are the longest prediction interval for models without retraining, we choose that as the unified time interval. In summary, our CV methodology yields a distinct best model per pixel and day to be forecast. Whole days are also practical for managers who commonly monitor, for example, the routing and thus the forecasting performance on a day-to-day basis. Our methodology assumes that the models are trained at least once per day. As we create operational forecasts into the near future in this paper, retraining all models with the latest available data is a logical step. \begin{center} \captionof{figure}{Training, validation, and test sets during cross validation} \label{f:cv} \includegraphics[width=.8\linewidth]{static/cross_validation_gray.png} \end{center} The training, validation, and test sets are defined as follows. To exemplify the logic, we refer to Figure \ref{f:cv} that shows the calendar setup (i.e., weekdays on the x-axis) for three days $T_1$, $T_2$, and $T_3$ (shown in dark gray) for which we generate forecasts. Each of these days is, by definition, a test day, and the test set comprises all time series, horizontal or vertical, whose last observation lies on that day. With an assumed training horizon of three weeks, the 21 days before each of the test days constitute the corresponding training sets (shown in lighter gray on the same rows as $T_1$, $T_2$, and $T_3$). There are two kinds of validation sets, depending on the decision to be made. First, if a forecasting method needs parameter tuning, the original training set is divided into as many equally long series as validation days are needed to find stable parameters. The example shows three validation days per test day named $V_n$ (shown in darker gray below each test day). The $21 - 3 = 18$ preceding days constitute the training set corresponding to a validation day. To obtain the overall validation error, the three errors are averaged. We call these \textit{inner} validation sets because they must be repeated each day to re-tune the parameters and because the involved time series are true subsets of the original series. Second, to find the best method per day and pixel, the same averaging logic is applied on the outer level. For example, if we used two validation days to find the best method for $T_3$, we would average the errors of $T_1$ and $T_2$ for each method and select the winner; then, $T_1$ and $T_2$ constitute an \textit{outer} validation set. Whereas the number of inner validation days is method-specific and must be chosen before generating any test day forecasts in the first place, the number of outer validation days may be varied after the fact and is determined empirically as we show in Section \ref{stu}. 
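The averaging logic on the two validation levels can be stated compactly; the notation $e_m(\cdot)$ is introduced here purely for illustration. Let $e_m(D)$ denote the forecasting error of method $m$ on day $D$ for a given pixel. With inner validation days $V_1, \dots, V_k$, the inner validation error guiding the parameter tuning of $m$ is
\[
\bar{e}_m^{\,\mathrm{inner}} = \frac{1}{k} \sum_{i=1}^{k} e_m(V_i),
\]
and if earlier test days $T_1, \dots, T_j$ serve as the outer validation set for a later test day, the method selected for that day and pixel is
\[
m^{*} = \arg\min_{m} \frac{1}{j} \sum_{i=1}^{j} e_m(T_i).
\]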
Our unified CV approach is also optimized for large-scale production settings, for example, at companies like Uber. As \cite{bell2018} note, there is a trade-off as to when each of the inner time series in the example begins. While the forecasting accuracy likely increases with more training days, supporting inner series with increasing lengths, cutting the series to the same length allows caching the forecasts and errors. In the example, $V_3$, $V_5$, and $V_7$, as well as $V_6$ and $V_8$ are identical despite belonging to different inner validation sets. Caching is also possible on the outer level when searching for an optimal number of validation days for model selection. We achieved up to 80\% cache hit ratios in our implementation in the empirical study, thereby saving computational resources by the same amount. Lastly, we assert that our suggested CV, because it is unified around whole test days and uses fixed-size time series, is also suitable for creating consistent learning curves and, thus, answering \textbf{Q3} on the relationship between forecast accuracy and the amount of historic data: We simply increase the length of the outer training set while holding the test day fixed. Thus, independent of a method's need for parameter tuning, all methods have the same demand history available for each test day forecast.
{ "alphanum_fraction": 0.7660294703, "avg_line_length": 57.724137931, "ext": "tex", "hexsha": "d3967c2f8334afe783e1548bdb30d59ec5dc3e3a", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "9ee3396a24ce20c9886b4cde5cfe2665fd5a8102", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "webartifex/urban-meal-delivery-demand-forecasting", "max_forks_repo_path": "tex/3_mod/4_cv.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "9ee3396a24ce20c9886b4cde5cfe2665fd5a8102", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "webartifex/urban-meal-delivery-demand-forecasting", "max_issues_repo_path": "tex/3_mod/4_cv.tex", "max_line_length": 78, "max_stars_count": 1, "max_stars_repo_head_hexsha": "9ee3396a24ce20c9886b4cde5cfe2665fd5a8102", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "webartifex/urban-meal-delivery-paper-demand-forecasting", "max_stars_repo_path": "tex/3_mod/4_cv.tex", "max_stars_repo_stars_event_max_datetime": "2022-01-25T19:40:56.000Z", "max_stars_repo_stars_event_min_datetime": "2022-01-25T19:40:56.000Z", "num_tokens": 1128, "size": 5022 }
\documentclass[12pt]{article}
\usepackage{fullpage}
\usepackage{enumitem}
\usepackage{amsmath,mathtools}
\usepackage{amssymb}
\usepackage[super]{nth}
\usepackage{textcomp}
\usepackage{hyperref}
\title{Computer Architecture - CS 301} % You may change the title if you want.
\author{Rishit Saiya - 180010027, Assignment - 1}
\date{\today}
\begin{document}
\maketitle
\section{}
\begin{enumerate}[label=(\alph*)]
\item \textbf{2's complement method for signed integers} \\
The general representation of an $n$-bit binary number is $n_1 n_2 n_3 \dots n_n$, where each $n_i \in \{0,1\}$. \\
\textbf{For example:} $5$ in binary representation is denoted as $101$.
\textbf{Method-1: Using 2's Complement} \\
Now when we talk about signed integers, it is different. If we are converting a positive number, the binary representation remains the same. If it is negative, we calculate the binary representation of $2^{n} - |u|$, where $-u$ (with $u > 0$) is the number we want to represent. \\
\textbf{For example:} $-5$ in binary representation is denoted as $1011$ in a 4-bit binary system, because $2^{4} - |5| = 11$ and its binary is 1011.
\textbf{Method-2: Using 1's Complement} \\
The 2's complement can be calculated in many ways, among which the easiest is via the 1's complement. In this method we just invert the bits of the binary representation and add 1; the result is the 2's complement. \\
\textbf{For example:}\\
2's complement of 5 (i.e., the representation of $-5$): \\
Binary of 5 is 0101 $\rightarrow$ inverting the bits: 1010 $\rightarrow$ adding 1: 1011, which is the 4-bit representation of $-5$. \\
2's complement of $-5$ (i.e., the representation of $5$): \\
Binary of $-5$ is 1011 $\rightarrow$ inverting the bits: 0100 $\rightarrow$ adding 1: 0101, which is the representation of $5$.
\item \textbf{Cons of using solely a sign bit for representation}\\
The notion where one bit is the sign bit and the rest of the bits represent the magnitude of the integer isn't very practical for the following reasons:
\begin{itemize}
\item The representation of zero isn't unique: we would always have two representations of $0$. \\
\textbf{For example:} For a 4-bit representation, $0$ can be represented as $0000$ or $1000$. So ambiguity exists.
\item Arithmetic operations like addition and subtraction become incorrect in such cases. \\
\textbf{For example:} Let's consider the operation: 5 + (-2) \\
Signed binary representation of 5 $\rightarrow$ 0101 \\
Signed binary representation of $-2$ $\rightarrow$ 1010 \\
Operation: \\
\begin{equation*}
0101 + 1010 = 1111
\end{equation*}
The operation should give 0011 (i.e., 3), but we got 1111 instead, which in sign-magnitude form represents $-7$, so the result is wrong.
\end{itemize}
\section{}
As given in Figure 1, we have to convert $X = -2.25$ into the IEEE 754 format.
\begin{figure}
\centering
\includegraphics[width=15cm, height=5cm]{Single-Precision-IEEE-754-Floating-Point-Standard.png}
\caption{IEEE 754 Floating Point Representation 32 bit}
\end{figure}
The above figure gives the breakdown of the bits required in each field.\\
Since $X$ is negative, the sign bit is \textbf{1}.\\
The expansion of the magnitude is $2.25 = 2^{1} + 2^{-2}$, so the corresponding binary of 2.25 is $10.01$. We need to find E \& M for the IEEE 754 notation. Hence, we convert it according to the exponential format as follows:\\
\begin{equation*}
X = (-1)^S \times P \times 2^{E-Bias}
\end{equation*}
where Bias = 127 \& $P = 1 + M$.\\
So, now for $X = -2.25$, the representation becomes: \\
\begin{equation*}
X = (-1)^1 \times 1.125 \times 2^{1}
\end{equation*}
Clearly \textbf{E = 127+1 = 128}.
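The unbiased exponent of $1$ and the value $P = 1.125$ follow from normalizing the binary form of the magnitude:
\begin{equation*}
(10.01)_2 = (1.001)_2 \times 2^{1}, \qquad (1.001)_2 = 1 + 2^{-3} = 1.125.
\end{equation*}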
In binary representation, E becomes 10000000 (8 bits). \\
Here $P = 1.125$ and $P = 1 + M$, hence \textbf{M = 0.125}. In binary representation, M becomes 001 followed by zeros in the 23-bit mantissa field, because $0.125 = 2^{-3}$.
So, the IEEE 754 format representation (32-bit) becomes:
\begin{center}
\textbf{1 10000000 00100000000000000000000}
\end{center}
For the hex representation, we group the 32 bits into groups of four and convert each group to its hex equivalent, giving \textbf{C0100000}.
\section{}
To reduce the loss of precision when an underflow occurs, IEEE 754 includes the ability to represent fractions smaller than are possible in the normalized representation, by making the implicit leading digit 0. Hence the digit before the binary point becomes 0 instead of 1. Such numbers are called denormal numbers.\\
The standard representation is:
\begin{equation*}
A = (-1)^S \times P \times 2^{-126}
\end{equation*}
where $P = (0 + M)$, $0 \leq M < 1$. \\
The significand of the above form is of the type $0.XXX\ldots$; the stored exponent field is $E = 0$ and the effective exponent is $-126$.
The largest positive denormal (LPD) number is as follows:
\begin{equation*}
X_{LPD} = 0.11\ldots1 \ (\text{23 bits}) \times 2^{-126}
\end{equation*}
\begin{equation*}
X_{LPD} = (1-2^{-23}) \times 2^{-126}
\end{equation*}
\begin{equation*}
X_{LPD} = 2^{-126} - 2^{-149}
\end{equation*}
The smallest positive denormal (SPD) number is as follows:
\begin{equation*}
X_{SPD} = 0.00\ldots01 \ (\text{23 bits}) \times 2^{-126}
\end{equation*}
\begin{equation*}
X_{SPD} = (2^{-23}) \times 2^{-126}
\end{equation*}
\begin{equation*}
X_{SPD} = 2^{-149}
\end{equation*}
Denormal numbers provide the guarantee that addition and subtraction of floating-point numbers never underflow: two nearby floating-point numbers always have a representable non-zero difference. Without gradual underflow, the subtraction $(a - b)$ can underflow and produce zero even though the values are not equal.
\end{document}
{ "alphanum_fraction": 0.6696505423, "avg_line_length": 48.0082644628, "ext": "tex", "hexsha": "d5eb0605abb4b027ad6c89fb3a7bbc8a274c133c", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "1e73e590e88664dcc4ca652a599cdc2cde07a41a", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "rishitsaiya/Computer-Architecture-Theory", "max_forks_repo_path": "Assignment-1/180010027_Rishit.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "1e73e590e88664dcc4ca652a599cdc2cde07a41a", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "rishitsaiya/Computer-Architecture-Theory", "max_issues_repo_path": "Assignment-1/180010027_Rishit.tex", "max_line_length": 328, "max_stars_count": 1, "max_stars_repo_head_hexsha": "1e73e590e88664dcc4ca652a599cdc2cde07a41a", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "rishitsaiya/Computer-Architecture-Theory", "max_stars_repo_path": "Assignment-1/180010027_Rishit.tex", "max_stars_repo_stars_event_max_datetime": "2020-12-25T17:20:42.000Z", "max_stars_repo_stars_event_min_datetime": "2020-12-25T17:20:42.000Z", "num_tokens": 1716, "size": 5809 }
\chapter{Conclusion and Future Work}
\label{chap:Conclusion}
This chapter outlines the contributions that we have made to IAQA, as well as the limitations and drawbacks of the work in section \ref{sec:limitations}. Section \ref{sec:Future Perspectives} outlines both the potential for real-world applications and areas of research that might provide new learning, such as multi-modal transformers (section \ref{sec:multi-modal transformers}), social networks (section \ref{sec:social networks}) and continuous learning (section \ref{sec:continuous learning}). Finally, we address potential applications of IAQA research and trained models in section \ref{sec:recommended applications}.

\section{Main Contributions}
\label{main_contributions}
We have shown that transformers perform well on IAQA, even though they are very large and cannot be trained from scratch on datasets as small as those available for IAQA. Further, while the initial training of ConViTs on ImageNet1k often takes almost a week even with distributed training, these models transfer easily to the IAQA domain and show high training accuracy after only a few epochs. This makes them suitable and efficient models when pre-trained, and further shows that there is a high degree of similarity between what is learned on ImageNet1k and what the IAQA domain requires, which may seem counterintuitive. This is a useful finding in itself, and is further underscored by the high performance of ViT in early epochs.

Many of the images in the AVA benchmarking dataset are ambiguous, and transformers appear to handle these more effectively, correctly predicting ambiguously scored images whose semantic content is also ambiguous.

Newly introduced hyper-parameters and mechanisms, such as GPSA, do not appear to show an improved ability to adapt. While they converge on less data, they are still not suitable for use on a dataset as small as AVA.

We showed that training a binary classifier does not necessarily require warm-up epochs for ConViTs; however, this does not seem to be the case for ViT, where the initial epoch shows the highest validation accuracy (both overall, compared to other models, and relative to subsequent epochs). There are two potential ways to interpret this: first, that the soft inductive bias of ConViTs cushions the adaptation of the pre-trained models, and second, that the warm-up epochs were simply not sufficient (set to 3 for all models). The second possibility could be verified by conducting more experiments and adjusting the warm-up epochs of the scheduler. The first might be verified by introducing convolution layers and gradually mixing heads that have convolutions with heads that do not.

\section{Limitations and Critical Evaluation}
\label{sec:limitations}
There are several drawbacks to the approach that was taken here. These fall into the following categories:
\begin{itemize}
    \item Training approach;
    \item Models;
    \item Experimental design;
    \item Evaluation.
\end{itemize}

We trained the initial ConViTs \cite{DAscoli2021} using the available code at a high level. This made it possible to introduce the hyper-parameter `locality strength', as this was exposed as a high-level feature; however, we did not initially train under identical conditions, with controls such as the random state and the same scheduler and optimiser. This made the initial comparison difficult, and required retraining all models under identical conditions.
We did not make additions to the models apart from adding a linear layer with outputs corresponding to a binary class, and as such we are not able to assess whether adjusting the model architecture would have improved performance. This is, however, constrained by the limitations of using pre-trained models, since training transformers from scratch is not possible on the datasets provided or with the compute resources available to us. While some aspects of the architecture, such as the depth and size of the model, are compared, we did not conduct an ablation study, which could have provided insights into which parts of the network are most critical for inference and for the explainability of results. This was in part because the size of the models required lengthy training, which would have been prohibitive within the timescales of this project.

\section{Future Perspectives}
\label{sec:Future Perspectives}
There is clearly a great deal of potential for more research, both in the application domain of IAQA and within image classification more widely. Where general classification tasks on ImageNet1k are geared towards improving model performance, and domain applications focus on sub-domains or application domains, we make recommendations both for research within the field of IAQA and for applications of IAQA in industry.

\subsection{Further Research}
\label{sec:further research}
While we have trained a significant number of different networks to provide a side-by-side comparison, and have made adaptations to a binary classification problem, it would be interesting to see how network architectures might be adapted, or how ensemble training might leverage the best of both the ViT and CNN worlds globally as well as locally. Consideration is given here to:
\begin{itemize}
    \item Model architecture and spatial adjustment, including Siamese and unsupervised approaches;
    \item Training as a 10-class probability distribution and/or regression;
    \item Exploring other areas of data, including a data dictionary, metadata and textual information.
\end{itemize}

Vision transformers offer a new perspective, both in qualitative results and in their potential to reach higher accuracy; however, CNN approaches that have inbuilt attention mechanisms still outperform out-of-the-box ViTs and ConViTs.

Another option would be training on a 10-bin probability distribution to produce a more detailed metric, which might provide a finer-grained analysis, as in \cite{Zhang2021d}. This would further enable a side-by-side analysis of accuracy against the 10-bin probability distribution, where accuracy might be compared with both the majority class and the mean score, and would allow predicted probability distributions to be compared alongside the actual probability distributions.
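As a purely illustrative sketch (this is not an experiment carried out in this work), reducing a predicted 10-bin distribution to the quantities mentioned above could look as follows in Python; the predicted distribution and the binarisation threshold of 5, commonly used for AVA, are assumptions made only for the example.

\begin{verbatim}
import numpy as np

# Hypothetical predicted distribution over the 10 AVA score bins (sums to 1).
bins = np.arange(1, 11)
pred = np.array([0.02, 0.03, 0.05, 0.10, 0.20,
                 0.25, 0.18, 0.10, 0.05, 0.02])

mean_score   = float(np.dot(pred, bins))    # expected (mean) score: 5.84
majority_bin = int(bins[np.argmax(pred)])   # most probable bin: 6
binary_label = int(mean_score > 5)          # binary high/low quality label: 1

print(mean_score, majority_bin, binary_label)
\end{verbatim}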
Further experiments could also examine the possibility of adjusting convolutional features to vary the level of inductive bias (Figure \ref{fig:Inductive Bias EG}). This might include adjusting the stride, but also applying max or average pooling to convolutional feature maps that are larger than the ViT patch dimension. Further, many of the images in the AVA benchmarking dataset are much larger than $224 \times 224$; therefore, data is clearly lost when downsampling images. A notable feature of the AVA dataset is the size of its images, and a significant portion of the 64GB uncompressed dataset is effectively lost.

This is further compounded by the fact that image resolution is itself a potential aesthetic attribute, and photographers may have purposefully chosen high-resolution images (or vice versa) as part of their competition submission. This might require adjusting patch sizes to optimise for the task, as patch size may become the bottleneck in learning should larger images, of $3 \times 256 \times 256$ for example, be used.

\subsubsection{Multi-Modal Transformers}
\label{sec:multi-modal transformers}
Given that it is well recognised that the semantic content of images has a bearing on aesthetic quality \cite{Simond2015,Mullennix2020}, it would be useful to examine multi-modal approaches that use both natural language and vision transformers, particularly given the successes of natural language transformers such as BERT \cite{Devlin2019, Wolf2020a}. Some key areas within this are:
\begin{enumerate}
    \item A mapping between NLP tokenization and image feature maps;
    \item Training NLP models on both the sentiment and the semantic content of image pages, to enhance training and develop context-aware feature maps;
    \item Examining the nature of figurative language in relation to the content of the image (it is well known that the aspects of written natural language that remain challenging involve nuanced interpretation, for example the use of sarcasm or irony: properties that are intuitive parts of human social interaction);
    \item Examining what further content is available that is not included with AVA: each individual image page on DPChallenge.com has a great deal of commentary from DPChallenge.com members.
\end{enumerate}
The text descriptions of each image provided by the users of DPChallenge are rich examples of human annotation.
\begin{quote}
The colors and the smallest structure of bugs is amazing. You have brought to ones eye all we don't see.\cite{Brendel2021}
\end{quote}
A part of this may also be mapping an online social network (DPChallenge.com)\footnote{\href{https://www.dpchallenge.com/}{https://www.dpchallenge.com/}}, something that in itself might prove to be fascinating research. This approach might also yield explainable insights into the transformers beyond attention \cite{Chefer2020}, with the training of NLP models providing contextual information that might be purposefully excluded or altered.

\subsubsection{Continuous Learning}
\label{sec:continuous learning}
Continuous learning is recognised as being of particular importance where context is vital \cite{Chen2016, Lomonaco2020}, and IAQA is one such area where context and newly available data take precedence. For instance, DPChallenge has added 120k new images since the AVA benchmarking dataset was scraped and compiled in 2012; we have web-scraped these along with the associated metadata. More than one new challenge is created per calendar month, and these challenges also have frequently updated comments. One particularly exciting area within this is the potential for training and predicting image quality in real time, and comparing predictions with competition ground truth as it develops. This also has the potential to be leveraged for commercial purposes, where finding and ranking new image content may be part of obtaining a competitive advantage, especially where photography and advertising need new content that is of high quality and that also reflects, for example, current fashion trends.
Such a model might be able to provide insights into what differs between trending styles and well-established image quality features.

IAQA is a rich field with many publications (under 300 associated with the AVA benchmarking dataset). Performing a meta-analysis of the domain, including mapping citation relationships and datasets, would be a rich exercise and would contribute new knowledge to the field. It would also support the production of a data dictionary for IAQA. The code repository associated with this dissertation provides a formative example of how this might be structured\footnote{\href{https://github.com/fdsig/iaqa}{https://github.com/fdsig/iaqa}}. Many of the IAQA datasets reviewed in chapter \ref{chap:Literature_Review} are shown in table \ref{Tab:IAQA Datasets} and in appendix \ref{chap:Appendix}; these provide subclass granularity as well as quality classes.

\subsubsection{Social Networks}
\label{sec:social networks}
The online community within DPChallenge.com is clearly well established, with trust and supportive feedback being provided by members. In addition to leveraging the comments data outlined above, it may also be a fruitful avenue of research to use deep learning for community learning and to map the online community \cite{Jin2017, Wu2020b}. This may also feed into other areas of research where online community is central, such as deep-fake detection \cite{Ajao2019}. The DPChallenge.com community might also have a vested interest in such areas, given the growing potential for deep-fake images to be presented in competitions. Further, this may also have the benefit of providing new metadata on the AVA benchmarking dataset, with the ability to leverage data on voting patterns within competitions, something that is presently absent and has been a source of criticism of the AVA benchmarking dataset.

\subsection{Recommendations and Applications}
\label{sec:recommended applications}
While IAQA as a classification task is interesting in its own right, there are also several areas of commercial application that could be further developed, such as a mobile application. This might involve research into how best to prune networks and into which trade-offs to make, such as whether false positives or false negatives are preferable. Further, model size would be an important consideration for on-device, real-time IAQA inference; increasingly compact and efficient CNNs \cite{Feng2019} might be appropriate, as many of the models trained here would be too large to use for inference on a mobile phone. Areas that have not been fully explored are:
\begin{itemize}
    \item Applications to art;
    \item Predicting for individuals as well as for groups as a commercial application;
    \item Embedding quality estimation within a mobile application.
\end{itemize}
Novel applications may come from \textit{deeper} rather than \textit{broader} research: for instance, using art databases to learn the aesthetics of particular periods of art history. This would involve formulating IAQA problems in different ways, such as conducting unsupervised learning to learn, for example, the aesthetic space of a particular artistic movement. Aesthetic classifiers could also act as filters on databases: this may be useful in areas such as medical imaging, to either apply or select high-quality images, alongside applications in mobile device filters.
On-device image enhancement: in a world where much of our lives is recorded on mobile devices, this clearly has a great deal of commercial potential, as does the automated enhancement or selection of product photographs, as in \cite{idealo2021}.
\subsection{Projection algorithm}
\paragraph{}
The point projection algorithm finds, for a given test point, the parameter $u$ of the nearest point on the NURBS curve. In the proposed method, all points on the NURBS curve are generated based on the approximated polylines and hence are not exactly on the boundaries. Although a closed-form solution for point projection exists, it requires the order of the NURBS curve to be less than $4$ \citep{Pie1997}. As a consequence, a projection algorithm \citep{MA200379} using the Newton-Raphson method is introduced to tackle this problem.

\paragraph{}
For a given point $P=(x,y)$, its projection on the curve $C(u)$, such that the distance $|P-C(u)|$ is minimal, is sought. However, in the proposed method, the large number of possible curves increases the computational cost significantly: the projection point for the test point $P$ needs to be determined for every existing curve, and the one with the smallest minimum distance is selected. One possible improvement is to limit the candidate curves to only a few by utilizing the fact that the NURBS curves have been divided into multiple sub-curves without interior knots by the knot insertion introduced in Sec.~\ref{lr_sec:nurbs_knot_ins}. Another property that can be utilized is that most test points $P$ are expected to be extremely close to their projections on the curve $C(u)$.

\paragraph{}
As a consequence, the strong convex hull property can be adopted to limit the number of candidate curves to just one or two. The construction of the convex hull is explained in detail in Sec.~\ref{qdt_sc:convex_hull}. The signed distance of the test point to each curve's convex hull is calculated, and only the curves with a negative signed distance, which indicates that the point lies inside the convex hull, are selected as candidates. If no negative distance is detected, a small number (taken as $3$ in the proposed method) of curves with the minimum signed distance are selected.

\paragraph{}
In order to find the projection of the test point $P$ on the curve $C$, the target function $f$ can be expressed as
\begin{equation}
    f(u) = \mathbf{C}^\prime (u) \cdot (\mathbf{C}(u) - \mathbf{P})
\end{equation}
When $f(u) = 0$, the point is either located on the curve or the distance $|\mathbf{C}(u) - \mathbf{P}|$ is minimal. The Newton-Raphson iteration can then be written as
\begin{equation}
    u_{i+1} = u_i - \frac{ f(u_i) }{ f^\prime(u_i) }
    \label{qdt_eq:projection_iteration}
\end{equation}
After each iteration, the following criteria are checked in sequence.

\paragraph{1} Does the point coincide with $C(u_i)$:
\begin{equation*}
    |\mathbf{C} (u_i) - \mathbf{P}| \leq \epsilon_1
\end{equation*}
where $\epsilon_1$ stands for the tolerance on distance in Euclidean space.

\paragraph{2} Is the cosine zero:
\begin{equation*}
    \frac{ |\mathbf{C}^\prime (u) \cdot (\mathbf{C}(u) - \mathbf{P})| }{ |\mathbf{C}^\prime (u)| |\mathbf{C}(u) - \mathbf{P}| } \leq \epsilon_2
\end{equation*}
where $\epsilon_2$ stands for the tolerance on the cosine. If either of these conditions is met, the iteration is terminated. Otherwise Eq.~\ref{qdt_eq:projection_iteration} is applied to find the parameter $u_{i+1}$ for the next iteration.

\paragraph{3} Make sure $u_{i+1}$ stays within its domain
\begin{equation*}
    u_{i+1} \in [a,b]
\end{equation*}
where $a$ and $b$ are the lower and upper bounds of the knot vector of curve $C$.
If the curve is open
\begin{equation}
\left\{
\begin{array}{rl}
    u_{i+1} = a & u_{i+1} < a \\
    u_{i+1} = b & u_{i+1} > b
\end{array}
\right.
\end{equation}
If the curve is closed
\begin{equation}
\left\{
\begin{array}{rl}
    u_{i+1} = b - ( a - u_{i+1} ) & u_{i+1} < a \\
    u_{i+1} = a + ( u_{i+1} - b ) & u_{i+1} > b
\end{array}
\right.
\end{equation}

\paragraph{4} The difference between the new parameter $u_{i+1}$ and the old one $u_i$ is insignificant
\begin{equation*}
    | (u_{i+1} - u_i)\mathbf{C}^\prime(u_i) | \leq \epsilon_1
\end{equation*}
The iteration is terminated if this condition is met.
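The iteration can be summarised in the following purely illustrative Python sketch, which is not the implementation used in this work. The callables \texttt{curve}, \texttt{d\_curve} and \texttt{dd\_curve}, standing for $\mathbf{C}(u)$, $\mathbf{C}^\prime(u)$ and $\mathbf{C}^{\prime\prime}(u)$, are assumed to be supplied by a NURBS evaluation routine, and the derivative $f^\prime(u) = \mathbf{C}^{\prime\prime}(u)\cdot(\mathbf{C}(u)-\mathbf{P}) + |\mathbf{C}^\prime(u)|^2$ follows from the product rule.

\begin{verbatim}
import numpy as np

# Illustrative Newton-Raphson projection of a point onto a parametric curve.
def project_point(P, curve, d_curve, dd_curve, u0, a, b,
                  closed=False, eps1=1e-9, eps2=1e-9, max_iter=50):
    u = u0
    for _ in range(max_iter):
        r = curve(u) - P                            # C(u) - P
        c1 = d_curve(u)                             # C'(u)
        # criterion 1: point coincidence
        if np.linalg.norm(r) <= eps1:
            break
        # criterion 2: zero cosine (C'(u) orthogonal to C(u) - P)
        if abs(np.dot(c1, r)) / (np.linalg.norm(c1) * np.linalg.norm(r)) <= eps2:
            break
        f = np.dot(c1, r)                           # f(u)
        df = np.dot(dd_curve(u), r) + np.dot(c1, c1)  # f'(u)
        if df == 0.0:
            break
        u_new = u - f / df                          # Newton-Raphson update
        # criterion 3: keep the parameter inside [a, b]
        if closed:
            if u_new < a: u_new = b - (a - u_new)
            if u_new > b: u_new = a + (u_new - b)
        else:
            u_new = min(max(u_new, a), b)
        # criterion 4: negligible parameter change
        if np.linalg.norm((u_new - u) * c1) <= eps1:
            u = u_new
            break
        u = u_new
    return u
\end{verbatim}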
\subsection{Convex hull in 2D}
\label{qdt_sc:convex_hull}
\paragraph{}
The convex hull property of the NURBS curve indicates that all points on the curve must be contained within the convex hull constructed from its control points \citep{SELIMOVIC2009772}. A great number of algorithms can be used to build the convex hull, including gift wrapping \citep{Cormen:2009:IAT:1614191}, Graham scan \citep{ANDERSON197853}, quickhull \citep{Barber:1996:QAC:235815.235821}, Chan's algorithm \citep{Chan1996} and others \citep{doi:10.1137/0215021, ANDREW1979216}. Quickhull is adopted in the proposed method as it provides a computationally efficient and stable algorithm. The algorithm utilizes the idea of ``divide and conquer'' to build the convex hull, with an expected time complexity of $O(n\log n)$ and $O(n^2)$ in the worst case. Generally speaking, it works as expected in most situations, except in cases of high symmetry or when most of the points lie on the circumference of a circle. The algorithm can be implemented with the following steps:
\begin{enumerate}
    \item Find the leftmost and rightmost points (the points with minimum and maximum $x$), since they are guaranteed to be part of the convex hull.
    \item Connect these two points and use the resulting line to separate the remaining points into two groups.
    \item In each group, find the point with the maximum distance to the line in step 2.
    \item Construct a triangle from the two points in step 2 and the point in step 3.
    \item Eliminate all points contained inside the triangle constructed in step 4, since they cannot belong to the hull.
    \item Repeat the previous three steps on the two new edges of the triangle, with the distance now measured from each point to the corresponding edge instead of the line in step 2.
    \item Terminate the iteration when no points are left.
\end{enumerate}
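For illustration only (this is not the implementation described in this thesis, and the helper names are our own), the quickhull recursion above can be sketched in a few lines of Python:

\begin{verbatim}
import numpy as np

# Illustrative quickhull sketch for 2D points.
def signed_dist(p, a, b):
    """Cross-product based signed distance of p from the directed line a -> b."""
    return (b[0] - a[0]) * (p[1] - a[1]) - (b[1] - a[1]) * (p[0] - a[0])

def _hull_side(points, a, b):
    """Recursively collect hull points strictly to the left of line a -> b."""
    left = [p for p in points if signed_dist(p, a, b) > 0]
    if not left:
        return []
    far = max(left, key=lambda p: signed_dist(p, a, b))   # step 3: farthest point
    # points inside the triangle (a, far, b) are discarded implicitly (step 5)
    return _hull_side(left, a, far) + [far] + _hull_side(left, far, b)

def quickhull(points):
    pts = [tuple(p) for p in points]
    a = min(pts)                      # leftmost point (step 1)
    b = max(pts)                      # rightmost point (step 1)
    upper = _hull_side(pts, a, b)     # step 2: split into two groups
    lower = _hull_side(pts, b, a)
    return [a] + upper + [b] + lower  # hull vertices in order around the boundary

# Example usage with a few sample points
print(quickhull([(0, 0), (2, 0), (1, 1), (2, 2), (0, 2), (1, 0.5)]))
\end{verbatim}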
\documentclass[12pt]{amsart} \usepackage{superdate} \usepackage{times} \usepackage{mathptmx} \usepackage{courier} \usepackage{hyperref} \usepackage[margin=1in]{geometry} \usepackage{graphicx} \title{Matgraph By Example} \author{Edward Scheinerman} \address{Department of Applied Mathematics and Statistics\\ The Johns Hopkins University\\ Baltimore, Maryland 21218-2682 USA} \email{[email protected]} \newcommand\matlab{MATLAB} \newcommand\matgraph{\textsc{Matgraph}} \newcommand\ER{Erd\H{o}s-R\'enyi} \newcommand{\RR}{\mathbb{R}} \newcommand{\ZZ}{\mathbb{Z}} \date{\superdate} \begin{document} \maketitle This document illustrates the use of \matgraph\ through the use of specific examples. Some of the concepts (such as the notion of \emph{declaring} \verb|graph| objects) are explained in the accompanying users' guide \emph{Matgraph: A \matlab\ Toolbox for Graph Theory} that you should read in conjunction with this document. A description of all the \matgraph\ functions can be found in the accompanying web pages in the \verb|html| directory. We assume that you have a reasonable command of \matlab. \section{Getting Started} \subsection{Download \matgraph} \label{sect:download} To use \matgraph, you need to download the \matgraph\ compressed tar archive file from the website \begin{verbatim} http://www.ams.jhu.edu/~ers/matgraph \end{verbatim} Click on the the words ``clicking here'' in the paragraph that begins ``You can download Matgraph by clicking here.'' This places a file named \verb|matgraph-X.Y.tgz| on your computer (where \verb|X.Y| is the version number). Double clicking this file or issuing the Unix command \begin{verbatim} tar xfz matgraph-X.Y.tgz \end{verbatim} (replace \verb|X.Y|) should extract a directory (folder) named \verb|matgraph| that you can place anywhere you wish on your computer. \subsection{Design Principles} \matgraph\ is designed to make interactive graph theory computation simple by building on the power of \matlab. Before we begin in earnest, there are important principles behind the design of \matgraph\ that you must understand. \begin{enumerate} \item All graphs in \matgraph\ are simple and undirected; there are no loops, multiple edges, or directed edges. \item The vertex set of graphs in \matgraph\ is always of the form $\{1,2,\ldots,n\}$ for some integer $n$. One implication of this principle is that when a vertex is deleted, all vertices with larger index are renumbered. (It is possible to attach a label to a vertex that is distinct from its vertex number). \item Graph variable must be declared prior to use (see \S\ref{sect:first-session}). If a graph variable is declared within a \verb|.m| function file, then it must be ``released'' before the function exits. Declaration and release are accomplished with the commands \begin{verbatim} g = graph; \end{verbatim} and \begin{verbatim} free(g) \end{verbatim} \item \matgraph\ functions are capable of changing their arguments. For example, the command \verb|delete(g,1,2)| deletes the edge $\{1,2\}$ from the graph \verb|g|; the variable \verb|g| is modified by this command. (This is unusual for \matlab.) If a \matgraph\ function takes two (or more) graph arguments then only the first argument to the function might be changed; all subsequent arguments are left unmodified. 
\end{enumerate} \subsection{A first session} \label{sect:first-session} Launch \matlab\ and issue a command that looks like this: \begin{verbatim} >> addpath /home/ralph/Programming/matgraph/ \end{verbatim} This tells \matlab\ where to find the \matgraph\ toolbox. Of course, replace the pathname with the location of the \verb|matgraph| folder that you downloaded as described in \S\ref{sect:download}. For the rest of this document, we tacitly assume that you have given this command before using \matgraph. If you like, you may add this command to your \verb|startup.m| file (see the \matlab\ documentation for more detail). Next, we \emph{declare} a graph variable \verb|g|: \begin{verbatim} >> g = graph Graph system initialized. Number of slots = 500. Graph with 0 vertices and 0 edges (full) \end{verbatim} Unlike most \matlab\ variables, graph variables \emph{must} be declared; see the users' guide for more detail. Next, set \verb|g| to be the Petersen graph: \begin{verbatim} >> petersen(g) >> g Graph with 10 vertices and 15 edges (full) \end{verbatim} The command \verb|petersen(g)| overwrites \verb|g| with the Petersen graph. Now we draw the graph in a figure window: \begin{verbatim} >> ndraw(g) \end{verbatim} The command \verb|ndraw| draws the graph and writes each vertex's number inside its circle. See Figure~\ref{fig:petersen}. \begin{figure}[ht] \begin{center} \includegraphics[scale=0.5]{figs/petersen} \end{center} \caption{Petersen's graph.} \label{fig:petersen} \end{figure} Note that the embedding of the graph is imparted to \verb|g| by the command \verb|petersen|. In addition to \verb|ndraw|, \matgraph\ provides the following variations: \verb|draw| (draw the graph with vertices drawn as hollow circles), \verb|ldraw| (draw the graph with the vertices inscribed with their labels---different from their vertex numbers), and \verb|cdraw| (draw the graph with colored vertices). Type, for example, \verb|help cdraw| for more information. It is well known that Petersen's graph is not Hamiltonian. To verify this, type: \begin{verbatim} >> hamiltonian_cycle(g) ans = [] \end{verbatim} The empty matrix indicates that no Hamiltonian cycle was found. However, if we delete any vertex from \verb|g|, the graph is Hamiltonian: \begin{verbatim} >> delete(g,1) >> hamiltonian_cycle(g) ans = 1 2 3 4 9 7 5 8 6 \end{verbatim} The command \verb|delete(g,1)| deletes vertex number 1 from the graph. \textbf{Notice that the \texttt{delete} command changes the graph.} By a bit of fancy footwork, \matgraph\ functions are able to modify arguments of type \verb|graph|---this is counter to the usual \matlab\ call-by-value semantics. Many of the \matlab\ commands modify their graph arguments, but the following convention is observed: If a command is capable of modifying a graph, \emph{only the first argument to the command can be modified}. \textbf{Notice that the vertices have been renumbered.} The output of \verb|hamiltonian_cycle| reports that $$ 1 \to 2 \to 3 \to 4 \to 9 \to 7 \to 5 \to 8 \to 6 \to 1 $$ is a Hamiltonian cycle in \verb|g|. At first this may be confusing since we had previously deleted vertex 1 from the graph. \matgraph\ follows the convention that the vertex set \emph{always} consists of consecutive integers beginning with 1. When a vertex is deleted from a graph, all vertices with higher numbers are renumbered accordingly. To see this, type this: \begin{verbatim} >> clf >> ndraw(g) \end{verbatim} The result is shown in Figure~\ref{fig:petersen-vertex}.
\begin{figure}[ht] \includegraphics[scale=0.5]{figs/petersen-vertex} \caption{Petersen's graph with a vertex deleted is Hamiltonian.} \label{fig:petersen-vertex} \end{figure} Notice that we issued the \matlab\ command \verb|clf| before drawing \verb|g|. The \verb|clf| command clears the current figure window. This is necessary because \matgraph's drawing commands draw their graphs on top of whatever is already in the figure window (without erasing the figure window first). If we are done using the graph variable \verb|g|, we should \emph{not} simply give the usual \matlab\ command \verb|clear g|. Rather, we do this: \begin{verbatim} >> free(g) >> clear g \end{verbatim} The command \verb|free(g)| releases the graph \verb|g|'s ``slot'' in a hidden data structure. When \matgraph\ starts up (with the first \verb|g=graph| command or by an explicit invocation of \verb|graph_init|), a specific number of slots are allocated for graphs (at this writing, 500 slots). Each time a graph variable is declared (by typing \verb|g=graph|), one of these slots is used; the command \verb|free(g)| releases the slot held by the graph. See the users' guide for more detail. To wipe out the entire hidden data structure (and all the graphs contained therein) you can use \verb|graph_destroy|. One more important point. The typical behavior of \matlab's assignment operator is to make a copy. So if \verb|A| is a matrix, \verb|B=A| sets \verb|B| to be an independent copy of \verb|A|. Changes to \verb|B| do not affect \verb|A|. However, \matgraph\ graph objects behave differently. If \verb|g| is a graph, then the command \verb|h=g| does \emph{not} make a separate copy of \verb|g|, and any modification to \verb|h| also modifies \verb|g|. It is nearly certain this is not the behavior you desire. Instead, do this: \begin{verbatim} >> g = graph Graph with 0 vertices and 0 edges (full) >> petersen(g) >> h = graph Graph with 0 vertices and 0 edges (full) >> copy(h,g) >> delete(h,1,2) >> h Graph with 10 vertices and 14 edges (full) >> g Graph with 10 vertices and 15 edges (full) \end{verbatim} The \verb|copy(h,g)| overwrites \verb|h| with an independent copy of \verb|g|. \section{Basics} \subsection{A path} One of the simplest graphs is a path on $n$ vertices, $P_n$. Here we create such a graph in \matgraph. \begin{verbatim} >> g = graph Graph system initialized. Number of slots = 500. Graph with 0 vertices and 0 edges (full) >> for k=1:9, add(g,k,k+1), end >> ndraw(g) \end{verbatim} This creates the graph $P_{10}$ and draws it in a figure window. Notice that the command \verb|add(g,u,v)| adds the edge $uv$ to the graph. The variables $u$ and $v$ must be distinct positive integers; otherwise the command has no effect. If vertices $u$ and $v$ are already in the graph, the edge $uv$ is simply added. However, if a graph has $n$ vertices and either $u$ or $v$ is greater than $n$, then the graph's vertex set is first expanded to $\max\{u,v\}$ vertices and then the edge is added. [Remember, the vertex set of a graph in \matgraph\ is \emph{always} of the form $\{1,2,\ldots,n\}$.] Notice that the drawing of \verb|g| places the vertices around a circle. If a graph does not have an embedding (e.g., the \verb|petersen| command imparts an embedding to its argument), then the drawing commands (such as \verb|ndraw|) give the graph a default embedding by placing the vertices around a circle.
Now here is a simpler way to create (and view) $P_{10}$: \begin{verbatim} >> path(g,10) >> clf >> ndraw(g) \end{verbatim} The command \verb|path(g,10)| overwrites \verb|g| with a path on 10 vertices together with a sensible embedding---the vertices are arranged in a straight line. \subsection{Adding and deleting} Let's create the graph formed by deleting a perfect matching from $K_{10}$. Here are the commands: \begin{verbatim} >> complete(g,10) >> for k=1:5, delete(g,k,k+5), end >> clf >> ndraw(g) \end{verbatim} The command \verb|complete(g,10)| overwrites \verb|g| with $K_{10}$. (We assume that we are simply continuing from the previous section so the graph \verb|g| has already been declared.) The \verb|delete(g,u,v)| command deletes the edge $uv$ from the graph (assuming it exists). This 3-argument version of \verb|delete| does not remove any vertices from the graph. To delete vertex $u$ from a graph (and all its incident edges) give the command \verb|delete(g,u)|. Type \verb|help graph/delete| to see all the various ways \verb|delete| can remove vertices and edges from a graph. To delete all vertices (and hence, all edges) from a graph, type \verb|resize(g,0)|. To delete all edges (but no vertices) from a graph, type \verb|clear_edges(g)|. \medbreak Creating the graph formed from $K_{5,5}$ by deleting a perfect matching is similar: \begin{verbatim} >> complete(g,5,5) >> for k=1:5, delete(g,k,k+5), end >> clf >> ndraw(g) \end{verbatim} The command \verb|complete(g,m,n)| overwrites \verb|g| with the complete bipartite graph $K_{m,n}$. Type \verb|help graph/complete| to see what else \verb|complete| can do. \medbreak Now try this: \begin{verbatim} >> resize(g,0) >> add(g,3,6) >> clf >> ndraw(g) \end{verbatim} The command \verb|resize(g,0)| converts \verb|g| to an empty (vertexless) graph. \verb|add(g,3,6)| asks to add an edge between vertices 3 and 6, but since these vertices are not (yet) in the graph, the vertex set of \verb|g| is expanded to $\{1,2,3,4,5,6\}$ and then the edge is added. Consequently, there are four isolated vertices in \verb|g| as the picture reveals. \medbreak Next we create the M\"obius ladder on 12 vertices (a 12-cycle plus edges between diametrically opposite vertices). \begin{verbatim} >> cycle(g,12) >> elist = [1:6;7:12]' elist = 1 7 2 8 3 9 4 10 5 11 6 12 >> add(g,elist) >> clf; ndraw(g) \end{verbatim} \verb|cycle(g,12)| overwrites \verb|g| with $C_{12}$. Next, we prepare a $6\times2$ matrix \verb|elist| that specifies the extra edges we plan to add to \verb|g|. The line \verb|elist = [1:6;7:12]'| is standard \matlab\ to create this matrix. Then \verb|add(g,elist)| adds all the edges in \verb|elist| to \verb|g|. \subsection{Neighbors, degrees, etc.} Create a grid graph like this: \begin{verbatim} >> grid(g,3,4) >> clf;ndraw(g) >> nv(g) ans = 12 >> ne(g) ans = 17 \end{verbatim} The grid is drawn nicely as shown in Figure~\ref{fig:grid34}. \begin{figure}[ht] \includegraphics[scale=0.5]{figs/grid34} \caption{The $3\times4$ grid graph.} \label{fig:grid34} \end{figure} Notice that \verb|nv| and \verb|ne| report the number of vertices and edges, respectively, of the graph. Also try \verb|size(g)|, \verb|disp(g)|, or simply typing \verb|g| on a line by itself. 
We can learn the degree of a vertex, or the entire degree sequence of the graph, like this: \begin{verbatim} >> deg(g,1) ans = 2 >> deg(g,2) ans = 3 >> deg(g) ans = 2 3 2 3 4 3 3 4 3 2 3 2 \end{verbatim} To learn the neighbors of a vertex, we have two choices: \begin{verbatim} >> neighbors(g,2) ans = 1 3 5 >> g(2) ans = 1 3 5 \end{verbatim} [Note, the syntax \verb|g(v)| does not seem to work inside \verb|.m| file functions.] To test if two vertices are adjacent, we can use the \verb|has| command or the syntax \verb|g(u,v)| [which does not seem to work inside \verb|.m| file functions]. \begin{verbatim} >> has(g,1,2) ans = 1 >> has(g,1,5) ans = 0 >> g(1,2) ans = 1 >> g(1,5) ans = 0 \end{verbatim} We can verify that this graph is connected \begin{verbatim} >> isconnected(g) ans = 1 \end{verbatim} and find a shortest path between vertices 1 and 12: \begin{verbatim} >> find_path(g,1,12) ans = 1 2 3 6 9 12 \end{verbatim} Try \verb|dist(g,1,12)| to see that the distance between these vertices is 5. \subsection{Matrices} To get the adjacency matrix of, say, the Petersen graph, do this: \begin{verbatim} >> petersen(g) >> A = matrix(g) A = 0 1 0 0 1 1 0 0 0 0 1 0 1 0 0 0 1 0 0 0 0 1 0 1 0 0 0 1 0 0 0 0 1 0 1 0 0 0 1 0 1 0 0 1 0 0 0 0 0 1 1 0 0 0 0 0 0 1 1 0 0 1 0 0 0 0 0 0 1 1 0 0 1 0 0 1 0 0 0 1 0 0 0 1 0 1 1 0 0 0 0 0 0 0 1 0 1 1 0 0 \end{verbatim} Next, we attempt to find the eigenvalues of this matrix, but run into trouble: \begin{verbatim} >> eig(A) ??? Function 'eig' is not defined for values of class 'logical'. \end{verbatim} The problem is that \matgraph's \verb|matrix| command returns a Boolean matrix (entries represent \texttt{true} and \texttt{false}), but it is simple to convert this to a numerical matrix and get the eigenvalues: \begin{verbatim} >> A = double(A); >> eig(A) ans = -2.0000 -2.0000 -2.0000 -2.0000 1.0000 1.0000 1.0000 1.0000 1.0000 3.0000 \end{verbatim} Use \verb|laplacian| to get the Laplacian matrix of a graph. The command \verb|spy(g)| is equivalent to \verb|spy(matrix(g))|; this creates a square image with a dot in position $i,j$ exactly when $ij$ is an edge of $g$. It is also possible to define a graph by specifying its adjacency matrix: \begin{verbatim} >> A = ones(6)-eye(6) A = 0 1 1 1 1 1 1 0 1 1 1 1 1 1 0 1 1 1 1 1 1 0 1 1 1 1 1 1 0 1 1 1 1 1 1 0 >> set_matrix(g,A) >> g Graph with 6 vertices and 15 edges (full) \end{verbatim} See also: \verb|incidence_matrix|. \subsection{Standard graph constructors} \matgraph\ includes many functions for creating specific, standard graphs. We have encountered a few already: \verb|path|, \verb|cycle|, \verb|complete|, \verb|petersen|, and \verb|grid|. In addition to these, there are built-in methods for creating the Platonic solid graphs (for example, \verb|dodecahedron|), wheels, Paley graphs, and so forth. See the on-line documentation (in the \verb|matgraph/html| directory) for a complete list of all graph methods. Graphs can also be built up from other graphs using graph operations; these are explored in \S\ref{sect:ops}. Worthy of special mention are various methods to generate random graphs including \verb|random|, \verb|random_bipartite|, \verb|random_regular|, and \verb|random_tree|. \section{Embeddings} \subsection{Basics} As we have seen, graphs created in \verb|matgraph| can be drawn on the screen. A graph may have an embedding, which is simply a specification of $x,y$-coordinates for all of the vertices. Edges are always drawn as line segments.
Some graph constructors (e.g., \verb|petersen|) imbue their graphs with a prespecified embedding. However, if we start with a new graph and simply add vertices and edges, no embedding is created for the graph: \begin{verbatim} >> g = graph Graph system initialized. Number of slots = 500. Graph with 0 vertices and 0 edges (full) >> for k=1:5, add(g,k,k+1), end >> g Graph with 6 vertices and 5 edges (full) >> hasxy(g) ans = 0 \end{verbatim} This creates the path $P_6$ but no embedding is associated with the graph; this is observed with the \verb|hasxy| command. If we try to draw a graph that lacks an embedding, \matgraph\ gives the graph a default embedding in which the vertices are placed around a circle. \begin{verbatim} >> draw(g) >> hasxy(g) ans = 1 \end{verbatim} We can specify the embedding for a graph by giving specific $x,y$-coordinates. Suppose we want to site the vertices of $P_6$ at $(1,0)$, $(2,0)$, \ldots, $(6,0)$; we can do this: \begin{verbatim} >> xy = [ 1:6 ; zeros(1,6) ]' xy = 1 0 2 0 3 0 4 0 5 0 6 0 >> embed(g,xy) >> clf;draw(g) \end{verbatim} If a graph possesses an embedding, \verb|rmxy(g)| removes the embedding. To see the embedding of a graph, use \verb|getxy|. A random embedding can be given to a graph with \verb|randxy(g)|. See also the \verb|scale| function. \subsection{Automatic graph layout} \matgraph's default embedding---vertices uniformly around a circle---is usually unaesthetic and difficult to read. Fortunately, \matgraph\ provides a way to create embeddings automatically. Unfortunately, the one viable method we provide---\verb|distxy|---is slow and requires\footnote{If the Optimization Toolbox is not included with your version of \matlab, it is available (for a fee) from The MathWorks.} the Optimization Toolbox. Nevertheless, \verb|distxy| gives reasonable results for moderately sized graphs. We invite readers who are expert in graph drawing algorithms to submit alternatives for inclusion in future releases of \matgraph. The \verb|distxy| embedding attempts to place vertices in a graph in the plane so that their graph theoretic distance equals the embedded vertices Euclidean distance. This is possible for path graphs, but otherwise is unattainable. Instead, we create a score function that measures how closely we achieve this goal and then use the services of the Optimization Toolbox to find a (local) minimum solution. Here is an example. \begin{verbatim} >> resize(g,0) >> random_tree(g,10) >> clf;draw(g) \end{verbatim} This creates a random tree with $10$ vertices and displays the tree in its default embedding. See the left portion of Figure~\ref{fig:randtree}. \begin{figure}[ht] \begin{center} \includegraphics[scale=0.3]{figs/randtree-yuck} \includegraphics[scale=0.3]{figs/randtree-nice} \end{center} \caption{A random tree with its default embedding (left) and with a nice embedding found by \texttt{distxy} (right).} \label{fig:randtree} \end{figure} Now we compute an embedding using \verb|distxy|. \begin{verbatim} >> distxy(g) Optimization terminated: relative function value changing by less than OPTIONS.TolFun. Embedding score = 2.7511 Elapsed time is 0.941816 seconds. ans = 2.7511 >> clf;draw(g) \end{verbatim} The result is show in the right portion of Figure~\ref{fig:randtree}. \section{Helper Classes: Partitions and Permutations} \matgraph\ includes two classes that are useful for supporting work: partitions and permutations. \subsection{Partitions} A \emph{partition} is a set of pairwise disjoint, nonempty subsets of a set $A$ whose union is $A$. 
In \matgraph, all partitions must be of a set of the form $[n]=\{1,2,\ldots,n\}$. \verb|partition| variables do not need to be declared (only \verb|graph| objects require that special treatment). Partitions are useful in graph theory. In \matgraph\, the functions to find the connected components of a graph or to find a coloring of a graph return \verb|partition| objects. There are a few ways to create a partition. The most basic is this: \begin{verbatim} >> p = partition(8) { {1} {2} {3} {4} {5} {6} {7} {8} } \end{verbatim} The command \verb|partition(n)| creates a default partition of $[n]$ in which each element is in a part by itself. Alternatively, we can form a \verb|partition| from a \matlab\ cell array. Each cell in the cell array is a list (vector) of integers; taken together, these cells should contain all of the numbers from $1$ to $n$ (for some $n$) exactly once. Here is an example. \begin{verbatim} >> c = cell(3,1); >> c{1} = [1 3 5]; >> c{2} = [4 6 7]; >> c{3} = [2 8]; >> p = partition(c) { {1,3,5} {2,8} {4,6,7} } \end{verbatim} The statement \verb|c = cell(3,1);| builds a $3\times1$ cell array (a fundamental \matlab\ data structure). The next three lines populate the array with three lists of numbers. Finally, the statement \verb|p = partition(c)| assigns to \verb|p| a partition with the expected blocks. The \verb|merge| command is used to combine parts in a partition. Continuing with the example above, we type this: \begin{verbatim} >> merge(p,1,2) { {1,2,3,5,8} {4,6,7} } >> p { {1,3,5} {2,8} {4,6,7} } \end{verbatim} \verb|merge(p,1,2)| forms a new partition in which the parts containing elements 1 and 2 are combined into a single part. Note that \verb|merge| does not alter \verb|p|; this is normal \matlab\ behavior. Now try this: \begin{verbatim} >> p(1) ans = 1 3 5 >> p(1,2) ans = 0 \end{verbatim} The command \verb|p(v)| returns (as a list) the elements in \verb|v|'s block. The command \verb|p(v,w)| returns 1 (true) if \verb|v| and \verb|w| are in the same block, and returns 0 (false) otherwise. There are other ways to extract the parts of a partition. \begin{verbatim} >> pts = parts(p); >> pts{1} ans = 1 3 5 >> pts{2} ans = 2 8 >> pts{3} ans = 4 6 7 \end{verbatim} The function \verb|parts| returns the parts of a partition as a cell array. \begin{verbatim} >> array(p) ans = 1 2 1 3 1 3 3 2 \end{verbatim} The \verb|array| function returns an index number for each element; an element has index number $i$ if it is in the $i^{\text{th}}$ part of the partition. The binary operators \verb|==| and \verb|!=| can be used to test if two partitions are equal or unequal. The binary operators \verb|+| and \verb|*| can be used to compute the join and meet of two partitions. \verb|nv(p)| returns the size of the ground set of the partition and \verb|np(p)| returns the number of blocks in the partition. See also \verb|size(p)|. \subsection{Permutations} A \emph{permutation} is a bijection of a set $A$ to itself. In \matgraph, the set $A$ is always of the form $[n] = \{1,2,\ldots, n\}$. A new permutation is created with the \verb|permutation| function: \begin{verbatim} >> permutation(9) (1)(2)(3)(4)(5)(6)(7)(8)(9) \end{verbatim} The \verb|permutation(n)| command creates the identity permutation of $[n]$. The \verb|permutation| function can also be used to create a permutation from a list of numbers. 
\begin{verbatim} >> vec = [ 1 3 5 2 4 6 ]; >> permutation(vec) (1)(2,3,5,4)(6) >> \end{verbatim} Here, the permutation is given by the matrix $$ \pi = \begin{bmatrix} 1&2&3&4&5&6 \\ 1&3&5&2&4&6 \end{bmatrix} $$ The list \verb|vec| gives the bottom row. This notation means that $\pi(1)=1$, $\pi(2)=3$, $\pi(3)=5$, $\pi(4)=2$, $\pi(5)=4$, and $\pi(6)=6$. A random permutation can be created like this: \begin{verbatim} >> p = permutation(9); >> p = random(p) (1,7,6,2,4,9)(3,8)(5) \end{verbatim} \matgraph\ defines \verb|*| to denote permutation composition. The notation \verb|p(j)| applies the permutation \verb|p| to element \verb|j|: \begin{verbatim} >> p(2) ans = 4 \end{verbatim} The inverse of a permutation can be calculated like this: \begin{verbatim} >> inv(p) (1,9,4,2,6,7)(3,8)(5) >> p^-1 (1,9,4,2,6,7)(3,8)(5) \end{verbatim} In general, \verb|p^m| is the $m$-fold composition of \verb|p| with itself; \verb|m| may be negative. \begin{verbatim} >> matrix(p) ans = 0 0 0 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 0 1 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 >> array(p) ans = 7 4 8 9 5 2 6 3 1 \end{verbatim} \verb|matrix(p)| creates a permutation matrix and \verb|array(p)| gives the lower row of the permutation when written in $2\times n$-matrix notation. \begin{verbatim} >> c = cycles(p); >> c{1} ans = 1 7 6 2 4 9 >> c{2} ans = 3 8 >> c{3} ans = 5 \end{verbatim} The \verb|cycles| function creates a cell array containing the permutation's cycles. \section{Vertex Numbers and Labels} \matgraph\ rigidly enforces the rule that the vertex set of any graph must be of the form $\{1,2,\ldots,n\}$. If we delete some vertices of the graph, other vertices are renumbered and this can make associating a vertex's original number with its new number difficult. Also, one may wish to give a vertex an alphanumeric name. To deal with these issues, \matgraph\ provides a mechanism for labeling the vertices of a graph with arbitrary text strings. Try this: \begin{verbatim} >> g = graph Graph with 0 vertices and 0 edges (full) >> cycle(g,8) >> label(g) >> label(g,3,'X') >> delete(g,4) >> ldraw(g) \end{verbatim} When a graph is first created, there are no labels associated with its vertices. The command \verb|label(g)| causes \verb|g|'s vertices to be given default labels. The default label assigned to a vertex is simply a string containing the digits of its vertex number (e.g., vertex 23 would be labeled \verb|'23'|). The command \verb|label(g,3,'X')| labels vertex number 3 with the string \verb|'X'|. The label need not be a single character; we could have labeled this vertex like this: \verb|label(g,3,'three')|. Next, we delete vertex number 4. This renumbers vertices 5 through 8 to new numbers 4 through 7. However, the label associated with a vertex remains the same. That is, the vertex now numbered 4 (and formerly numbered 5) has the label \verb|'5'|. Finally, the \verb|ldraw| command draws the graph with each vertex's label written in the middle of its circle. See Figure~\ref{fig:labeled-path}. \begin{figure}[ht] \begin{center} \includegraphics[scale=0.5]{figs/labeled-path} \end{center} \caption{A drawing of a labeled graph.} \label{fig:labeled-path} \end{figure} To learn the label of a vertex, or to extract a cell array containing all the labels of a graph, use \verb|get_label|. It is possible to assign two different vertices the same label, so there need not be a one-to-one correspondence between vertices and labels.
\section{Graph Operations} \label{sect:ops} \matgraph\ provides various operations to perform on graphs. Here we present some examples. \begin{verbatim} >> g = graph Graph system initialized. Number of slots = 500. Graph with 0 vertices and 0 edges (full) >> complete(g,[2,3,4]) >> deg(g) ans = 7 7 6 6 6 5 5 5 5 >> complement(g) >> deg(g) ans = 1 1 2 2 2 3 3 3 3 >> \end{verbatim} This sets \verb|g| to be the complete multipartite graph $K_{2,3,4}$, and then overwrites \verb|g| with its own complement, $\overline{K_{2,3,4}}$. This is equivalent to the disjoint union $K_2\oplus K_3 \oplus K_4$. \matgraph\ can compute disjoint unions of graphs like this: \begin{verbatim} >> cycle(g,5) >> h = graph; >> cycle(h,6) >> k = graph; >> disjoint_union(k,g,h) >> k Graph with 11 vertices and 11 edges (full) \end{verbatim} This code resets \verb|g| to be the 5-cycle, defines a new graph variable \verb|h| to be a 6-cycle, and then places the disjoint union of these graphs in a third graph \verb|k|. See also the \verb|union| command. The complement of $C_5 \oplus C_6$ can now be computed using \verb|complement(k)|. Alternatively, we can do this: \begin{verbatim} >> complement(g); % g is now the complement of C_5 (which is C_5) >> complement(h); % h is now the complement of C_6 >> join(k,g,h) >> h Graph with 6 vertices and 9 edges (full) \end{verbatim} The \verb|join| command overwrites its first argument with a graph formed from the disjoint union of its second and third arguments, plus all possible edges between these latter two graphs. The \emph{Cartesian product} of graphs $G$ and $H$ is a new graph $G\times H$ defined as follows: \begin{align*} V(G\times H) &= V(G) \times V(H) = \{(v,w): v \in V(G), w \in V(H)\} \\ E(G\times H) &= \bigl\{ \{(v_1,w_1),(v_2,w_2)\} : [v_1v_2 \in E(G) \text{ and } w_1=w_2] \text{ or } [v_1=v_2 \text{ and } w_1w_2 \in E(H)] \bigr\} \end{align*} We illustrate how to calculate the Cartesian product in \matgraph: \begin{verbatim} >> clf;draw(k) >> cycle(g,10) >> cycle(h,3) >> cartesian(k,g,h) >> k Graph with 30 vertices and 60 edges (full) >> distxy(k) Optimization terminated: relative function value changing by less than OPTIONS.TolFun. Embedding score = 51.6601 Elapsed time is 5.263166 seconds. ans = 51.6601 >> clf;draw(k) \end{verbatim} The resulting drawing is shown in Figure~\ref{fig:product-graph}. \begin{figure}[ht] \begin{center} \includegraphics[scale=0.5]{figs/product-graph} \end{center} \caption{The Cartesian product $C_{10}\times C_3$.} \label{fig:product-graph} \end{figure} The hypercube $Q_n$ is defined to be the $n$-fold product $K_2 \times K_2 \times \cdots \times K_2$. The command \verb|cube(g,n)| overwrites \verb|g| with the graph $Q_n$. \matgraph\ can find spanning trees in (connected) graphs. The two commands \verb|bfstree| and \verb|dfstree| find breadth-first and depth-first spanning trees of their respective graphs. \begin{verbatim} >> dodecahedron(g) >> bfstree(h,g) >> clf; draw(g,':') >> draw(h) \end{verbatim} The command \verb|bfstree(h,g)| overwrites \verb|h| with a breadth-first spanning tree of \verb|g| rooted at vertex 1. (To start from another vertex, use \verb|bfstree(h,g,v)|.) We then draw the original graph \verb|g| using dotted lines (the extra argument to \verb|draw|) and then draw the spanning tree (using the default solid lines) without erasing the first drawing. The result is in Figure~\ref{fig:bfstree}.
\begin{figure}[ht] \begin{center} \includegraphics[scale=0.5]{figs/bfstree} \end{center} \caption{A breadth-first spanning tree of the dodecahedron graph.} \label{fig:bfstree} \end{figure} \matgraph\ can also find Hamiltonian cycles (but only in small graphs). Here's an example. \begin{verbatim} >> dodecahedron(g) >> hamiltonian_cycle(h,g); >> clf;draw(g,':') >> draw(h) \end{verbatim} The result is in Figure~\ref{fig:ham-cycle}. \begin{figure}[ht] \begin{center} \includegraphics[scale=0.5]{figs/ham-cycle} \end{center} \caption{A Hamiltonian cycle in the dodecahedron graph.} \label{fig:ham-cycle} \end{figure} \matgraph\ can form induced subgraphs. \begin{verbatim} >> cycle(g,10) >> induce(h,g,[1 2 3 4 5 9]) >> h Graph with 6 vertices and 4 edges (full) \end{verbatim} The \verb|induce| command above overwrites \verb|h| with the induced subgraph of \verb|g| generated by the vertex set $\{1,2,3,4,5,9\}$. This makes \verb|h| the graph consisting of a 5-path (on vertices 1 through 5) plus an isolated vertex (now numbered 6 in \verb|h|). The new vertex 6 in \verb|h| inherits the label of vertex 9 in \verb|g| (assuming \verb|g| was labeled). The \verb|trim| command is useful for removing vertices of degree 0. More generally, \verb|trim(g,d)| removes all vertices of degree at most $d$ from \verb|g|, and then repeats this operation on the resulting graph until \verb|g| has minimum degree at least $d+1$ (or all vertices have been deleted). \section{Graph Computations} \subsection{Basic invariants} As discussed earlier, \verb|nv(g)| and \verb|ne(g)| returns the number of vertices and edges in \verb|g|, respectively. \verb|size(g)| reports the same information as a list. The independence number, clique, and domination number can be computed by \matgraph; note that the computation of these invariants requires \matlab's Optimization Toolbox. \begin{verbatim} >> g = graph Graph system initialized. Number of slots = 500. Graph with 0 vertices and 0 edges (full) >> icosahedron(g) >> alpha(g) % compute the independence number Optimization terminated. ans = 3 >> omega(g) % compute the clique number Optimization terminated. ans = 3 >> dom(g) % compute the domination number Optimization terminated. ans = 2 \end{verbatim} In each case, we can find the realizing set (independent, clique, or dominating) with an extra output argument: \begin{verbatim} >> [d,S] = dom(g) Optimization terminated. d = 2 S = 4 7 >> sort([g(4),g(7),4,7]) ans = 1 2 3 4 5 6 7 8 9 10 11 12 \end{verbatim} \subsection{Connection} \matgraph\ can determine if a graph is connected, find paths between vertices, and determine distances. We illustrate this on the ``Bucky ball'' graph: the molecular graph of Buckminsterfullerene $C_{60}$ (a ball comprised of 60 carbon atoms) or, equivalently, the graph implicitly drawn on a soccer ball. \begin{verbatim} >> bucky(g) >> isconnected(g) ans = 1 >> find_path(g,1,60) ans = 1 5 4 21 22 23 52 51 55 60 >> diam(g) ans = 9 >> dist(g,1,60) ans = 9 >> \end{verbatim} \matgraph\ can find the connected components of a graph; these are returned as a partition object: \begin{verbatim} >> complete(g,[2,3,4]) >> complement(g) >> components(g) { {1,2} {3,4,5} {6,7,8,9} } >> component(g,3) ans = 3 4 5 \end{verbatim} The last function, \verb|component(g,v)|, returns a list of the vertices in \verb|v|'s component of \verb|g|. The \verb|split| command finds a reasonable partition of the vertices of a graph into two sets that are more tightly clustered among themselves than between the two sets. 
For example, consider a graph formed by combining two disjoint copies of $K_8$ linked by a single edge. This is a connected graph, but clearly divides into two natural clusters. Here we show how this works in \matgraph: \begin{verbatim} >> h = graph Graph with 0 vertices and 0 edges (full) >> complete(g,8) >> disjoint_union(h,g,g) >> add(h,1,16) >> components(h) { {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16} } >> split(h) { {1,2,3,4,5,6,7,8} {9,10,11,12,13,14,15,16} } \end{verbatim} \subsection{Coloring} Perhaps the most celebrated invariant in graph theory is the chromatic number of a graph, $\chi(G)$. This is the minimum number of colors needed so that we can color the vertices of $G$ such that adjacent vertices have different colors. Equivalently, this is the minimum number of blocks in a partition of $V(G)$ into independent sets. The \verb|color| command can be used to find such a partition. The default \verb|color(g)| performs a greedy coloring of the graph (which might not be optimal). Other algorithms can be specified; for example, to get a true optimal coloring, use \verb|color(g,'optimal')|. This, of course, may take a long time for large graphs. \begin{verbatim} >> icosahedron(g) >> color(g) { {1,6,8} {2,4,11} {3,5,12} {7,9,10} } >> bucky(g) >> c1 = color(g); >> size(c1) ans = 60 4 >> c2 = color(g,'optimal'); >> size(c2) ans = 60 3 >> cdraw(g,c2) \end{verbatim} Notice that the greedy coloring produces a proper 4-coloring of the graph, but the best-possible coloring is with three colors. See Figure~\ref{fig:bucky}, produced by this code. \begin{figure}[ht] \begin{center} \includegraphics[scale=0.5]{figs/bucky} \end{center} \caption{An optimal coloring of the Bucky ball.} \label{fig:bucky} \end{figure} The \verb|cdraw| command draws a graph with a given coloring. Note that the coloring need not be a proper coloring. Here is an example: \begin{verbatim} >> grid(g,5,5) >> c = split(g); >> clf; cdraw(g,c) \end{verbatim} The result is in Figure~\ref{fig:split-grid}. \begin{figure}[ht] \begin{center} \includegraphics[scale=0.5]{figs/split-grid} \end{center} \caption{A $5\times5$ grid partitioned into two sets of vertices by \texttt{split}.} \label{fig:split-grid} \end{figure} \matgraph\ can find the chromatic polynomial of small graphs. \begin{verbatim} >> cube(g,3) >> chromatic_poly(g) ans = 1 -12 66 -214 441 -572 423 -133 0 \end{verbatim} This tells us that $$ \chi(Q_3;x) = x^8 -12 x^7 + 66 x^6 - 214 x^5 + 441 x^4 -572 x^3 + 423 x^2 -133 x . $$ If a graph has a two-coloring (i.e., if the graph is bipartite), then we can use \verb|bipartition| to find the two color classes. \begin{verbatim} >> cycle(g,8) >> bipartition(g) { {1,3,5,7} {2,4,6,8} } \end{verbatim} Given a partition of a graph into two sets, we can find a maximum matching between those sets with \verb|bipmatch|. \begin{verbatim} >> random_bipartite(g,6,6,.5) >> bipartition(g) { {1,2,3,4,5,6} {7,8,9,10,11,12} } >> bipmatch(g,ans) ans = 1 7 2 8 3 9 4 11 5 10 6 12 \end{verbatim} \section{Sparse Graphs} Graphs in \matgraph\ are housed in symmetric matrices. \matlab\ can hold matrices either as \emph{full} or \emph{sparse} arrays. The amount of memory used by a full array is proportional to the number of entries in the matrix, while the memory used by a sparse array is proportional to the number of nonzero entries in the matrix. Graphs in \matgraph\ are held, behind the scenes, in either full or sparse matrices. To find out which, use the functions \verb|isfull| or \verb|issparse|.
Alternatively, simply typing the graph variable's name reveals its storage type. \begin{verbatim} >> petersen(g) >> g Graph with 10 vertices and 15 edges (full) \end{verbatim} For large graphs with relatively few edges, sparse storage is preferable; indeed, full storage may not be feasible because the computer might not have enough RAM to hold the matrix. To convert a graph to sparse storage, simply type \verb|sparse(g)|. \begin{verbatim} >> sparse(g) >> cycle(g,1000) >> g Graph with 1000 vertices and 1000 edges (sparse) \end{verbatim} When declaring a new graph variable, one may specify the number of vertices in the constructor: \verb|h = graph(n)|. If \verb|n| is large, then sparse storage is used. \begin{verbatim} >> k = graph(10000) Graph with 10000 vertices and 0 edges (sparse) \end{verbatim} How large is ``large''? This is controlled by the function \verb|set_large|. \section{Input and Output} \subsection{Saving graphs to disk with \texttt{save} and \texttt{load}} The usual mechanisms for saving variables to disk do not work for \verb|graph| variables in \matgraph. Were you to attempt to save a \verb|graph| variable, or the entire \matlab\ workspace, the graphs you have created will be lost when you try to load them back in. This is one of the prices we pay for creating a fast call-by-reference system. Instead, \matgraph\ provides its own \verb|save| and \verb|load| commands. \verb|save(g,filename)| saves the graph \verb|g| to a file in the current directory on your hard drive. A subsequent call to \verb|load(g,filename)| overwrites the graph \verb|g| with the graph saved in the file. Here is an example: \begin{verbatim} >> g = graph Graph system initialized. Number of slots = 500. Graph with 0 vertices and 0 edges (full) >> petersen(g) >> save(g,'pete') >> free(g) >> g Invalid graph object (index 1) >> clear g >> g = graph Graph with 0 vertices and 0 edges (full) >> g Graph with 0 vertices and 0 edges (full) >> load(g,'pete') >> g Graph with 10 vertices and 15 edges (full) \end{verbatim} \subsection{SGF: Simple Graph Format} \label{sect:sgf} The \matgraph\ function \verb|sgf| is a mechanism to convert \verb|graph| objects to and from a two-column matrix format called Simple Graph Format. For a graph with $n$ vertices and $m$ edges, the Simple Graph Format matrix has either $m+1$ or $n+m+1$ rows. The first row of the matrix gives the number of vertices and the number of edges in the graph. The following $m$ rows specify the edges of the graph. Optionally, an additional $n$ rows specify the $x,y$-coordinates of the embedding of the graph. Here is an example. \begin{verbatim} >> complete(g,4) >> sgf(g) ans = 4 6 1 2 1 3 2 3 1 4 2 4 3 4 >> distxy(g) Optimization terminated: relative function value changing by less than OPTIONS.TolFun. Embedding score = 0.34315 Elapsed time is 0.079532 seconds. ans = 0.3431 >> sgf(g) ans = 4.0000 6.0000 1.0000 2.0000 1.0000 3.0000 2.0000 3.0000 1.0000 4.0000 2.0000 4.0000 3.0000 4.0000 1.3651 1.3939 1.2374 2.5943 0.7011 1.9303 1.9014 2.0580 \end{verbatim} Not only can \verb|sgf| be used to create a Simple Graph Format matrix from a graph, it can also be used to specify a graph. For example, here we create the SGF matrix for the graph $K_{1,5}$ and an embedding using \matlab\ commands, and then build a graph based on that matrix. 
\begin{verbatim} >> edges = [ ones(5,1), [2:6]' ] edges = 1 2 1 3 1 4 1 5 1 6 >> xy = [ 0 0 ; -2 1 ; -1 1 ; 0 1 ; 1 1 ; 2 1 ]; >> S = [ 6 5 ; edges ; xy ] S = 6 5 1 2 1 3 1 4 1 5 1 6 0 0 -2 1 -1 1 0 1 1 1 2 1 >> sgf(g,S) >> clf;draw(g) \end{verbatim} The result is shown in Figure~\ref{fig:star}. \begin{figure}[ht] \begin{center} \includegraphics[scale=0.5]{figs/star} \end{center} \caption{A star graph created using a Simple Graph Format matrix.} \label{fig:star} \end{figure} The Simple Graph Format is useful for working with other computing environments. You may have, say, a C++ program that you use to create graphs. You can have that program write the graph to disk in simple graph format. Then, using the usual \matlab\ \verb|load| command, the two-column matrix can be read from disk and converted into a graph. \subsection{A C++ graph parser} Inside the main \matgraph\ directory, you can find a subdirectory named \verb|tools| that contains a further subdirectory named \verb|graph_parser|. This directory contains a C++ program to build a command-line tool that reads textual graph data from the standard input and writes its output to a file named \verb|parsed_graph.m|. This can then be converted into a graph in \matgraph\ by giving the command \verb|parsed_graph(g)|. Here are the steps you need to take to make this work. \subsubsection*{Compile the program} We assume basic knowledge of the Unix shell (Linux, Mac OS X, Cygwin on Windows, etc.) and that your computer has a C++ compiler installed. (This has been tested using the GNU compiler \verb|g++|.) To build the program, simply change directory to the \verb|graph_parser| directory and type \verb|make|: \begin{verbatim} $ cd /home/username/matgraph/tools/graph_parser/ $ make g++ -ansi -O -c -o main.o main.cc g++ -ansi -O -c -o LineParser.o LineParser.cc g++ main.o LineParser.o -o graph_parser $ \end{verbatim} The program \verb|graph_parser| is created. This can be moved to any convenient location. \subsubsection*{Graph data file} The \verb|graph_parser| program reads a specific type of data file. Vertices are named as character strings (henceforth, ``words'') such as \verb|head-node| or \verb|city| or \verb|123|. No white space may appear in the name of a vertex, and vertex names are case sensitive (the word \verb|hello| is not the same as \verb|Hello|). A typical line in the data file contains the names of exactly two vertices; such a line indicates that there is an edge between the named vertices. If there are more than two words on a line, only the first two words are processed; the rest of the line is ignored. In order to accommodate isolated vertices, a line in the data file may contain just a single word. This tells \verb|graph_parser| that the given word is the name of a vertex. If this word has not been previously encountered (e.g., on a previous line as part of an edge), then this names a new vertex in the graph. If a line begins with the same word twice, the second instance of the word is ignored and this line is treated as if it contained only one word. Finally, if a line is blank or if a line begins with the sharp character \verb|#|, then the line is ignored (this is useful for annotating the data file).
A typical input file (named \verb|test|) is included in the \verb|graph_parser| directory; we show the contents of that file here: \begin{verbatim} one two one three four five two two six and the rest of this line is ignored seven eight nine nine two one one <-- a loop is not created two nine eight seven eight one fifty four six seven six four five three five # this line should be skipped nine seven \end{verbatim} \subsubsection*{Convert the data file into a \texttt{.m} file} Once the data file is prepared, we use \verb|graph_parser| to convert the data file into a \verb|.m| file that can be run in \matlab. In the shell, give the following command: \begin{verbatim} ./graph_parser < filename \end{verbatim} where \verb|filename| is the name of the file containing the graph data. The result of running the program \verb|graph_parser| is the creation of a file named \verb|parsed_graph.m| in the same directory in which \verb|graph_parser| was run. You need to have write permission for that directory or \verb|graph_parser| will complain: \begin{verbatim} Unable to open file parsed_graph.m for output \end{verbatim} The file \verb|parsed_graph.m| can be moved to any convenient location. You should not change the name of this file because it is a \matlab\ function. If you wish to process two (or more) graph files, run \verb|graph_parser| on the first data file and then read the graph into \matlab\ (explained next) before processing subsequent data files. \subsubsection*{Run the \texttt{.m} file in \matlab} The final step is to execute the function \verb|parsed_graph| inside \matlab. \begin{verbatim} >> parsed_graph(g) >> g Graph with 9 vertices and 13 edges (full) >> distxy(g) Optimization terminated: relative function value changing by less than OPTIONS.TolFun. Embedding score = 2.5448 Elapsed time is 0.210241 seconds. ans = 2.5448 >> ldraw(g) \end{verbatim} The graph defined textually in \verb|test| is now saved as a \verb|graph| object in \matgraph\ and can be handled like any other such graph. The drawing of this graph is shown in Figure~\ref{fig:parsed}. \begin{figure}[ht] \begin{center} \includegraphics[width=\textwidth]{figs/parsed} \end{center} \caption{A drawing of a graph read into \matgraph\ via the \texttt{graph\_parser} program.} \label{fig:parsed} \end{figure} \subsection{Connecting with other programs} It is possible to create \matlab\ programs to write graphs to files in other formats. Included with \matgraph\ are ways to do this for Graphviz and OmniGraffle. \subsubsection*{Saving graphs for Graphviz} Graphviz is a graph visualization tool available from the website \begin{verbatim} http://www.graphviz.org/ \end{verbatim} One of the Graphviz tools is named \verb|dot|, and \matgraph\ includes a function also named \verb|dot| to convert \verb|graph| objects into a format that can be read by Graphviz's \verb|dot|. The \matgraph\ command has the form \verb|dot(g,'filename.dot')|. This writes a file to the computer's disk that can then be used by Graphviz. Here is an example of how to do this: \begin{verbatim} >> cube(g,4) >> dot(g,'four-cube.dot') Wrote "four-cube.dot" \end{verbatim} The file \verb|four-cube.dot| can now be read into a Graphviz tool to produce attractive drawings such as the one shown in Figure~\ref{fig:four-cube}.
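For instance, from a command line one could render the exported file with Graphviz's \verb|dot| (the output format flag shown is one of several standard options; the file names follow the example above):
\begin{verbatim}
$ dot -Tpng four-cube.dot -o four-cube.png
\end{verbatim}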
\begin{figure}[ht] \begin{center} \includegraphics[scale=0.5]{figs/four-cube} \end{center} \caption{A picture of $Q_4$ produced by exporting a graph from \matgraph\ and then laying it out using Graphviz.} \label{fig:four-cube} \end{figure} See the Graphviz website for more information. \subsubsection*{Saving graphs for OmniGraffle} OmniGraffle is a graph drawing program for Macintosh available from this website: \begin{verbatim} http://www.omnigroup.com/ \end{verbatim} \matgraph\ can save graphs in a format that can be read by OmniGraffle. The \matgraph\ command \verb|graffle(g,'filename.graffle')| writes the graph to disk. Double clicking the created file launches OmniGraffle. Here's an example: \begin{verbatim} >> cube(g,3) >> graffle(g,'cube.graffle') \end{verbatim} Using OmniGraffle's layout tool, we can produce a nice embedding of the graph as shown in Figure~\ref{fig:graffle}. \begin{figure}[ht] \includegraphics[scale=0.5]{figs/graffle} \caption{A picture of $Q_3$ produced by exporting a graph from \matgraph\ and then laying it out using OmniGraffle.} \label{fig:graffle} \end{figure} To a limited extent, it is possible to convert graphs prepared in OmniGraffle for import into \matgraph. Inside the \verb|matgraph/tools| directory resides a program named \verb|graffle2sgf.py|. This is a Python program, so in order to run it you must have Python installed on your computer. This program takes as input a graph saved by OmniGraffle and returns as output a matrix specifying the graph in Simple Graph Format (see \S\ref{sect:sgf}). Suppose you have created a graph using OmniGraffle and saved it on your hard disk in a file called \verb|mygraph.graffle|. Issue the following command in the Unix shell: \begin{verbatim} ./graffle2sgf.py < mygraph.graffle > mygraph \end{verbatim} This reads the graph saved by OmniGraffle and writes the relevant data into the file \verb|mygraph|. Now, inside \matlab, do the following: \begin{verbatim} >> load mygraph >> sgf(g,mygraph) >> g Graph with 5 vertices and 6 edges (full) \end{verbatim} The \matlab\ command \verb|load mygraph| reads the file \verb|mygraph| and saves the matrix contained therein into a variable that is also named \verb|mygraph|. The command \verb|sgf(g,mygraph)| overwrites \verb|g| with the graph specified by the SGF matrix \verb|mygraph|. The \verb|graffle2sgf.py| tool is not completely reliable. It works well on diagrams that contain only nodes and edges. If there are other extraneous lines or text in the diagram (which an OmniGraffle diagram certainly may have), then the program can get confused and give poor results. Readers are invited to submit a better version. \end{document} %%% Local Variables: %%% mode: latex %%% TeX-master: t %%% End:
{ "alphanum_fraction": 0.7011859502, "avg_line_length": 31.9836660617, "ext": "tex", "hexsha": "fa66220c04ac0b71086aeaed104549bd791e9241", "lang": "TeX", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2020-04-05T11:43:17.000Z", "max_forks_repo_forks_event_min_datetime": "2020-04-05T11:43:17.000Z", "max_forks_repo_head_hexsha": "1f0ea9951341f4da5b05f01a826b851cf78737dd", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "Seregon/Matlab", "max_forks_repo_path": "FEX/matgraph/doc/by-example/by-example.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "1f0ea9951341f4da5b05f01a826b851cf78737dd", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "Seregon/Matlab", "max_issues_repo_path": "FEX/matgraph/doc/by-example/by-example.tex", "max_line_length": 79, "max_stars_count": 1, "max_stars_repo_head_hexsha": "1f0ea9951341f4da5b05f01a826b851cf78737dd", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "Seregon/Matlab", "max_stars_repo_path": "FEX/matgraph/doc/by-example/by-example.tex", "max_stars_repo_stars_event_max_datetime": "2017-09-08T09:10:07.000Z", "max_stars_repo_stars_event_min_datetime": "2017-09-08T09:10:07.000Z", "num_tokens": 16192, "size": 52869 }
\subsection{Verification of the Parameter Estimation Module} Using a PSO algorithm followed by an LMA optimization, the Drucker-Prager plasticity model with ductile damage is then fitted to the homogenized DEM simulation data in order to obtain an optimal parameter set. Each simulation is fit to 50 points defining the homogenized stress-strain curve, resulting in a total of 200 data points for all four DEM simulations at different confining stresses. The PSO algorithm uses a swarm size of 24 for 100 generations, which is found to be sufficient to converge to a consistent solution. Here, the CDM model is confined laterally by the homogenized horizontal DEM stress, and vertical displacements are prescribed by the homogenized vertical DEM strain, with the parameter estimation algorithms programmed to match the horizontal strain and the vertical stress. Because of the large variation in observation magnitudes (between stress/strain and from different confining stresses), each curve is weighted with a normalization factor to prevent the large stress values from dominating parameter estimation. In addition, a linear weighting scheme is applied to each curve to give larger influence to the loading section and lesser influence to the post-damage section. Parameter bounding limits are required in order to limit the search space of the optimization algorithms. These limits are chosen based on two criteria: physical limitations and numerical stability. If there exist physical limitations that prevent parameters from exceeding certain values or if there exists a range of realistic values that the parameter should not deviate from, then those physical limitations are specified as the bounds. In other cases, the parameter bounds come from numerical limitations, beyond which certain parameter values would cause the simulations to become unstable. In some cases, a combination of the two bounding methods is used. The specified bounding limits for each parameter, together with the resulting optimal values, can be seen in Table \ref{tab:paramDrucker}.
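As a sketch of the weighting scheme described above (the symbols and the precise form of the weights are illustrative, not reproduced from the implementation), the objective function minimized by the PSO and LMA algorithms can be written as
\[
\Phi(\mathbf{p}) = \sum_{c=1}^{4} \sum_{i=1}^{50} \left[ \frac{w_{c,i}}{s_c} \left( y_{c,i}^{\mathrm{CDM}}(\mathbf{p}) - y_{c,i}^{\mathrm{DEM}} \right) \right]^2
\]
where $\mathbf{p}$ is the vector of constitutive parameters, $y_{c,i}$ denotes the matched horizontal strain or vertical stress at point $i$ of curve $c$, $s_c$ is the per-curve normalization factor, and $w_{c,i}$ is the linear weight that decreases from the loading section towards the post-damage section of each curve.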
\begin{table}[!htbp] \centering \caption{Parameter Estimation Results for Drucker-Prager Model with Ductile Damage} \label{tab:paramDrucker} \begin{tabular}{@{}cccccc@{}} \toprule \textbf{Parameter} & \textbf{Symbol} & \textbf{Units} & \textbf{\begin{tabular}[c]{@{}c@{}}Lower \\ Bound\end{tabular}} & \textbf{\begin{tabular}[c]{@{}c@{}}Upper \\ Bound\end{tabular}} & \textbf{\begin{tabular}[c]{@{}c@{}}Optimal \\ Value\end{tabular}} \\ \midrule Young's Modulus & $E$ & $GPa$ & $1$ & $25$ & $1.8$ \\ Poisson's Ratio & $\nu$ & & $0.1$ & $0.4$ & $0.15$ \\ Dilation Angle & $\psi$ & $^{\circ}$ & $5$ & $15$ & $22$ \\ Flow Stress Ratio & $K$ & & $0.78$ & $1$ & $0.81$ \\ Friction Angle & $\beta$ & $^{\circ}$ & $45$ & $60$ & $56$ \\ Initial Compressive Yield Strength & $\sigma_c^{iy}$ & $kPa$ & $1$ & $100$ & $52$ \\ Peak Compressive Yield Strength & $\sigma_c^{p}$ & $MPa$ & $0.5$ & $5$ & $3.1$ \\ Strain at Peak Compressive Yield & $\epsilon_c^{p}$ & $\%$ & $0.5$ & $5$ & $1.7$ \\ Yield Strain at -0.5 Triaxiality & $\bar{\epsilon}^{pl}_{f_{-0.5}}$ & $\%$ & $0.01$ & $0.1$ & $0.0078$ \\ Yield Strain at -0.6 Triaxiality & $\bar{\epsilon}^{pl}_{f_{-0.6}}$ & $\%$ & $0.1$ & $10$ & $0.30$ \\ Plastic Displacement at Failure & $\bar{u}^{pl}_f$ & $m$ & $0.01$ & $1$ & $0.12$ \\ \bottomrule \end{tabular} \end{table} The stress-strain curves from the DEM simulations used for the parameter estimation and the stress-strain curves of the CDM simulations using the optimal parameter set are presented in Figure \ref{fig:fitted1}. The CDM fit is good, with a Root-Mean-Square Error (RMSE) of $1.03~MPa$, and the pressure-dependent yield function works well with this model, as the error is not biased towards curves at any particular confining stress. This fit suggests that the model is likely to remain valid under confining stresses outside of the fitted range. Also, the damage initiation points at the peak of the curve are well correlated and indicate that the triaxiality-based damage initiation criterion is a good model for this problem. The majority of the error in the curves is found in the post-yield behaviour. This error results from limitations in the continuum constitutive model because the post-yield behaviour of the DEM simulations is discontinuous in nature (stick-slip response). The CDM model cannot accommodate such oscillations and thus represents the post-yield response as an average.
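For reference, the RMSE quoted above is understood here in the usual sense (assuming equal weighting of the $N$ fitted stress points across all four curves):
\[
\mathrm{RMSE} = \sqrt{\frac{1}{N} \sum_{i=1}^{N} \left( \sigma_i^{\mathrm{CDM}} - \sigma_i^{\mathrm{DEM}} \right)^2 }
\]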
{ "alphanum_fraction": 0.4887501814, "avg_line_length": 222.2258064516, "ext": "tex", "hexsha": "3acc1c395a53929420ba0d2e686e589a4ad2faa9", "lang": "TeX", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2020-06-29T23:14:09.000Z", "max_forks_repo_forks_event_min_datetime": "2020-06-29T23:14:09.000Z", "max_forks_repo_head_hexsha": "9c9043effdb72a608ffec11726af97154751722e", "max_forks_repo_licenses": [ "CC-BY-4.0" ], "max_forks_repo_name": "yetisir/up-scaling-dem-simulations", "max_forks_repo_path": "subsection_Verification_of_the_Parameter__.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "9c9043effdb72a608ffec11726af97154751722e", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "CC-BY-4.0" ], "max_issues_repo_name": "yetisir/up-scaling-dem-simulations", "max_issues_repo_path": "subsection_Verification_of_the_Parameter__.tex", "max_line_length": 1090, "max_stars_count": null, "max_stars_repo_head_hexsha": "9c9043effdb72a608ffec11726af97154751722e", "max_stars_repo_licenses": [ "CC-BY-4.0" ], "max_stars_repo_name": "yetisir/up-scaling-dem-simulations", "max_stars_repo_path": "subsection_Verification_of_the_Parameter__.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1227, "size": 6889 }
\chapter{\label{chapter6} System Testing} Code testing was conducted throughout the entire development of the project. During the final weeks the system was tested as a whole; for this purpose a dedicated set of \fwap programs was written and collected in the folder \textsl{Code\_funW@P}. All of those files may also serve as a useful reference for writing new \fwap code. Here is the list of tests and the aspects of the project that each one checks: \begin{itemize} \item \textit{helloWorld.fun}, it is the classic test program for any language all over the world \item \textit{bracket.fun}, it tests bracketed expressions and operators \item \textit{multipleDeclarations.fun}, it attempts to declare lots of variables, all together! \item \textit{anonymousClosureText.fun}, it is the function-closure example reported in the text of the assignment \cite{exercise} \item \textit{anonymousFuncs.fun}, yet another test for closures of anonymous functions, which defines a tick() counter \item \textit{anonymousFunctionClosure.fun}, the last anonymous-function test \item \textit{boolReduceAsync.fun}, it asynchronously computes the maximum among five numbers according to a reduce pattern \item \textit{fibonacci.fun}, a test in honour of a very famous ``Pisano'', computing the 6th number of the Fibonacci sequence \item \textit{fibAsync.fun}, it asynchronously computes the first five numbers of the Fibonacci sequence \item \textit{ECTS.fun}, it is a test for nested if's but is also a useful program converting university grades from the Italian system into the European Credit Transfer System \cite{ects} \item \textit{whileAsync.fun}, it repeatedly runs four asynchronous computations \item \textit{whileDasync.fun}, same as before but with \texttt{dasync\{\}} \item \textit{whileAsyncDasync.fun}, mixed \texttt{dasync\{\}} and \texttt{async\{\}}, same computation as before \item \textit{maxTwoDasyncRemote.fun}, computes the maximum among four numbers with \texttt{dasync\{\}} \item \textit{whileRead.fun}, if you input the right number, it gives you a very nerdy Answer. \end{itemize}
{ "alphanum_fraction": 0.7906755471, "avg_line_length": 87.5833333333, "ext": "tex", "hexsha": "e992d9e4ec6728f2371dbedc4051c5aa39745752", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "6bdfbedfa0dc8fec7e25b81665624c6aedc93e3d", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "MCSN-project2014/APproject", "max_forks_repo_path": "docs/chapters/testing.tex", "max_issues_count": 25, "max_issues_repo_head_hexsha": "6bdfbedfa0dc8fec7e25b81665624c6aedc93e3d", "max_issues_repo_issues_event_max_datetime": "2015-01-14T15:11:28.000Z", "max_issues_repo_issues_event_min_datetime": "2015-01-01T18:07:39.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "MCSN-project2014/APproject", "max_issues_repo_path": "docs/chapters/testing.tex", "max_line_length": 257, "max_stars_count": 1, "max_stars_repo_head_hexsha": "6bdfbedfa0dc8fec7e25b81665624c6aedc93e3d", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "MCSN-project2014/APproject", "max_stars_repo_path": "docs/chapters/testing.tex", "max_stars_repo_stars_event_max_datetime": "2015-01-06T21:30:55.000Z", "max_stars_repo_stars_event_min_datetime": "2015-01-06T21:30:55.000Z", "num_tokens": 507, "size": 2102 }
\documentclass{article} %% Copyright 2013 Steven B. Segletes % % This work may be distributed and/or modified under the % conditions of the LaTeX Project Public License, either version 1.3 % of this license or (at your option) any later version. % The latest version of this license is in % http://www.latex-project.org/lppl.txt % and version 1.3c or later is part of all distributions of LaTeX % version 2005/12/01 or later. % % This work has the LPPL maintenance status `maintained'. % % The Current Maintainer of this work is Steven B. Segletes. % Revisions: % v1.01 Documentation revision % v1.1 Added \csname record\roman{@row}\endcsname to \readdef % v1.2 -Corrected the [truncated] LPPL license info % -Added \arrayij and \arrayijk, which can be put into \edef % -Used \romannumeral in preference to \roman{}, when possible, % to avoid unnecessary use of counters. % v1.3 -Moved \newread outside of \readdef, so as not to exhaust the % 16 allotted file streams (Thanks to Ken Kubota for the tip). \usepackage{tabstackengine}[2016-10-04] \usepackage{lmodern} \usepackage[T1]{fontenc} \parskip 1em \parindent 0em \newcommand\rl{\rule{1em}{0in}} \def\rdar{\textsf{readarray}} \def\loi{\textsf{listofitems}} \def\cmd#1{\texttt{\string\ \unskip#1}} \usepackage{readarray} \usepackage{verbatimbox} \usepackage{filecontents} \begin{filecontents*}{file1data.txt} A111 A112 A113 A114 A121 A122 A123 A124 A131 A132 A133 A134 A211 A212 A213 A214 A221 A222 A223 A224 A231 A232 A233 A234 \end{filecontents*} \begin{filecontents*}{file2data.txt} \def{\dataA}{% A111 A112 A113 A114 A121 A122 A123 A124 A131 A132 A133 A134 % A211 A212 A213 A214 A221 A222 A223 A224 A231 A232 A233 A234 } \end{filecontents*} \begin{filecontents*}{file3data.txt} \textit{am} , \textit{are}, have \textit{been}, have \textit{been} \textit{are}, \textit{are} , have \textit{been}, have \textit{been} \textit{is} , \textit{are} , has \textit{been} , have \textit{been} \textit{was} , \textit{were}, had \textit{been}, had \textit{been} \textit{were}, \textit{were}, had \textit{been}, had \textit{been} \textit{was} , \textit{were}, had \textit{been}, had \textit{been} will \textit{be}, will \textit{be}, will have \textit{been}, will have \textit{been} will \textit{be}, will \textit{be}, will have \textit{been}, will have \textit{been} will \textit{be}, will \textit{be}, will have \textit{been}, will have \textit{been} \end{filecontents*} \let\vb\verb \def\bs{{\ttfamily\char'134}} \reversemarginpar \marginparwidth 1.5in \newcommand\margcmd[1]{\marginpar{\hfill\ttfamily\char'134#1}} \begin{document} \begin{center} \LARGE The {\rdar} Package\\ \rule{0em}{.7em}\small Routines for inputting formatted array data and recalling it on an element-by-element basis.\\ \rule{0em}{2.7em}\large Steven B. Segletes\\ [email protected]\\ \rule{0em}{1.7em}\readarrayPackageDate\\ V\readarrayPackageVersion \end{center} \section*{Comments About Version 2.0} Version 2.0 of the \rdar{} package has brought major changes, including a \textit{new and improved} syntax. Functionally, the data-reading/parsing code of the package has been revised to use the powerful \loi{} package. This has two primary advantages: 1) the data that is read is no longer expanded prior to the read, so that macros can be read and stored in the data arrays using their unexpanded tokens; and 2) list separators other than a space may now be employed to parse the data into array cells. While a newer preferred syntax has been introduced for reading and recalling arrays, the deprecated syntax is still supported. 
The user will also note other small changes, such as the fact that errors arising from array-boundary violations now appear in the log file rather than the document itself. \section{Description and Commands} The {\rdar} package allows for the creation of data arrays (numeric, string, or even formatted) using either file contents or \vb|\def| format for input, such that the elements of multiple arrays can be set and later recalled in an orderly fashion, on a cell-by-cell basis. Routines have been developed to support the storage and recall of both 2-D and 3-D arrays, as well as 1-D file-record arrays.% \footnote{ Note: for 1-D arrays that are to be simply parsed on the basis of a specified separator, the \loi{} package is already prepared to do this, without the help of this package. } \clearpage The commands included in this package help the user to input data, define it in terms of array elements, and recall those elements at will. Those commands are: \itshape \textup{To place file data into a data macro:}\\ \rl\vb|\readdef{|filename\vb|}\|data-macro\\ \textup{To place file data into a 1-D file-record array:}\\ \rl\vb|\readrecordarray{|filename\vb|}\|array-identifier\\ \textup{To parse a data macro and place the results into a 2-D or 3-D array:}\\ \rl\vb|\readarray\|data-macro\vb|\|array-identifier\vb|[-,|columns\vb|]|% \hfill\textup{(2-D)}\\ \rl\vb|\readarray\|data-macro\vb|\|array-identifier\vb|[-,|rows\vb|,|columns\vb|]|% \hfill\textup{(3-D)}\\ \textup{Same as above, with leading/trailing spaces removed from array cells:}\\ \rl\vb|\readarray*\|data-macro\vb|\|array-identifier\vb|[-,|columns\vb|]|% \hfill\textup{(2-D)}\\ \rl\vb|\readarray*\|data-macro\vb|\|array-identifier\vb|[-,|rows\vb|,|columns\vb|]|% \hfill\textup{(3-D)}\\ \textup{Recall data from indexed array cell:}\\ \rl\vb|\|array-identifier\vb|[|row\vb|,|column\vb|]|% \hfill\textup{(2-D)}\\ \rl\vb|\|array-identifier\vb|[|plane\vb|,|row\vb|,|column\vb|]|% \hfill\textup{(3-D)}\\ \textup{To place the actual tokens of an array cell into a macro:}\\ \rl\vb|\arraytomacro\|array-identifier\vb|[-,|columns\vb|]\|macro% \hfill\textup{(2-D)}\\ \rl\vb|\arraytomacro\|array-identifier\vb|[-,|rows\vb|,|columns\vb|]\|macro% \hfill\textup{(3-D)}\\ \textup{To change the array-parsing separator character:}\\ \rl\vb|\readarraysepchar{|parsing-separator-char\vb|}|\\ \textup{To select the level of bounds checking on array cell recall:}\\ \rl\vb|\nocheckbounds|\hfill OR\hfill% \vb|\checkbounds|\hfill OR\hfill% \vb|\hypercheckbounds| \upshape In these commands, \cmd{}\textit{data-macro} is a command sequence into which the contents of \texttt{filename} are set into a \cmd{def}. The \textit{array-identifier} is a sequence of (catcode 11) letters that identify the array. The starred version of the commands are used if, during the array creation, it is desired to automatically excise the array data of leading and trailing spaces. Unlike earlier versions of this package, where error messages were output into the typeset document, error messages are now set in the log file. The level of error messaging is defined by the level of bounds checking, with \cmd{hypercheckbounds} providing the most intense level of error checking. When a bounds-checking error is found in an array invocation, in addition to the error message in the log file, a ``?'' is typeset in the document, unless bound checking is disabled with \cmd{nocheckbounds}. 
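As a minimal sketch of the preferred syntax described above (the file name, data-macro name, and array name here are illustrative):
\begin{verbatim}
\readarraysepchar{,}
\readdef{widgetdata.csv}\rawdata
\readarray*\rawdata\widget[-,\ncols]
Row 2, column 3 of the data is \widget[2,3];
the array has \widgetROWS\ rows and \widgetCOLS\ columns.
\end{verbatim}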
Several strings of fixed name are defined through the use of the \cmd{readdef} command, which are accessible to the user: \itshape \rl\vb|\nrows|\\ \rl\vb|\ncols|\\ \rl\vb|\nrecords|\\ \rl\vb|\ArrayRecord[|record\vb|]|% \hfill\textup{(to retrieve record from most recent \cmd{readdef})}\upshape The macros \cmd{nrows} and \cmd{ncols}, which were gleaned from the file structure, may be used in the subsequent \cmd{readarray} invocation to specify the array dimensions. Alternately, those values may be manually overridden by specifying the desired values in the \cmd{readarray} invocation. Individual records of the original file, from the most recent \cmd{readdef}, may be recalled with the \cmd{ArrayRecord} macro. In addition to the strings of fixed name created during the \cmd{readdef}, there are various strings created during the \cmd{readarray} whose names are a function of the \textit{array-identifier}, such as \itshape \rl\vb|\|array-identifier\vb|CELLS|\\ \rl\vb|\|array-identifier\vb|PLANES|\\ \rl\vb|\|array-identifier\vb|ROWS|\\ \rl\vb|\|array-identifier\vb|COLS|\upshape where \textit{array-identifier} is the alphabetic-character string by which you have designated a particular array. Their meaning will be discussed later in this document. Support routines which are generally not required directly by the user for the specification and recall of data arrays, but which are useful for debugging, include the following: \itshape \rl\vb|\arraydump\|array-identifier\\% \rl\vb|\scalardump\|array-identifier% \upshape These macros print out the complete array, in either a structured or unstructured form, respectively. \section{Data Structure} The first requirement is to lay out a format for the data interface to this package. The {\rdar} package is set up to digest data separated by a user-defined separator character. The default separator is a space character but, as of V2.0, the separator may be specified by way of \vb|\readarraysepchar{|\textit{separator}\vb|}|.
The format for the data organization to be digested is as follows, for 2-D arrays: \TABstackTextstyle{\tiny} \setstackgap{L}{3pt} \newcommand\SEP{\,\langle\smash{\raisebox{1pt}{\tabbedCenterstack{s\\e\\p}}}\rangle\,} \renewcommand\arraystretch{1.1} {\arraycolsep=3pt\relax\small\( \begin{array}{lll@{\hspace{2pt}}ll} A_{11}\SEP &A_{12}\SEP &A_{13}\SEP & \ldots & A_{1\mathrm{(columns)}} \\ A_{21}\SEP &A_{22}\SEP & \ldots && \\ \vdots&&&&\\ A_{\mathrm{(rows)}1}\SEP &A_{\mathrm{(rows)}2}\SEP &A_{\mathrm{(rows)}3}\SEP & \ldots & A_{\mathrm{(rows)}\mathrm{(columns)}} \\ \end{array} \)} For 3-D arrays, the following structure is employed: {\arraycolsep=3pt\relax\small\( \begin{array}{lll@{\hspace{2pt}}ll} A_{111}\SEP &A_{112}\SEP &A_{113}\SEP & \ldots & A_{11\mathrm{(columns)}} \\ A_{121}\SEP &A_{122}\SEP & \ldots && \\ \vdots&&&&\\ A_{1\mathrm{(rows)}1}\SEP &A_{1\mathrm{(rows)}2}\SEP &A_{1\mathrm{(rows)}3}\SEP & \ldots & A_{1\mathrm{(rows)}\mathrm{(columns)}} \\ \rlap{\scriptsize$<$blank line$>$}&&&&\\ A_{211}\SEP &A_{212}\SEP &A_{213} & \ldots & A_{21\mathrm{(columns)}} \\ A_{221}\SEP &A_{222}\SEP & \ldots && \\ \vdots&&&&\\ A_{2\mathrm{(rows)}1}\SEP &A_{2\mathrm{(rows)}2}\SEP &A_{2\mathrm{(rows)}3}\SEP & \ldots & A_{2\mathrm{(rows)}\mathrm{(columns)}} \\ &&&&\\ \vdots&&&&\\ &&&&\\ A_{\mathrm{(planes)}11}\SEP &A_{\mathrm{(planes)}12}\SEP &A_{\mathrm{(planes)}13}\SEP & \ldots & A_{\mathrm{(planes)}1\mathrm{(columns)}} \\ A_{\mathrm{(planes)}21}\SEP &A_{\mathrm{(planes)}22}\SEP & \ldots && \\ \vdots&&&&\\ A_{\mathrm{(planes)}\mathrm{(rows)}1}\SEP &A_{\mathrm{(planes)}\mathrm{(rows)}2}\SEP &A_{\mathrm{(planes)}\mathrm{(rows)}3}\SEP & \ldots & A_{\mathrm{(planes)}\mathrm{(rows)}\mathrm{(columns)}} \\ \end{array} \)} Here,\,$\SEP${}\,is the data separator that is used to parse the input. Terms like $A_{\mathrm{(plane)}\mathrm{(row)}\mathrm{(column)}}$ refer to the \LaTeX{}-formatted data to be associated with the particular plane, row, and column of data. Note that, for 3-D arrays, a blank line can be used to signify to the parsing algorithm the size of a data plane (alternately, the number of rows per data plane can be explicitly provided to the \cmd{readarray} command). \section{Getting Data into Array Structures\label{s:ex}} One can provide data to be digested by this package in one of two ways: either through an external file, or by way of ``cut and paste'' into a \vb|\def|. If one chooses the external file approach, the command \vb|\readdef|\margcmd{readdef} can achieve this result. The command takes two arguments. The first is the file in which the data is stored, while the second is the data macro into which the file's data will be placed, for example \rl\vb|\readdef{data.txt}{\dataA}|\readdef{file1data.txt}{\dataA} In this case, the contents of the file \vb|data.txt| will be placed into the data macro \vb|\dataA|. Two alterations to the format occur during this conversion from file to \cmd{def}: 1) blank lines in the file are ignored; and 2) a data separator replaces the end-of-line. At this point, the data is still not digested into a 2-D or 3-D data ``array.'' However, two things have been accomplished: 1) the file contents are \cmd{def}'ed into the data macro \cmd{dataA}; and 2) they are also placed into a 1-D file record array, \cmd{ArrayRecord}. There is no \textit{requirement} that the input file be organized with structured rows of data corresponding to individual file records, nor that blank lines exist between planes of data (if the data is 3-D).
\textit{However}, there is a reason to do so, nonetheless. In particular, for datafiles that are organized in the preferred fashion, for example: \verbfilebox{file1data.txt} \rl\theverbbox a \vb|\readdef| attempts to estimate the number columns, and rows-per-plane of the dataset by analyzing the data structure. These estimates are given by \vb|\ncols|\margcmd{ncols} and \vb|\nrows|\margcmd{nrows}, in this case to values of \texttt{\ncols} and \texttt{\nrows}, respectively. Such data could prove useful if the array size is not known in advance. When \verb|\readdef| is invoked, a string \verb|\nrecords|\margcmd{nrecords} will also be set to the number of file records processed by the \vb|\readdef| command, in this case, to \texttt{\nrecords}. Finally, the 1-D file-record array, \cmd{ArrayRecord}\margcmd{ArrayRecord}, is created to allow access to the most recently read file records. For example, \vb|\ArrayRecord[3]| produces: ``\ArrayRecord[3]''. Note, however, that the array, \cmd{ArrayRecord}, will be overwritten on the subsequent invocation of \cmd{readdef}. Because \cmd{ArrayRecord} is only a 1-D file-record array, the \textit{actual} array metrics, given by \cmd{ArrayRecordCOLS}, \cmd{ArrayRecordROWS}, \cmd{ArrayRecordPLANES}, and \cmd{ArrayRecordCELLS} are \ArrayRecordCOLS, \ArrayRecordROWS, \ArrayRecordPLANES, and \ArrayRecordCELLS, respectively, which do not align with the estimations provided by \cmd{ncols} and \cmd{nrows}. In lieu of \verb|\readdef|, a generally less preferred, but viable way to make the data available is to cut and paste into a \vb|\def|. However, because a blank line is not permitted as part of the \vb|\def|, a filler symbol (\vb|%| or \vb|\relax|) must be used in its place, if it is desired to visually separate planes of data, as shown in the \verb|\def| example at the top of the following page. Note that the \vb|%| is also required at the end of the line containing \vb|\def|, in order to guarantee that, in this case, \vb|A111| is the first element of data (and not a space separator). However, unlike \vb|\readdef|, this definition will neither set the value of \vb|\ncols| nor \vb|\nrows|. \verbfilebox{file2data.txt} \rl\theverbbox Once the data to be placed into an array is available in a macro, by way of either \vb|\readdef| or \vb|\def|, the command to digest the data into an array is \vb|\readarray| for the case of 2-D or 3-D data. For 1-D file-record arrays, in contrast, the \cmd{readrecordarray} command is used to go directly from a file into the 1-D array, bypassing the intermediate step of a data macro. \subsection{1-D File-Record Arrays} If the desire is merely to parse a string of data based on a common data separator, such as a comma or any other character, there is no need to use the \rdar{} package. The \loi{} package, which is employed by \rdar, already has those provisions and should be used directly.% \begin{verbbox}[\footnotesize] \setsepchar{ } \readlist\oneDlist{\dataA} \oneDlistlen{} list items, 12th item is ``\oneDlist[12]''. \end{verbbox} \footnote{% For a simple 1-D list punctuated by data separators, one may use the \loi{} package directly:\\ \rl\theverbbox\\ which produces the following output: \setsepchar{ }% \readlist\oneDlist{\dataA}% \oneDlistlen{} list items, 12th item is ``\oneDlist[12]''. } On the other hand, if one wishes a 1-D file-record array, in which each array element corresponds to the record from a file, then \rdar{} can be used. 
The command \cmd{readrecordarray} can be used to stick the individual ``file records'' from a designated file into a 1-D array. The \cmd{readrecordarray} command takes two arguments: a file name containing data records, and the name of a 1-D record-array into which to place the file records. So, for example, with the invocation of \vb|\readrecordarray{data.txt}\oneD|, the data from the file \texttt{data.txt} is now saved in the \cmd{oneD} array, and can be retrieved; the 3rd record, for example, is obtained with \cmd{oneD[3]}, which returns \readrecordarray{file1data.txt}\oneD``\oneD[3]''. If an array name is reutilized, its prior definitions are cleared, so that ``old'' data is not inadvertently retrieved following the reutilization. \subsection{Creating 2-D and 3-D Arrays} The \cmd{readarray}\margcmd{readarray} command, used to convert raw parsable data into data arrays, takes three arguments. The first is the data macro into which the unarrayed raw data had previously been stuffed (e.g., by way of \cmd{readdef} or \cmd{def}). The second is the array-identifier macro into which the parsed data is to be placed. Finally, the last compound argument, enclosed in square brackets, denotes the rank and range of the array to be created. There is a starred version of the command, \cmd{readarray*}, which is used to remove leading/trailing spaces from the array elements, when parsed. This option is only relevant when the data separator is not already a space. If an array name is reutilized, its prior definitions are cleared, so that ``old'' data is not inadvertently retrieved following the reutilization. \subsubsection{2-D Arrays} \begin{sloppypar} For a 2-D array, this last argument of \cmd{readarray} will be of the form \vb|[-,<columns>]|. If the data had recently been read by way of \vb|\readdef|, the string \vb|\ncols| may be used to signify the \vb|<columns>| value. The \texttt{-} (or any other character before the initial comma) reminds us that the range of row numbers is not specified in advance, but is dictated by the length of the data macro containing the raw file data. For such a 2-D array, only the column range is specified. \end{sloppypar} Consider, for example, the previously discussed file, \texttt{data.txt}, which had been digested into the data macro \cmd{dataA}. One can process that as a 2-D array with an invocation of \vb|\readarray\dataA\twoD[-,\ncols]|, since \cmd{ncols} had been set to a value of \texttt{\ncols}, based on the prior \cmd{readdef}. Thereafter, data may be retrieved, for example the 3rd row, 2nd column, with \cmd{twoD[3,2]}, to give \readarray\dataA\twoD[-,\ncols] ``\twoD[3,2]''. The actual array size is given by \cmd{twoDROWS}, \cmd{twoDCOLS}, \cmd{twoDCELLS} as \twoDROWS, \twoDCOLS, and \twoDCELLS, respectively. The number of rows in the array is smaller than the number of file records, \oneDCELLS, because blank rows in the input file are ignored. One should also note that if the end of the data stream results in a partial final row of data, the partial row will be discarded. \subsubsection{3-D Arrays} For the 3-D case, the only difference in the invocation of \vb|\readarray| is in the 3rd argument, in which the rank and range of the array is specified. This last argument will be of the form \vb|[-,<rows>,<columns>]|. As before, the \vb|-| denotes that the range of the planes of data is not known in advance, but is governed by the length of data in the dataset. Only the ranges of rows and columns are specifiable here.
If \vb|\readdef| had been used on a properly formed input file, both \vb|\nrows| and \vb|\ncols| may be used to supply the range arguments of the 3-D array. For example, using the same \cmd{dataA} dataset, but reading it as a 3-D array can be accomplished with \vb|\readarray\dataA\threeD[-,\nrows,\ncols]|.% \readarray\dataA\threeD[-,\nrows,\ncols] This results in an array with \threeDPLANES{} planes, \threeDROWS{} rows, and \threeDCOLS{} columns (\threeDCELLS{} data cells total). Data from the 2nd plane, 1st row, 2nd column can be obtained via \cmd{threeD[2,1,2]} as ``\threeD[2,1,2]''. If, perchance, a row or plane is only partially defined by \cmd{readarray}, the partial data is discarded from the array. \subsubsection{Array Parsing Separator} While it may be easily envisioned that the array data is numerical, this need not be the case. The array data may be text, and even formatted text. Furthermore, one may introduce space characters into the data of individual cells simply by resetting the \rdar{} parsing separator to something other than the default space, ``~''. This can be done, for example employing a comma as the separator, by way of \vb|\readarraysepchar{,}|.\margcmd{readarraysepchar} Note also, using the facilities of the underlying \loi{} package, that compound separators are possible. For example, \textit{either} a comma \textit{or} a period may be used for the data parsing, by specifying a \textbf{logical-OR} (\vb+||+) separated list: \vb:\readarraysepchar{,||.}:. Similarly, a multicharacter separator is possible, so that setting \vb|\readarraysepchar{!!}| will cause \cmd{readarray} to look for instances of ``!!'' to divide the data into separate array elements. Consider the following comma-separated input in, let us say, the file \textsf{conjugation.txt}. \verbfilebox[\footnotesize]{file3data.txt} \rl\theverbbox The sequence of commands \begin{verbbox} \readarraysepchar{,} \readdef{conjugation.txt}\dataC \readarray*\dataC\tobeConjugation[-,\nrows,\ncols] \end{verbbox} \rl\theverbbox \readarraysepchar{,} \readdef{file3data.txt}\dataC \readarray*\dataC\tobeConjugation[-,\nrows,\ncols] will employ a comma separator to parse the file. It will then create a 3-D array using data from the file, placed into the array \cmd{tobeConjugation}. Leading/trailing spaces will be removed from the data, with the use of the star form of the \cmd{readarray} command. Data can then be directly accessed, so that, for example \cmd{tobeConjugation[1,3,3]} will yield the entry from the 1st plane, 3rd row, 3rd column as ``\tobeConjugation[1,3,3]''. The 3-D array metrics are \cmd{tobeConjugationPLANES}, \cmd{tobeConjugationROWS}, \cmd{tobeConjugationCOLS}, and \cmd{tobeConjugationCELLS}, which are here given as \tobeConjugationPLANES, \tobeConjugationROWS, \tobeConjugationCOLS, and \tobeConjugationCELLS. respectively. \section{Recalling Data from Array Structures} \begin{sloppypar} While one must specify the number of columns and/or rows associated with the \vb|\readarray| invocation, those numbers may not yet be known to the user, if the values employed came from the \vb|\readdef| estimations of \vb|\ncols| and \vb|\nrows|. 
Therefore, the \cmd{readarray} \margcmd{{\rmfamily\itshape array-identifier}CELLS}% \margcmd{{\rmfamily\itshape array-identifier}PLANES}% \margcmd{{\rmfamily\itshape array-identifier}ROWS}% \margcmd{{\rmfamily\itshape array-identifier}COLS}% command variants also define the following strings: \itshape\vb|\|array-identifier\vb|CELLS|, \vb|\|array-identifier\vb|PLANES|, \vb|\|array-identifier\vb|ROWS|{\upshape, and} \vb|\|array-identifier\vb|COLS|\upshape, where \cmd{array-identifier} is the array name supplied to the \cmd{readarray} command. Note, for 3-D arrays, that \end{sloppypar} \rl\itshape\vb|\|array-identifier\vb|CELLS| $=$ \\\rl\quad \vb|\|array-identifier\vb|PLANES| $\times$ \vb|\|array-identifier\vb|ROWS| $\times$ \vb|\|array-identifier\vb|COLS|\upshape For the \cmd{tobeConjugation} example of the prior section, \tobeConjugationCELLS $=$\tobeConjugationPLANES $\times$\tobeConjugationROWS $\times$% \tobeConjugationCOLS. Likewise, for 2-D arrays \rl\itshape\vb|\|array-identifier\vb|CELLS| $=$ \vb|\|array-identifier\vb|ROWS| $\times$ \vb|\|array-identifier\vb|COLS|\upshape To retrieve the data from the array, one merely supplies the array name in the form of \cmd{}\textit{array-identifier}\margcmd{\rmfamily\itshape array-identifier% \upshape\ttfamily[...]}, along with the array-cell nomenclature in the form of \textit{\texttt{\upshape[}plane\texttt{\upshape,}row\texttt{\upshape,}% column\texttt{\upshape]}} for 3-D arrays, \textit{\texttt{\upshape[}row\texttt{\upshape,}% column\texttt{\upshape]}} for 2-D arrays, and \textit{\texttt{\upshape[}row\texttt{\upshape]}} for 1-D arrays. Thus, in the case of the earlier example involving conjugation of the verb \textit{to be}, the second-person future-perfect tense of the verb is given by \rl\cmd{tobeConjugation[3,2,4]} which yields ``\tobeConjugation[3,2,4]''. \section{Bounds Checking} While the user is developing his or her application involving the {\rdar} package, there may accidentally arise the unintended circumstance where an array element is requested which falls outside the array bounds. In general, when a non-existent array element is requested in the absence of bounds checking, the call will expand to \cmd{relax}. The package provides three declarations to set the manner in which array bounds are to be monitored. The setting \cmd{nocheckbounds}\margcmd{nocheckbounds} is used when bounds are not to be checked. This is the default behavior for \rdar. For some bounds checking, \cmd{checkbounds}\margcmd{checkbounds} may be set. With this setting, bounds violations are noted, but no guidance is provided as to the allowable index range for the array. However, with \cmd{hypercheckbounds}\margcmd{hypercheckbounds} set, full bounds checking is possible. With this setting, not only are violations noted, but a description of the actual array range is provided. As of V2.0, bounds violations are noted in the log file, rather than the document itself. However, if an array bound is violated when bounds checking is turned on, a ``?'' shows up in the document itself. \section{Accessing Array Cells if Full Expansion is Required (e.g., placed in an \texttt{\bs edef}) } If full expansion is required of array cell contents (and assuming the cell content is expandable), it is advisable to set \cmd{nocheckbounds}% , so that the error checking code is not included in the expansion. Results may also be expanded even with \cmd{checkbounds} set, though the error-checking code is part of the expansion.
However, with \cmd{hypercheckbounds} set, full expansion of array cells is no longer possible. \section{Accessing Array Cells if No Expansion is Required} With the normal use of \cmd{\rmfamily\itshape array-identifier} syntax for accessing array cells, several levels of expansion are required to directly recover the original tokens of the cell, and then only when bounds checking is disabled. When the actual unexpanded tokens of a cell are required, the use of the \cmd{arraytomacro}\margcmd{arraytomacro} command provides the means to accomplish this. The command takes the array name and index as the initial arguments followed by a generic macro name into which to place the unexpanded tokens of the indexed array cell. So, for example \vb|\arraytomacro\tobeConjugation[2,2,3]\thiscell| will place the cell's original tokens in the macro \cmd{thiscell}. Upon detokenization, \cmd{thiscell} contains \arraytomacro\tobeConjugation[2,2,3]\thiscell ``\texttt{\detokenize\expandafter{\thiscell}}''. \section{Support Routines} The package provides two commands that can help one understand how a data set has been parsed into an array. Both of these commands dump the specified array to the document. In the case of \cmd{arraydump}\margcmd{arraydump}, the array is formatted in the structure of the array, broken up by columns, rows, and planes. In the case of \cmd{scalardump}\margcmd{scalardump}, however, the elements of the array are dumped sequentially, without reference to the array's hierarchy. For the case of the 1-D record array \cmd{oneD} employed in prior sections, for example, the invocations of \rl\vb|\arraydump\oneD|\\ \rl\vb|\scalardump\oneD| result in \arraydump\oneD \scalardump\oneD \clearpage The \cmd{twoD} equivalent, resulting from parsing the same data file as a 2-D, rather than a 1-D record array, is \rl\vb|\arraydump\twoD|\\ \rl\vb|\scalardump\twoD| \arraydump\twoD \scalardump\twoD For the case of the 3-D array (earlier read as \cmd{threeD}), the \cmd{arraydump} would appear as \rl\vb|\arraydump\threeD|\\ \rl\vb|\scalardump\threeD| \arraydump\threeD \scalardump\threeD Note that the \cmd{scalardump} of \cmd{threeD} is indistinguishable from that of \cmd{twoD}, since both arrays are composed of the same data cells, though arrayed into different plane/row/column structures. \clearpage For comparison, the \cmd{arraydump} of \cmd{tobeConjugation} is \arraydump\tobeConjugation \section{Deprecated, Vestigial, and Defunct Features} \textbf{Deprecated} The following commands are supplied, but are no longer the preferred embodiment of package syntax. \itshape \rl\vb|\copyrecords{|array-identifier\vb|}|% \\ \rl\vb|\readArrayij{\|data-macro\vb|}{|array-identifier\vb|}{|columns\vb|}|% \\ \rl\vb|\readArrayij*{\|data-macro\vb|}{|array-identifier\vb|}{|columns\vb|}|% \\ \rl\vb|\readArrayijk{\|data-macro\vb|}{|array-identifier\vb|}{|rows\vb|}{|columns\vb|}|% \\ \rl\vb|\readArrayijk*{\|data-macro\vb|}{|array-identifier\vb|}{|rows\vb|}{|columns\vb|}|% \\ \rl\vb|\showrecord[|error\vb|]{|record number\vb|}|\\ \rl\vb|\Arrayij[|error\vb|]{|array-identifier\vb|}{|row\vb|}{|column\vb|}|\\ \rl\vb|\Arrayijk[|error\vb|]{|array-identifier\vb|}{|plane\vb|}{|row\vb|}{|% column\vb|}|\\ \rl\vb|\arrayij{|array-identifier\vb|}{|row\vb|}{|column\vb|}|\\ \rl\vb|\arrayijk{|array-identifier\vb|}{|plane\vb|}{|row\vb|}{|% column\vb|}|\upshape \textbf{Vestigial} The following support macros are provided but no longer recommended. Their capability is more fully served by way of the \loi{} package.
\itshape \rl\vb|\getargsC{\|macro {\upshape or} string\vb|}|\\ \rl\vb|\arg|index\\ \rl\vb|\narg|\\ \rl\vb|\showargs|% \upshape Note that whereas \cmd{getargs} could previously (pre-V2.0 \rdar) employ only a space as the parsing separator, \cmd{getargs} now respects the currently set value of the separator, as (re)defined by \cmd{readarraysepchar}. \textbf{Defunct} The following macros are no longer supported. \itshape \rl\vb|\converttilde|\\ \rl\vb|\record|index \upshape Since the package now supports arbitrary parsing separators, there is no need for the function of \cmd{converttilde}. However, were one desiring to parse while treating hard spaces as spaces, this could be simply achieved under V2.0 \rdar{} by setting the parsing character as either a space or a hard space, using \vb:\readarraysepchar{ ||~}:. Likewise, the indirect addressing (using a romannumeral \textit{index}) provided by the internal command \cmd{record}\textit{index} is fully superseded by the ability to directly address any record of \rdar's 1-D record arrays. \section{Acknowledgements} I am profoundly thankful to Christian Tellechea for using my simplistic (read ``deficient'') \textsf{getargs} package to inspire his effort in creating the powerful \loi{} package. It is precisely the tool I have sought for a long time, and I have adapted its use into the workings of this package. I would like to thank Dr. David Carlisle for his assistance in helping the author rewrite the \vb|\getargs| command, originally found in the \textsf{stringstrings} package. To distinguish the two versions, and in deference to him, it is herein named \vb|\getargsC|. However, as of V2.0, its presence is vestigial, having instead been superseded by the \loi{} package macros. I am likewise grateful to Ken Kubota, who suggested moving the \vb|\newread| outside of \vb|\readdef|, so as not to prematurely exhaust the 16 available file streams. \clearpage \section{Code Listing} \verbfilenobox[\footnotesize]{readarray.sty} \end{document}
{ "alphanum_fraction": 0.7368177613, "avg_line_length": 40.4868913858, "ext": "tex", "hexsha": "8376958419f341de3846b35b265a23da2b42e853", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "8b87fd457921447791308ddf57d839a31e2e566d", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "fierg/two-dimensional-RLE", "max_forks_repo_path": "doc/kol/readarray.tex", "max_issues_count": 30, "max_issues_repo_head_hexsha": "8b87fd457921447791308ddf57d839a31e2e566d", "max_issues_repo_issues_event_max_datetime": "2020-01-26T15:29:46.000Z", "max_issues_repo_issues_event_min_datetime": "2019-07-15T14:19:47.000Z", "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "fierg/two-dimensional-RLE", "max_issues_repo_path": "doc/kol/readarray.tex", "max_line_length": 140, "max_stars_count": 1, "max_stars_repo_head_hexsha": "8b87fd457921447791308ddf57d839a31e2e566d", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "fierg/two-dimensional-RLE", "max_stars_repo_path": "doc/kol/readarray.tex", "max_stars_repo_stars_event_max_datetime": "2021-03-18T15:08:24.000Z", "max_stars_repo_stars_event_min_datetime": "2021-03-18T15:08:24.000Z", "num_tokens": 9708, "size": 32430 }
%!TEX root = ../thesis.tex %******************************************************************************* %****************************** Concluding Chapter ***************************** %******************************************************************************* \chapter{Concluding remarks} %Title of last chapter \label{chap:conclusion} \ifpdf \graphicspath{{Chapter4/Figs/Raster/}{Chapter4/Figs/PDF/}{Chapter4/Figs/}} \else \graphicspath{{Chapter4/Figs/Vector/}{Chapter4/Figs/}} \fi %********************************** %First Section ************************************** This thesis has explored the causes and effects of several uncertainties in nuclear fusion reactor analysis and operation. It began by describing motivations for developing controlled nuclear fusion power, by situating new power generation technologies within a broader environmental and socio-political context. A growing world population demanding improving material living standards has increased global power demand to double its value of 40 years ago. Human-induced climate change, deteriorating air quality and a desire for greater energy security have helped spur the development of low-carbon electricity production technologies such as renewables, GEN-IV fission and fusion to meet contemporary and future demands for power. Whilst the body of nuclear fusion knowledge has massively accumulated since the process was first discovered, constructing and operating a working fusion reactor, as opposed to an experiment, is still yet to happen. However we are now in the final stages of research and development before such a reactor is constructed and it is tremendously important to identify and quantify any risks to the realisation of controlled nuclear fusion as an economical, practical electricity generation scheme. Many of these risks are political and organisational, but as the introductory chapter alluded to, many are technical, engineering challenges. Hopefully none are physical impossibilities. Many of the technical risks are amplified by our inexact knowledge of them. A tremendous amount of work is currently undertaken to model the performance of current and future fusion devices, simulating performance and estimating parameters of interest. However, these parameters are always subject to some sort of uncertainty, whether it is quoted or not. Uncertainty in powers, Mean Time Between Failures (MTBF), particle fluxes, cost of electricity, Tritium Breeding Ratios and more. To decrease these uncertainties, to narrow their distribution around the mean value, is to reduce the maximum potential risk associated with them and to give room for manoeuvre in the trade-offs that come with engineering a functioning system. The work which comprises this thesis has investigated several sources of uncertainty of pertinence to the development of nuclear fusion as a power generation scheme. First, a stochastic, sampling technique known as Total Monte Carlo was used to explore the effects of nuclear data uncertainty in the TBR for a future fusion power plant design, DEMO. This technique has never before been used to estimate uncertainty on TBRs. Investigating the contribution of uncertainty from lead nuclear data, many radiation transport simulations of the DEMO device were performed, each tallying the TBR and sampling different lead nuclear data. This work used the TENDL2015 nuclear dataset, with fully correlated cross-channel behaviour for the reaction channels, angular distributions and other variables. 
The principal result of the work was a determination of the standard deviation of the HCLL DEMO TBR due to lead nuclear data: 1.2\% of the mean value. The simulated TBR distribution was not normally distributed; instead it had a negative skewness--a low-value tail. As a result, 5.8\% of the TBR distribution was less than unity. This only serves to reinforce the importance of higher-order moments in parameter probability distributions and the value of TMC style methods. Generally, where parameter mean values are close to the limits of some operational range, one should seek to know the shape of that parameter distribution, not just the extent. Whilst a TBR in a liquid-metal breeder blanket could potentially be tailored through on-line $^{6}$Li enrichment, this is not the case for ceramic-type breeding concepts. For those, an overestimated TBR could be a costly mistake. After sampling the TBR distribution, the relationships between fundamental nuclear parameters and the TBR were investigated, with a handful of local Optical Model Potential (OMP) parameters found to be responsible for most of the variation in TBR. In terms of future research, the determination of uncertainty contributions to TBR in lead blankets from other nuclides such as $^{16}$O, $^{56}$Fe and $^{182,183,184,186}$W would be a worthwhile effort. Advances in theory or new nuclear physics experimental data could help refine our models of lead nuclei and their behaviour, reducing uncertainties in lead based blanket designs.

The subsequent chapter looked at a particular modelling approximation, spatial homogenisation, where heterogeneity in material composition is artificially reduced by replacing many realistic materials with mass-conserving mixtures of materials. The effects of this approximation on radiation shielding were investigated. First, the basic theory of radiation shielding was introduced, before a description of a comparative method for determining the discrepancy between heterogeneous and homogeneous modelling approaches. This method was employed to analyse how the approximation affects calculated dose rates for parameters relevant to the ITER tokamak. In one scenario, the on-load dose rate from D-T fusion was calculated on the far side of a reinforced concrete wall similar to the ITER bio-shield. The discrepancy induced by the homogeneous approximation was a function of wall thickness, and attained a maximum of a 22\% underestimate of the neutron dose. This underestimate in the homogeneous simulation is due to the dispersed absorbing nuclei from the steel material, which act as neutron sinks. The effect was smaller for photons, with a maximum underestimate of 10\%. The impacts of spatial homogenisation on the Shut-Down Dose Rate (SDDR) were also investigated. Activation at the internal face of the shield was found to be slightly overestimated by spatial homogenisation, by approximately 10\%, although this did vary as a function of time since last irradiation. The effects of spatial homogenisation in breeding blankets for fusion have been explored by \citeauthor{Pelloni1989} amongst others \cite{Kumar1989}. However, these results pertaining to radiation shielding in nuclear systems are novel, with no similar study having been published before.

Finally, an exploration of energy domain discretisation, or `group structure optimisation', was undertaken. How the energy domain is subdivided for nuclear analyses can have a significant effect on results.
Previous methods for optimisation were mentioned, along with a discussion of nuclear resonant behaviour and the phenomenon of self-shielding. Having developed a framework for the generation of nuclear data on an arbitrary group structure, a method for the targeting of bin density was devised. The method starts with a logarithmically-spaced group structure and determines where self-shielding modifications to cross-sections most impact reaction rates. Distributions of effective self-shielding in energy are computed and summed for chosen nuclides. This distribution is then used as the basis for a bin density distribution and the apportioning of the energy domain. The method was applied to optimise group structures for two cases: a simple tungsten-only example and a more general group structure optimised for nine metals ($>$40 nuclides) of importance to fusion. Both conferred a significant advantage over traditional group structures in common usage today. The optimised 280 bin group structure was used to determine reaction rates in JET activation foils more accurately than the CCFE 709 bin group structure. Future work could involve the testing of optimised general group structures, to see if there is still an advantage when optimisation is for a very large population of nuclides. Iterative applications of the algorithm were investigated during the course of this work, but did not confer any advantage over single applications. One alternative, related route for optimisation could be to use the effective self-shielding distributions as defined in this work, but starting from hyper-fine groups and removing the least necessary bounds until a target is reached. This approach may waste fewer bins than going from relatively coarse to locally fine, as is done in the current implementation.

Nuclear analyses are subject to a variety of sources of uncertainty. These can arise from nuclear data, modelling approximations and the discretisation of variables, amongst many other factors. Often these sources are simultaneously present in problems. \citeauthor{El-Guebaly2009}'s study of contributions to TBR uncertainty in lithium-lead blankets asserts that 90\% of the TBR margin required is to account for uncertainty in its estimation; only the remaining fraction is the required net gain for system losses \cite{El-Guebaly2009}. The uncertainty comes from both nuclear data (60\%) and modelling (30\%). Sometimes modelling approximations are synergistic in effect, as \citeauthor{Pelloni1989} noted: failing to account for self-shielding effects in breeding calculations is particularly important with homogenised geometries \cite{Pelloni1989}.

Minimising uncertainty will become more important as we move towards constructing the first demonstration fusion devices, as unexpected or poor performance may reduce the momentum of these projects. We can continue to reduce sources of uncertainty in nuclear analyses through the development of improved methods, such as the energy group structure optimisation presented here. If approximations must be made because of limited computation, as with spatial homogenisation, then further analysis will be necessary to ensure their effects are adequately understood. Lastly, quantifying parameter distributions through the application of uncertainty propagation techniques like TMC will likely see an increased role--expanding from solely nuclear data to sampling other sorts of input distributions, perhaps including manufacturing tolerances and other parameters.
{ "alphanum_fraction": 0.8015034732, "avg_line_length": 262.725, "ext": "tex", "hexsha": "50a34d1b978c6bb23e4fb08a6cd8766c66b69dbd", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "dd37e323e8093c0b2e7f07118f81c9320dc018c6", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "thomas-fred/thesis", "max_forks_repo_path": "Chapter4/chapter4.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "dd37e323e8093c0b2e7f07118f81c9320dc018c6", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "thomas-fred/thesis", "max_issues_repo_path": "Chapter4/chapter4.tex", "max_line_length": 2265, "max_stars_count": null, "max_stars_repo_head_hexsha": "dd37e323e8093c0b2e7f07118f81c9320dc018c6", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "thomas-fred/thesis", "max_stars_repo_path": "Chapter4/chapter4.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 1965, "size": 10509 }
\chapter{Literature Review}
\label{chap:2}

\section{Nuclear Fuel Cycle Simulator History}
The nuclear fuel cycle represents the nuclear fuel life cycle from initial extraction through processing, use in reactors, and, eventually, final disposal. This complex system of facilities and mass flows collectively provides nuclear energy in the form of electricity \cite{yacout_modeling_2005}. A closed nuclear fuel cycle reprocesses used fuel, whereas an open nuclear fuel cycle does not. The US has an open nuclear fuel cycle; other countries, such as France, have a closed nuclear fuel cycle.

% Why were nuclear fuel cycle simulators introduced
Nuclear fuel cycle system analysis tools were introduced to investigate nuclear fuel cycle dynamics at a local and global level. Nuclear fuel cycle simulators' primary purpose is to understand the dependence between various system designs, deployment strategies, and technology choices in the nuclear fuel cycle and the impact their variations have on the system's performance. Nuclear fuel cycle simulator results are used to guide research efforts, advise future design choices, and provide decision-makers with a transparent tool for evaluating \gls{FCO} to inform big-picture policy decisions \cite{yacout_modeling_2005}. Nuclear fuel cycle simulators were initially introduced by the Nuclear Strategy Project at Science Applications International Corp to provide simple system dynamic models to improve technical dialog between policymakers and expert groups \cite{yacout_modeling_2005}. Since then, national laboratories around the globe have driven development of nuclear fuel cycle simulators. These simulators track the flow of materials through the nuclear fuel cycle, from enrichment to final disposal of the fuel. However, many have been developed for customized applications, resulting in inflexible architectures \cite{huff_fundamental_2016}.

Two methods can be used to model facility and material flow in nuclear fuel cycle simulators: fleet-level and agent-level. Fleet-based models do not distinguish between discrete facilities or materials but instead lump them into fleets and streams; in return, they offer simplicity and lower computational cost. Agent-based models treat facilities and materials as discrete objects. This method's advantages include more flexible simulation control, ease of simulating a wide range of scenarios with new technologies, plug-and-play comparison of modeling methodologies, and support for a range of fidelities. Many nuclear fuel cycle simulators value integral effects over isotopic and facility-level resolution by modeling only fleet-level dynamics, grouping facilities into fleets and materials into streams \cite{huff_fundamental_2016}.

Historically, national laboratories have restricted public access to their tools, resulting in universities and other non-laboratory organizations creating their own nuclear fuel cycle simulator tools. Table \ref{tab:nfctools} shows a breakdown of all major nuclear fuel cycle simulators and the organization(s) associated with them.
\begin{table}[]
\caption{Nuclear fuel cycle simulator tools and their corresponding organizations.}
\label{tab:nfctools}
\centering
\doublespacing
\small
\resizebox{1\textwidth}{!}{
\begin{tabular}{lll}
\hline
\textbf{NFC Simulator} & \textbf{Country} & \textbf{Organization(s) associated with it} \\
\hline
ANICCA \cite{skarbeli_quantification_2020} & Belgium & \gls{SCK CEN} \\
CAFCA \cite{guerin_impact_2009} & USA & \gls{MIT} \\
CLASS \cite{mouginot_class_2012} & France & \gls{CNRS}, \\
 & & \gls{IRSN} \\
COSI \cite{coquelet-pascal_cosi6:_2015} & France & \gls{CEA} \\
\Cyclus \cite{huff_fundamental_2016} & USA & \gls{UW}, \\
 & & \gls{UIUC} \\
DESAE \cite{tsibulskiy_desae_2006} & - & \gls{OECD} \\
DYMOND \cite{yacout_modeling_2005} & USA & \gls{ANL} \\
EVOLCODE2 \cite{alvarez-velarde_evolcode2_2007} & Spain & \gls{CIEMAT} \\
FAMILY21 \cite{mccarthy_benchmark_2012} & Japan & \gls{JAEA} \\
MARKAL \cite{fishbone_markal_1981} & USA & \gls{BNL} \\
NFCSim \cite{schneider_nfcsim:_2005} & USA & \gls{LANL} \\
NFCSS \cite{iaea_nuclear_2007} & - & \gls{IAEA} \\
ORION \cite{gregg_analysis_2012} & UK & \gls{NNL} \\
VISION \cite{jacobson_vision:_2006} & USA & \gls{INL} \\
\hline
\end{tabular}}
\end{table}

In this work, we use the \Cyclus and DYMOND nuclear fuel cycle simulator tools. The \Cyclus nuclear fuel cycle simulator was created to break the practice of tools with inflexible architectures and restricted access \cite{huff_fundamental_2016}. \Cyclus is an open source nuclear fuel cycle simulator with agent-based modeling of discrete facilities and isotopic materials. With an agent-based framework, the simulator tracks transformation and trade of resources between agents with customizable behavior \cite{huff_fundamental_2016}. This enables extension and reuse of the tool for fuel cycle simulations with different objectives. DYMOND is a hybrid nuclear fuel cycle simulator tool that uses fleet-based modeling for all facilities and materials, with the exception of reactor facilities, which are modeled discretely. Chapter \ref{chap:3} provides more detail about \Cyclus and DYMOND.

\section{Transition Scenario Capabilities in Nuclear Fuel Cycle Simulators}
\label{sec:egs}
The Office of Nuclear Energy's \gls{FCO} Campaign led an evaluation and screening study of a comprehensive set of nuclear \glspl{FCO} to identify \glspl{FCO} with the potential to substantially improve the nuclear fuel cycle in the challenge areas \cite{wigeland_nuclear_2014}. The evaluation and screening study identified 40 \glspl{EG} to represent a comprehensive set of all possible nuclear fuel cycles \cite{wigeland_nuclear_2014}. Each evaluation group consists of a group of nuclear fuel cycles that share similar resource requirements, fuel mass usage and compositions, and disposal needs \cite{wigeland_nuclear_2014}. The study assessed each evaluation group using nine evaluation criteria: nuclear waste management, proliferation risk, nuclear material security risk, safety, environmental impact, resource utilization, development and deployment risk, institutional issues, and financial risk. The study concluded that fuel cycles involving continuous recycling of co-extracted U/Pu or U/TRU in fast spectrum critical reactors consistently achieved high overall performance. EG23, EG24, EG29, and EG30 are the high-performing fuel cycle options \cite{wigeland_nuclear_2014}. These evaluation groups were evaluated at an equilibrium state to understand their end-state benefits.
Knowing the most promising end-state evaluation groups, the next step is to evaluate and compare the transition process from the current EG01 state to these promising evaluation groups \cite{feng_standardized_2016}. The transition from the once-through fuel cycle to a closed fuel cycle has slow dynamics and a complex interdependence of many factors. Thus, the study of transition scenarios using a nuclear fuel cycle simulator is key to understanding the influence of these multi-coupled factors on the transition.

The U.S. national laboratories conducted a benchmarking effort of transition scenario capabilities in nuclear fuel cycle simulators \cite{feng_standardized_2016,guerin_benchmark_2009}. This comparison study aims to drive nuclear fuel cycle simulator advancements and build confidence in the use of nuclear fuel cycle simulators in strategic and policy decisions \cite{feng_standardized_2016}. Both nuclear fuel cycle simulator tools used in this thesis, \Cyclus and DYMOND, were verified in a transition scenario benchmarking effort \cite{feng_standardized_2016,bae_standardized_2019}. The reference problem used in the benchmark was a simplified transition from a fleet of one hundred 1000-MWe \glspl{LWR} to a fleet of 333.3-MWe \glspl{SFR}. Both were found to be in excellent agreement with the analytical solution and with other nuclear fuel cycle simulators (ORION, VISION, and MARKAL). This benchmarking effort proved that these nuclear fuel cycle simulators are capable of simulating a simple transition scenario. However, to evaluate the nuclear fuel cycle simulators' flexibility, Feng et al. concluded that more efforts must be made to model realistic transition scenarios \cite{feng_standardized_2016}.

\section{Sensitivity Analysis Studies}
% sensitivity analysis in dynamic simulations in other fields
We simulate transition scenarios to predict the future; however, when implemented in the real world, the simulated scenarios tend to deviate from the optimal scenario. Also, transition scenario analyses using nuclear fuel cycle simulators are imperfect representations of the real world \cite{noauthor_effects_2017}. Therefore, sensitivity analysis studies of nuclear fuel cycle transition scenarios must be conducted to better understand the impact of the variation of input parameters on performance metrics, enabling the nuclear fuel cycle simulators to more reliably inform policy decisions \cite{passerini_systematic_2014}.

Transition scenario sensitivity analysis is a technique used to determine how varying different input variables impacts a transition scenario's performance metrics. Assumptions about facility parameters and technology readiness are made when setting up the simulation scenarios. Sensitivity analysis evaluates each performance metric's sensitivity to each assumption. Previous work towards sensitivity analysis and uncertainty quantification of nuclear fuel cycle simulations used these terms interchangeably because uncertainty quantification is viewed as design uncertainty \cite{noauthor_effects_2017}. For example, a never-been-built pyrochemical reprocessing facility's throughput is viewed as a variable design parameter. We determine how variation of the pyroprocessing facility's throughput impacts performance metrics. Therefore, in this thesis, we refer to both sensitivity analysis and uncertainty quantification as sensitivity analysis. By conducting studies on an extensive input parameter set, it is possible to determine the input parameters that each performance metric is most sensitive to.
This helps us target where we should conduct closer sensitivity analysis and add further modeling detail. It also identifies which parameters the system is relatively insensitive to \cite{noauthor_effects_2017}. In this work, we use three types of sensitivity analysis: one-at-a-time, synergistic, and global.

\subsection{One-at-a-time Sensitivity Analysis}
The one-at-a-time sensitivity analysis technique estimates the isolated effect of one input variable. This approach gives each variable's local impact on the performance metrics. The \gls{OECD} conducted a one-at-a-time sensitivity analysis \cite{noauthor_effects_2017} on key nuclear fuel cycle input parameters and quantified the impacts on the performance metrics. In the OECD study, the base scenario has a duration of 200 years and begins with \glspl{PWR} that transition to \glspl{SFR} while maintaining constant electricity production. Each parameter was varied independently for three cases: the base case, a high case, and a low case with respect to the base case. The effects of these variations on the performance metrics are expressed in tornado plots and sensitivity tables. The OECD study's analysis overview is given in Figure \ref{fig:oecd-sensitivitytable}. Figure \ref{fig:oecd-tornado} shows an example tornado plot from the OECD study that represents the sensitivity of the separated Pu in storage amount to the various input parameters.

\begin{figure}[]
\begin{center}
\includegraphics[scale=0.75]{./figures/oecd-sensitivitytable.png}
\end{center}
\caption{Overall results from OECD one-at-a-time sensitivity analysis study. This is reproduced from the OECD report \cite{noauthor_effects_2017}. Sensitivity Table with an overview of all the sensitivity indicators “S” obtained from the various input parameters (one row for each input parameter) and the various output parameters (one column for each output parameter). When a sensitivity coefficient is positive (red), this means an increase of the input parameter induces an increase of the output parameter. Whereas, when it is blue, this means an increase of the input parameter induces a decrease of the output parameter. When a coefficient of determination $r^2$ is lower than 0.9, then the related sensitivity indicator is replaced by a question mark “?” in the table. When the output parameter is not impacted by the variation of the input parameter, then the related sensitivity indicator is not available and it is replaced by a blank in the table. \cite{noauthor_effects_2017}.}
\label{fig:oecd-sensitivitytable}
\end{figure}

\begin{figure}[]
\begin{center}
\includegraphics[scale=1]{./figures/oecd-tornado.png}
\end{center}
\caption{A tornado plot from the OECD one-at-a-time sensitivity analysis study, showing the sensitivity of the separated Pu in storage amount to each input parameter. This is reproduced from the OECD report \cite{noauthor_effects_2017}.}
\label{fig:oecd-tornado}
\end{figure}

\subsection{Synergistic Sensitivity Analysis}
\label{sec:syn}
The synergistic sensitivity analysis technique involves multi-parameter input sweeps to view how synergistically changing input variables impacts the performance metrics. Synergistic sensitivity analysis is conducted by varying two input variables simultaneously and viewing their combined impact on each performance metric or on a combination of weighted performance metrics. Passerini et al. \cite{passerini_systematic_2014} applied this analysis method to nuclear fuel cycle simulations.
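
To make the idea concrete, a two-parameter sweep of this kind can be organised as a simple grid evaluation. The sketch below is illustrative only: \texttt{run\_scenario} is a hypothetical stand-in for a full fuel cycle simulation, and the metrics and weights are arbitrary placeholders, not values from any study cited here.
\begin{verbatim}
# Illustrative two-parameter (synergistic) sweep; not the actual study code.
# run_scenario() is a hypothetical stand-in for a fuel cycle simulation that
# returns performance metrics for given technology introduction dates.
import itertools
import numpy as np

def run_scenario(reprocessing_year, fast_reactor_year):
    # Placeholder metrics; a real study would run Cyclus or DYMOND here.
    lcoe = 50 + 0.1 * (fast_reactor_year - 2030)
    swu = 1e6 - 1e4 * (2060 - reprocessing_year)
    return {"lcoe": lcoe, "swu": swu}

reproc_years = range(2030, 2061, 5)
fr_years = range(2030, 2061, 5)
weights = {"lcoe": 0.5, "swu": 0.5}          # arbitrary illustrative weights

payoff = np.zeros((len(reproc_years), len(fr_years)))
for (i, ry), (j, fy) in itertools.product(enumerate(reproc_years),
                                          enumerate(fr_years)):
    metrics = run_scenario(ry, fy)
    # Lower is better for both placeholder metrics, so negate the weighted sum.
    payoff[i, j] = -sum(weights[k] * metrics[k] for k in weights)
\end{verbatim}
The resulting \texttt{payoff} array plays the same conceptual role as the payoff surface shown in Figure \ref{fig:passerini_payoff}.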
Figure \ref{fig:passerini_payoff} shows the results of a synergistic analysis conducted by Passerini et al. \cite{passerini_systematic_2014} in which the thermal reprocessing and fast reactor technology introduction dates were varied. The plot shows an objective payoff surface representing a combination of weighted optimization criteria: minimize construction of reprocessing plants, minimize \gls{LCOE}, minimize depleted uranium generated, and minimize total SWU used. This type of synergistic study clearly shows how variation in two input variables impacts the system; however, if more than two input variables are varied, it is difficult to visualize the impact on the system in a plot. Therefore, the global sensitivity analysis method of the subsequent section is introduced to characterize the global sensitivity of the system.

\begin{figure}[]
\begin{center}
\includegraphics[scale=0.45]{./figures/passerini_payoff.jpg}
\end{center}
\caption{Payoff surface for variation in thermal reprocessing and fast reactor technology introduction date \cite{passerini_systematic_2014}. The payoff surface represents a combination of weighted optimization criteria: minimize construction of reprocessing plants, minimize \gls{LCOE}, minimize depleted uranium generated, and minimize total SWU used.}
\label{fig:passerini_payoff}
\end{figure}

\subsection{Global Sensitivity Analysis}
\label{sec:sobol}
To fully consider the synergistic effects of simultaneous variation of all the nuclear fuel cycle input parameters, a variance-based approach can be used instead \cite{thiolliere_methodology_2018}. Thiolliere et al. conducted a global sensitivity analysis of a nuclear fuel cycle transition scenario by using Latin Hypercube sampling \cite{mckay_comparison_2000} to generate Sobol indices \cite{sobol_global_2001} that indicate which design parameters have the most influence on the performance metrics. They applied this method to a simplified PWR UOX and PWR MOX fleet.

Latin Hypercube sampling is a statistical method for generating random samples of parameter values from a multidimensional distribution. For Latin Hypercube sampling of M input parameters, the user first chooses the number of sample points, N; each parameter's input space is then divided into N sub-sections. The algorithm selects a random value from each sub-section for each input parameter. Once there is a list of samples for each input parameter, they are combined randomly to form N sets of M dimensions each \cite{mckay_comparison_2000}. The nuclear fuel cycle simulation is run N times and the performance metrics of interest are recorded.

Sobol sensitivity analysis quantifies how much of the variability in the model's performance metrics is attributable to each input parameter \cite{zhang_sobol_2015}. Sobol indices decompose the variance of the metric into fractions attributed to individual inputs or sets of inputs. A large Sobol index signifies that variation in that input variable has a larger impact on the output parameter. A model is viewed as a function:
\begin{align*}
Y &= f(\textbf{X})
\intertext{where:}
Y &= \mbox{Performance metric} \\
\textbf{X} &= \mbox{vector of \textit{d} input parameters} \\
d &= \mbox{No. of varying input parameters}
\end{align*}
First order Sobol indices measure the effect of one input parameter on the performance metric, averaged over variations in the other input parameters \cite{im_sensitivity_1993}:
\begin{align*}
S_i &= \frac{V_i}{Var(Y)}
\intertext{where:}
S_i &= \mbox{First order sensitivity index} \\
V_i &= Var_{X_i}(E_{\textbf{X}(\sim i)}(Y|X_i)) = \mbox{Conditional variance} \\
\textbf{X}(\sim i) &= \mbox{the set of all variables except $X_i$}
\end{align*}
Total-effect Sobol indices measure the effect of the first order Sobol index plus all the interactions that the input parameter has with the other input parameters \cite{homma_importance_1996}:
\begin{align*}
S_{Ti} &= 1-\frac{Var_{\textbf{X}(\sim i)}\big(E_{X_i}(Y|\textbf{X}(\sim i))\big)}{Var(Y)}
\end{align*}

\subsection{Summary of Sensitivity Analysis Studies}
Sensitivity analysis studies of nuclear fuel cycles have previously been used to narrow down and compare a wide range of nuclear fuel cycle scenarios in order to determine the ideal scenario. The evaluation and screening study \cite{wigeland_nuclear_2014} determined that the desired fuel cycle end states were fuel cycles involving continuous recycling of co-extracted U/Pu or U/TRU in fast spectrum critical reactors. The evaluation and screening study's sensitivity analysis focused on macro-level input parameters such as the types of reactor and reprocessing technologies. However, sensitivity analyses regarding dynamic nuclear fuel cycle transitions are rare. The only relevant sensitivity study was conducted by the OECD \cite{noauthor_effects_2017}; however, it was a basic one-at-a-time sensitivity analysis. Therefore, synergistic and global sensitivity analysis studies, focused on micro-level input parameters such as the length of cooling time and the introduction dates of reprocessing and reactor technologies, should be conducted to understand how their variation impacts the performance metrics. Using the sensitivity analysis results, these transition scenarios can be further optimized and used to inform other nuclear research areas. For example, by studying how the throughput of a reprocessing facility impacts the performance metrics, we can determine the ideal reprocessing facility size.
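
As a concrete illustration of the Latin Hypercube sampling step described in Section \ref{sec:sobol}, the following minimal NumPy sketch divides each parameter range into $N$ equal-probability sub-sections, draws one value per sub-section, and shuffles each column independently before combining them into $N$ parameter sets. The parameter names and bounds are hypothetical; a real study would feed each row to a fuel cycle simulation and pass the recorded metrics to a Sobol analysis.
\begin{verbatim}
# Minimal Latin Hypercube sampling sketch (illustrative only).
import numpy as np

rng = np.random.default_rng(42)

# Hypothetical parameter bounds: cooling time [yr], reprocessing intro year.
bounds = np.array([[3.0, 10.0],
                   [2030.0, 2060.0]])
n_samples = 100                       # N sample points
n_params = bounds.shape[0]            # M input parameters

# One random draw inside each of the N equal-probability sub-sections.
u = (np.arange(n_samples)[:, None]
     + rng.random((n_samples, n_params))) / n_samples

# Shuffle each column independently, then scale to the parameter bounds.
for j in range(n_params):
    u[:, j] = rng.permutation(u[:, j])
samples = bounds[:, 0] + u * (bounds[:, 1] - bounds[:, 0])

# Each of the N rows is one M-dimensional input set for a simulator run.
print(samples.shape)                  # (100, 2)
\end{verbatim}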
{ "alphanum_fraction": 0.7732015103, "avg_line_length": 55.1452054795, "ext": "tex", "hexsha": "4e09d47352247184d56f744ff021a6c55d5a6b5a", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "5fee7fa4890cf2d96de10b0f6b80ed7d8e805428", "max_forks_repo_licenses": [ "BSD-2-Clause" ], "max_forks_repo_name": "gwenchee/ms-thesis", "max_forks_repo_path": "chapter2.tex", "max_issues_count": 3, "max_issues_repo_head_hexsha": "5fee7fa4890cf2d96de10b0f6b80ed7d8e805428", "max_issues_repo_issues_event_max_datetime": "2019-07-01T18:27:57.000Z", "max_issues_repo_issues_event_min_datetime": "2019-07-01T18:27:15.000Z", "max_issues_repo_licenses": [ "BSD-2-Clause" ], "max_issues_repo_name": "gwenchee/ms-thesis", "max_issues_repo_path": "chapter2.tex", "max_line_length": 192, "max_stars_count": null, "max_stars_repo_head_hexsha": "5fee7fa4890cf2d96de10b0f6b80ed7d8e805428", "max_stars_repo_licenses": [ "BSD-2-Clause" ], "max_stars_repo_name": "gwenchee/ms-thesis", "max_stars_repo_path": "chapter2.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 4527, "size": 20128 }
\chapter{Preliminaries}
\label{chap:preliminaries}
In this chapter, we will provide background information in order to understand the previous work in this field that was introduced in Chapter \ref{chap:related-work}. We will also rely on the knowledge provided in this chapter when describing data collection and processing in Chapter \ref{chap:data}, as well as when constructing the experimental setup in Chapter \ref{chap:setup}. We ask the reader to be patient while reading this chapter: although the interaction between the components we will introduce may not be immediately obvious, it will become clear in Chapter \ref{chap:setup}, when the components are used to build a reinforcement learning environment and agents.

Firstly, the concept of the order book (which was introduced above) is described in greater detail, as this serves as the data structure for the historical data collected. Subsequently, a simplified match engine is described. We will use this to emulate a local broker that can match orders using the historical order book.
%The concept of a time series will then be introduced as the order book is essentially a multivariate time series.
Furthermore, reinforcement learning is introduced in order to identify the differences between it and other machine learning techniques. This is followed by a detailed explanation of all its components. Finally, deep reinforcement learning is introduced as an extension to the previously described reinforcement learning principles.

\section{Order Book}
\label{sec:order-book}
Traders post orders in a limit order book in order to state their intentions to buy (or sell) a given asset, as described in Section \ref{sec:problem-statement}. Orders listed in the limit order book provide \textit{liquidity} to the market, as other traders can accept these offers by posting an order with the equivalent price to sell (or buy) the asset. This section introduces the most popular order types under which traders can post their offers in a limit order book. We will identify the types that help ensure market liquidity, and which therefore benefit from lower fees, as well as those that let traders buy or sell assets immediately and thereby take liquidity from the market. Furthermore, the characteristics of a historical order book that is filled with orders from traders are explained, as knowing them will assist when the match engine is described in the subsequent section.

\subsection{Orders}
\label{sec:orders}
As indicated by the name, an order is an instruction to buy or sell a stock. There are various types of orders, which determine how the order that is placed should be executed by the exchange. In this section, we provide information about the two most common types, namely the \textit{limit order} and the \textit{market order}. We define the indication to buy or sell as the \textit{Order Side},
\begin{equation}\label{eq:order-side}
OrderSide=\{Buy, Sell\}
\end{equation}
\\
Before we define the order types in greater detail, we summarise the above and define the \textit{Order} as,
\begin{equation}\label{eq:order}
Order=\{Order_{Limit}, Order_{Market}\}
\end{equation}
\subsubsection{Limit order}
\label{sec:limit-order}
A limit order refers to an attempt to buy or sell a stock at a specific price or better,
\begin{equation}\label{eq:order-limit}
Order_{Limit}=(side, quantity, price_{Limit}),
\end{equation}
where $side \in OrderSide$, $quantity \in \mathbb{R^+}$ and $price_{Limit} \in \mathbb{R^+}$.
\\ \\
A buy limit order can only be executed at the limit price or lower, and a sell limit order can only be executed at the limit price or higher \cite{sec-limit-order}. More precisely, with respect to buy orders, if the best price on the opposing side of the book equals or falls below the limit price (or, for sell orders, equals or exceeds it), the broker will match those two orders, resulting in a \textit{trade}. The disadvantage of this order type is that there is no guarantee that the order will be executed. If no order appears on the opposing side, the order will remain (possibly forever) unexecuted.

\subsubsection{Market order}
\label{sec:market-order}
A market order refers to an attempt to buy or sell a stock at the current market price, expressing the desire to buy (or sell) at the best available price. Therefore,
\begin{equation}\label{eq:order-market}
Order_{Market}=(side, quantity),
\end{equation}
where $side \in OrderSide$ and $quantity \in \mathbb{R^+}$.
\\ \\
The advantage of a market order is that, as long as there are willing buyers and sellers, the execution of the order is almost always guaranteed \cite{sec-market-order}. The disadvantage is the less competitive price one pays when the order is executed. Market orders are executed by starting from the best price of the opposing side, then traversing down the book as liquidity is consumed. Hence, market orders tend to be expensive, especially large ones.

\subsection{Characteristics}
\label{sec:ob-characteristics}
Figure \ref{fig:intro-orderbook} shows a real world example of a limit order book; in this case the snapshot was taken from a known crypto-currency exchange. To be precise, this is the \textit{state} of an order book at some time $t$ and shows the current limit orders from participants at this moment in time (ignoring the possibility that the state might have changed during the data sending process). Hence, we refer to it as an \textit{order book state (OS)}. We refer to the \textit{order book (OB)} that is used in this project as a recorded historical sequence of order book states.
\begin{equation}\label{eq:order-book}
OB=OS_1, \ldots, OS_n
\end{equation}
As we can see, every such state holds entries whose \textit{price} or \textit{amount} change, on both the buyer's and the seller's sides. We refer to each row, formed by participants who submitted limit orders of some amount at the same price level, as the \textit{order book entry ($OE_{s_l}$)} of side $s$ at level $l$.
\begin{equation}\label{eq:order-book-entry}
OE_{s_l}=(count, price, amount),
\end{equation}
where $count \in \mathbb{N}$, $price \in \mathbb{R^+}$ and $amount \in \mathbb{R^+}$. As a result, the order book state is a sequence containing order book entries for each \textit{side} (buy and sell) and the time stamp $ts$ (in milliseconds) of the state,
\begin{equation}\label{eq:order-book-state}
OS=(ts, OE_{b_1}, \ldots, OE_{b_n}, OE_{s_1}, \ldots, OE_{s_n})
\end{equation}
\\
\begin{figure}[H]
\centering
\makebox[\linewidth]{ \includegraphics[width=10cm]{lob-simple.png} }
\caption{Figure taken from \cite{miranda}. Simplified limit order book, which provides an understanding of some characteristics.}
\label{fig:orderbook-simple}
\end{figure}
\hfill \\
Figure \ref{fig:orderbook-simple} illustrates a simplified order book, from which we can derive a number of definitions.
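
Before turning to those definitions, note that the formal objects introduced so far (Eq. \ref{eq:order}, \ref{eq:order-book-entry} and \ref{eq:order-book-state}) map naturally onto lightweight data structures. The following Python sketch is purely illustrative; the class and field names are our own and do not correspond to any particular implementation used later in this work.
\begin{verbatim}
# Illustrative data structures mirroring the order, order book entry and
# order book state definitions above (hypothetical names, simplified).
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional

class OrderSide(Enum):
    BUY = "buy"
    SELL = "sell"

@dataclass
class Order:
    side: OrderSide
    quantity: float
    limit_price: Optional[float] = None   # None => market order

@dataclass
class OrderBookEntry:
    count: int        # number of orders aggregated at this price level
    price: float
    amount: float     # total quantity offered at this price level

@dataclass
class OrderBookState:
    ts: int                          # timestamp in milliseconds
    buy_side: List[OrderBookEntry]   # sorted from best bid downwards
    sell_side: List[OrderBookEntry]  # sorted from best ask upwards
\end{verbatim}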
The \textit{limit level} specifies the position of an order book entry within the side of an order book state, and the \textit{market depth} corresponds to how deep in the order book buyers and sellers have listed offerings. A deep order book therefore indicates a large range of limit levels. The term \textit{volume} can relate to the total volume traded over a given time horizon, or can indicate the sum of what is currently offered up to a certain price. Considering the sides of the order book, a \textit{bid} refers to a price on the buyer side and the \textit{best bid} represents the highest price at which someone is willing to buy a given asset. The best bid appears as the first order book entry on the buyer side, closest to the spread. By contrast, an \textit{ask} refers to a price on the seller side and the \textit{best ask} represents the lowest price at which someone is willing to sell a given asset. The best ask appears as the first order book entry on the seller side, closest to the spread. Consequently, the \textit{market price} is the average of the best bid and best ask prices, and the \textit{spread} indicates the difference between the best bid and best ask. The most recent price upon which a buyer and seller agreed to trade a security is known as the \textit{quote}. In an \textit{order driven market}, liquidity is a synonym for the ease of trading. \textit{Liquidity} stands for the amount of shares provided by parties of the opposing side and is what effectively enables one to buy and sell securities. Liquidity is achieved by submitting limit orders which are not immediately executed. A \textit{market maker} provides liquidity to the market by posting limit orders which are not immediately executed. In return, the market maker pays a lower fee than a market taker, known as the \textit{maker fee}. By contrast, the \textit{market taker} takes liquidity out of the market by posting either market orders or limit orders which are immediately executed by the exchange. As loss of liquidity is not beneficial to the exchange, the market taker pays a slightly higher fee, known as the \textit{taker fee}.

\section{Match Engine}
\label{sec:match-engine}
The \textit{matching engine} is the component which is responsible for the process of matching buy and sell orders at a traditional stock exchange such as \textit{NASDAQ} or \textit{NYSE}, or at cryptocurrency exchanges such as \textit{Bitfinex} or \textit{Bittrex}. In order to determine the outcome of an order, the trader typically submits the order to an exchange and either trades on the live market or gets access to a test environment. Consequently, the order is processed on the current market and there is no option to process it on a historical data set in order to determine its hypothetical outcome, had the order been posted at some time $t$ in the past. For the aforementioned reasons, a \textit{local} match engine is being developed that evaluates the outcome of order placements using a historical order book data set, free of charge. This local match engine is a key element of the order placement optimization process, as the outcome of matched orders will directly affect the reward received by an agent, which, in turn, will use the reward to try to improve its own capabilities. This section will first define a \textit{trade} as the result of two matching orders.
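
As a brief aside, the quantities defined above (best bid, best ask, market price and spread) can be read directly off an order book state. The short sketch below reuses the hypothetical classes introduced in Section \ref{sec:ob-characteristics} and is, again, purely illustrative.
\begin{verbatim}
# Illustrative helpers for the derived order book quantities; they assume the
# hypothetical OrderBookState above, with buy_side sorted best-bid-first and
# sell_side sorted best-ask-first.

def best_bid(state) -> float:
    return state.buy_side[0].price

def best_ask(state) -> float:
    return state.sell_side[0].price

def market_price(state) -> float:
    # Average of the best bid and best ask prices.
    return 0.5 * (best_bid(state) + best_ask(state))

def spread(state) -> float:
    # Difference between the best ask and the best bid.
    return best_ask(state) - best_bid(state)
\end{verbatim}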
Subsequently, a time horizon--as an addition to the previously introduced order types (Section \ref{sec:orders})--is presented so that we can describe the interface of the match engine that will be used throughout the learning process. Finally, the rules relating to the implementation of the local match engine are outlined; these explain the mechanics of the matching process.

\subsection{Trade}
In order to understand the purpose of the matching process, which is described in more detail below, we first have to define what a \textit{trade} is. A trade results when the orders (Eq. \ref{eq:order}) of two parties on opposing order sides (Eq. \ref{eq:order-side}) agree on a quantity of shares and a price. That is,
\begin{equation}\label{eq:trade}
Trade=(ts, side, type, quantity, price),
\end{equation}
where $ts$ is the time stamp at which the participants agreed on the exchange of the products, $side \in OrderSide$, $type \in OrderType$, $quantity \in \mathbb{R^+}$ and $price \in \mathbb{R^+}$.

\subsection{Interface}
\label{sec:match-engine-interface}
This match engine enables the simulation and evaluation of order placement without the need to consult an electronic trading network. Alongside the order that is sent to the match engine (directly or via an electronic trading network), the user can specify a \textit{time horizon} $H$ indicating how long the order should stay active. The two most commonly used timing mechanisms are:
\begin{description}
\item[Good Till Time (GTT): ] The order stays in the order book until a specified amount of time elapses. \textit{(Some implementations define this as Good Till Date, which involves specifying a validity expiry date and time for the order.)}
\item[Good-Til-Canceled (GTC): ] The order stays in the order book until the user submits a cancellation.
\end{description}
\hfill \\
The match engine built in this project makes available an interface in the form of a function $match$, which takes any type of \textit{Order} (Section \ref{sec:orders}) and a time horizon $H$ and returns a sequence of \textit{trades} (Eq. \ref{eq:trade}). That is,
\begin{equation}
match : Order \times H \rightarrow Trades,
\end{equation}
where $|Trades| \in \mathbb{N}$. The order is \textit{filled} (which means ``fulfilled'') if the sum of the traded quantity is equal to the amount stated in the submitted order, \textit{partially-filled} if the traded quantity is $> 0$ but the order is not filled, and \textit{not filled} otherwise.
\\ \\
\textit{The matching process behaves differently depending on the submitted order type, and this is explained in the following paragraph.}

\subsection{Rules}
Compared to the rules applicable to match engines used in electronic trading networks, the rules presented below are rather primitive. Yet they are sufficiently accurate within the limited set of capabilities provided, compared to the capabilities of a real-world exchange. The rules used by the order matching engine are mainly derived from \cite{match-engine}:
\begin{enumerate}
\item Limit orders (Eq. \ref{eq:order-limit}) may be partially filled or not filled at all if there are no parties on the opposing side.
\item Market orders (Eq. \ref{eq:order-market}) will execute immediately if an opposite order has been previously submitted to the market.
\item Market orders may be partially filled, at different prices, depending on the liquidity on the opposing side of the book.
\item Attempts are made to match limit orders from a given point in time onward, or in the case of a Good Till Time (GTT), for as long as is specified.
\end{enumerate}

\subsection{Limitations}
Since the match engine used in this project is a rudimentary implementation for the purpose of simulating and analyzing order execution and placement, it features only a subset of what a conventional match engine, used by electronic trading networks, is capable of. That said, the following limitations have to be taken into consideration:
\begin{description}
\item[Participants:] Most importantly, the match engine is used locally, where no other participants are interacting during its use. In order to be able to approximate the most likely outcome, historical data serves to simulate the past actions of market participants. While this is valuable real world data, it unfortunately does not cover the possibility of hidden participants 1) entering or 2) leaving the market upon placing an order during the simulation. Participants who would enter the market would likely be favorable to us, as they would act as potential buyers and sellers and therefore provide liquidity. Participants who leave the market would introduce a slight disadvantage, as there would be less liquidity.
\item[Ordering:] This match engine is restricted to simulating the matching of only one order from one participant at a time. Hence, any type of ordered processing of incoming orders (typically solved with a queuing system) is not supported. However, this functionality is also not required for our purposes.
\item[Timing inaccuracy:] This occurs when submitting an order with a time horizon (see Section \ref{sec:match-engine-interface}). The fact that we are relying on historical data and the time stamps of the orders submitted by participants in the past is a limitation when submitting an order over a certain period of time (GTT). It can occur that, at the end of the period, the order would have some time $t$ left (e.g. a few seconds) but the next available order book state lies further in the future than $t$ would allow. We will therefore have to abort the matching process early.
\end{description}

\section{Order execution and placement}
\label{sec:execution-placement}
From the above descriptions of the order book and the match engine, it is obvious that a trader has a variety of ways to approach a market and fulfill his duties to buy (or sell) shares. Conceptually, the process a trader follows involves these two steps: \textit{order execution} and \textit{order placement}; the latter is the main subject of this thesis. Many useful definitions which highlight the difficulties related to the domain of order execution were stated by Lim et al. \cite{lim2005optimal} and Guo et al. \cite{guo2013optimal}. Most importantly, \textit{order execution} concerns optimally slicing big orders into smaller ones in order to minimize the price impact, that is, moving the price up by executing large buy orders (respectively down for sell orders) at once. By splitting up a big order into smaller pieces and spreading its execution over an extended time horizon (typically on a daily or weekly basis), the impact cost can be lessened. By contrast, \textit{order placement} concerns optimally placing orders within ten to one hundred seconds. Placing refers to the setting of the limit level for a limit order, as described in Section \ref{sec:limit-order}. Its aim is to minimize the \textit{opportunity cost} which arises when the price moves against us.
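
To tie the interface of Section \ref{sec:match-engine-interface} and the matching rules above together, the following sketch walks a market buy order down the sell side of one historical order book state and collects the resulting trades; the volume-weighted price of those trades connects directly to the VWAP measure introduced next. The function is a simplification under the hypothetical data structures sketched earlier and is not the match engine implementation itself (it ignores time horizons and fees).
\begin{verbatim}
# Simplified market-order matching against one order book state
# (illustrative only; ignores time horizons, fees and later states).

def match_market_buy(state, quantity: float):
    """Walk down the sell side, consuming liquidity until quantity is filled."""
    trades = []
    remaining = quantity
    for entry in state.sell_side:            # best ask first
        if remaining <= 0:
            break
        traded = min(remaining, entry.amount)
        trades.append((state.ts, "buy", "market", traded, entry.price))
        remaining -= traded
    return trades                            # may be only partially filled

def volume_weighted_price(trades) -> float:
    total_volume = sum(qty for _, _, _, qty, _ in trades)
    return sum(qty * price for _, _, _, qty, price in trades) / total_volume
\end{verbatim}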
Literature\cite{nevmyvaka2006reinforcement, guo2013optimal} suggests using the \textit{volume weighted average price (VWAP)} as measures of the \textit{return} of order placement and order execution. That is, \begin{equation}\label{eq:vwap} p_{vwap}=\frac{\sum{v_p*p}}{V}, \end{equation} whereas $p$ is the price paid for $v_p$ shares and $V$ represents the total volume of shares. %\section{Time series} % %According to the efficient market hypothesis\cite{malkiel1989efficient}, the price of an asset reflects all the information available to the market participants at any give time. %The natural consequence of this vast flow of information is that the price of such an asset changes over time. %As we saw in the previous section \ref{sec:order-book}, the price of an asset is determined by actions taken by traders. %Therefore, financial markets and particularly the order book are best described over time, namely as a \textit{time series}. %More precisely, the definition of a time series is an ordered sequence of values of a variable at equally-spaced time intervals \cite{intro-timeseries}. %The applications of time series data are generally known as Time Series Analysis and Time Series Forecasting, both of which have played important roles throughout this project, and therefore a brief background is provided in this section. % %\subsection{Time series analysis} % %The analysis of data observed at different points in time leads to problems in statistical modeling and inference. %More specifically, the correlation of adjacent points in time can restrict the applicability of conventional statistical methods which traditionally depend on the assumption that these adjacent observations are independent and identically distributed. %A systematic approach, by which one attempts to answer the mathematical and statistical questions posed by these time correlations, is commonly referred to as time series analysis. %Therefore, mathematical models are developed with the primary objective of providing plausible descriptions for sample data. \cite{shumway2000time} %\\ %\\ %Some of the time series behaviors which will be presented within this work may hint at a sort of regularity over time. %We will refer to the notion of regularity using a concept called \textit{stationarity}, as introduced in \cite{shumway2000time}. %\\ %\\ %A \textbf{strictly stationary} time series is one for which the probabilistic behavior of every collection of values %${x_{t_1}, x_{t_2}, ..., x_{t_k}}$ %is identical to that of the time shifted set %${x_{t_1+h}, x_{t_2+h}, ..., x_{t_k+h}}$ %for all time shifts $h=0,\pm1,\pm2,...$ %\\ %\\ %A \textbf{weakly stationary} time series, $x_t$, is a finite variance process such that %\begin{itemize} % \item the mean value function $\mu_t$ is constant and does not depend on time $t$, and % \item the autocovariance function $\gamma(s, t)$, depends on $s$ and $t$ only through their difference $|s-t|$. %\end{itemize} % %Whereas $\mu_t$ is defined as % %\begin{equation} % \mu_{t}=\mathbb{E}(x_t)=\int_{-\infty}^{\infty} x f_t(x) dx %\end{equation} % %with $f_t$ being the \textit{marginal density function} \cite{shumway2000time}. %And $\gamma(s, t)$ is defined as %\begin{equation} % \gamma(s, t)=cov(x_s, x_t)=\mathbb{E}[(x_s-\mu_s)(x_t-\mu_t)] %\end{equation} % %for all time points $s$ and $t$. 
%\\
%\\
%\textit{Henceforth, we will use the term stationary to mean weakly stationary; if a process is stationary in the strict sense, we will use the term strictly stationary.}
%
%\subsection{Time series forecasting}
%
%In statistics, prediction is a part of statistical inference.
%Providing a means for the transfer of knowledge about a sample of a population to the whole population, and to other related populations, is one definition of statistics.
%However, this is not necessarily equivalent to the process of predicting over time.
%This process is known rather as forecasting and describes the transfer of information across time and often to very specific point in time. \cite{wiki-timeseries}.
%Hence the problem is defined in \cite{ito1993encyclopedic} as: \textit{forecasting future values $X_{t+h}$ where $h > 0$ of a weakly stationary process ${X_t}$ from the known values $X_s$ where $s \leq t$}.
%The integer $h$ is called lead time or forecasting horizon, whereas $h$ stands for horizon.
%\\
%\\
%Forecasting methods can be classified, according to \cite{chatfield2000time}, into three types: \textit{Judgemental forecasts} produce projections based on intuition, inside knowledge, and any other relevant information.
%\textit{Univariate methods} forecasts depend on present or past values of the time series on which the forecast is projected.
%Finally, \textit{multivariate methods} forecasts depend on one or more additional time series variables or multivariate models.
%\\
%\\
%\textit{Over the course of this work, we make use of univariate and multivariate methods.}

\section{Reinforcement Learning}
\label{sec:reinforcement-learning}
This section first aims to describe what Reinforcement Learning is and highlight its differences compared to other machine learning paradigms. We will briefly discuss why this particular technique might be an appropriate choice for the task of optimizing order placement. Then, a basic understanding of Markov Decision Processes will be provided, after which we will explain the interaction between the Reinforcement Learning components. This will be followed by a description of their properties.

\subsection{Advantages of end-to-end learning}
\begin{figure}[H]
\centering
\makebox[\linewidth]{ \includegraphics[width=8cm]{ml-rl.png} }
\caption{Categorization of machine learning techniques}
\label{fig:ml-rl}
\end{figure}
Reinforcement learning is a specific learning approach within the machine learning field (see Figure \ref{fig:ml-rl}) and aims to solve problems which involve \textit{sequential decision making}. Therefore, when a decision made in a system affects future decisions and eventually an outcome, reinforcement learning allows us to learn about the optimal sequence of decisions.
\begin{figure}[H]
\centering
\makebox[\linewidth]{ \includegraphics[width=10cm]{rl-pipeline.png} }
\caption{Reinforcement learning end-to-end learning pipeline}
\label{fig:rl-pipeline}
\end{figure}
With respect to the optimization of order placement in limit order books, statistical approaches have long been the preferred choice. While statistics emphasizes inference from a process, machine learning emphasizes the prediction of the future with respect to some variable. Machine learning paradigms such as supervised learning rely on an algorithm that learns from already-labeled data, in which a specific situation is presented together with the correct action to take. From there, the algorithm tries to generalize the model.
In reinforcement learning, by contrast, there is no supervision and instead an agent learns by maximizing rewards. The feedback retrieved while executing a task that has a sequence of actions might be delayed over several time steps and hence the agent might spend some time exploring until it finally reaches the goal and can update its strategy accordingly. This process can be regarded as \textit{end-to-end learning} and is illustrated in Figure \ref{fig:rl-pipeline}. In abstract terms, the agent makes an \textit{observation} of its environment and estimates a \textit{state} for which it \textit{models and predicts} the \textit{action} to be taken. Once the action is executed, the agent receives a \textit{reward} and will take this into consideration during future prediction phases. The beauty of this is that an arbitrarily complex process can be regarded as a black box as long as it can take an input from the learner to do its job and report how well the task was executed. In our context, this means that we would model the order placement process pipeline whereas the learner improves upon the outcome of the submitted orders. In addition, for reinforcement learning problems, the data is not independent nor identically distributed (I.I.D). The agent might in fact, while exploring, miss out on some important parts to learn the optimal behavior. Hence, time is crucial as the agent must explore as many parts of the environment as possible to be able to take the appropriate actions \cite{rl-demystified}. \\ \\ \textbf{Example:} Since we are working with financial systems, let us assume we want to buy and sell stocks on a stock exchange. In reinforcement learning terms, the trader is represented as an \textit{agent} and the exchange is the \textit{environment}. The details of the environment do not have to be known as it is rather regarded as a black box. The agent's purpose is to observe features of the environment, for example, the current price of a stock. The agent then makes estimates about the situation of the observed state and decides which action to take next – buy or sell. The action is then sent to the environment which determines whether this was a good or bad choice, for example, whether we made a profit or a loss. \subsection{Markov Decision Process (MDP)} \label{rl-mdp} A process such as the one outlined above can be formalized as a Markov Decision Process. An MDP is a 5-tuple $(S, A, P, R, \gamma)$ where: \begin{enumerate} \item $S$ is the finite set of possible states $s_t \in S$ at some time step. \item $A(s_t)$ is the set of actions available in the state at time step $t$, that is $a_t \in A(s_t)$, whereas $A=\bigcup_{s_t \in S} A(s_t)$ \item $p(s_{t+1} | s_t, a_t)$ is the state transition model that describes how the environment state changes, depending on the action $a$ and the current state $s_t$. \item $p(r_{t+1} | s_t, a_t)$ is the reward model that describes the immediate reward value that the agent receives from the environment after performing an action in the current state $s_t$. \item $\gamma \in [0,1]$ is the discount factor which determines the importance of future rewards. \end{enumerate} \subsection{Interaction} \begin{figure}[H] \centering \makebox[\linewidth]{ \includegraphics[width=10cm]{rl-overview.png} } \caption{Figure taken from \cite{rl-demystified}: interaction between a reinforcement learning agent and the environment. 
An action is taken by the agent that results in some reward and a new state.}
\label{fit:rl-overview}
\end{figure}
\\ \\
A reinforcement learning problem is commonly defined with the help of two main components: \textit{Environment} and \textit{Agent}.
\\ \\
With the interfaces provided above (Section \ref{rl-mdp}), we can define an interaction process between an agent and environment by assuming discrete time steps: $t=0, 1, 2, ...$
\begin{enumerate}
\item The agent observes a state $s_t \in S$
\item and produces an action at time step $t$: $a_t \in A(s_t)$
\item which leads to a reward $r_{t+1} \in R$ and the next state $s_{t+1}$
\end{enumerate}
\\ \\
During this process, and as the agent aims to maximize its future reward, the agent consults a \textit{policy} that dictates which action to take, given a particular state.

\subsubsection{Policy}
A policy is a function that can be either deterministic or stochastic. The distribution $\pi(a|s)$ is used for a stochastic policy and a mapping function $\pi(s) : S \rightarrow A$ is used for a deterministic policy, where $S$ is the set of possible states and $A$ is the set of possible actions.
\\ \\
The stochastic \textit{policy} at time step $t$, $\pi_t$, is a mapping from states to action probabilities, built up as a result of the agent's experience; therefore, $\pi_t(a|s)$ is the probability that $a_t=a$ when $s_t=s$.

\subsubsection{Reward}
The goal is that the agent learns how to select actions in order to maximize its future reward when submitting them to the environment. We rely on the standard assumption that future rewards are discounted by a factor of $\gamma$ per time step, in the sense that the total discounted reward amounts to $r_1 + \gamma*r_2 + \gamma^2*r_3 + \gamma^3*r_4 + ...$
\\
Hence, we can define the future discounted \textit{return} at time $t$ as
\begin{equation}\label{eq:discounted-return}
R_t=\sum_{i=t}^{T}{\gamma^{i-t}{*}r_{i}},
\end{equation}
where $T$ is the length of the episode (which can be infinity if there is no maximum length for the episode).
\\
The discounting factor has two purposes: it prevents the total reward from going to infinity (provided $\gamma < 1$), and it enables the preferences of the agent for immediate rewards or potential future ones to be controlled \cite{rl-demysitifed2}.

\subsubsection{Value Functions}
When the transition function of an MDP is not available, model-free reinforcement learning allows the agent to simply rely on some trial-and-error experience for action selection in order to learn an optimal policy. Therefore, the value of a state $s$ indicates how good or bad a state is for the agent to be in, measured by the expected total reward for an agent starting from this state.
\subsubsection{Value Functions}
When the transition function of an MDP is not available, model-free reinforcement learning allows the agent to simply rely on trial-and-error experience for action selection in order to learn an optimal policy. In this context, the value of a state $s$ indicates how good or bad a state is for the agent to be in, measured by the expected total reward for an agent starting from this state. Hence we introduce the \textbf{value function}, which depends on the policy the agent chooses its actions to be guided by:
\begin{equation}\label{eq:value-function}
V^{\pi}(s)=\mathbb{E}\left[R_t \mid s_t=s\right]=\mathbb{E}\left[\sum_{i=t}^{T}{\gamma^{i-t} r_{i}} \,\Big|\, s_t=s\right]\ \forall s \in S
\end{equation} \\
Among all value functions, there is an \textbf{optimal value function}, which attains the highest value for every state:
\begin{equation}\label{eq:optimal-value-function}
V^{*}(s)=\max_{\pi}\ V^{\pi}(s)\ \forall s \in S
\end{equation} \\
Furthermore, the \textbf{optimal policy} $\pi^*$ can be derived as
\begin{equation}\label{eq:value-function-policy}
\pi^{*}=\arg\max_{\pi}\ V^{\pi}(s)\ \forall{s}\in{S}
\end{equation} \\
In addition to the value of a state with respect to the expected total reward to be achieved, we might also be interested in a value which determines how good it is to be in a certain state $s$ and take a certain action $a$. To get there, we first introduce the \textbf{Q function}, which takes a state-action pair and returns a real value:
\begin{equation}\label{eq:q-function}
Q:S\times{A}\rightarrow{\mathbb{R}}
\end{equation} \\
Finally, the \textbf{optimal action-value function} (or \textbf{optimal Q function}) $Q^*(s,a)$ is defined as the maximum expected return achievable after seeing some state $s$ and then taking some action $a$. That is,
\begin{equation}\label{eq:optimal-action-value-function}
Q^*(s,a)=\max_{\pi}\ \mathbb{E} [ R_t | s_t=s, a_t=a, \pi ]
\end{equation}
with the policy $\pi$ mapping the states to either actions or distributions over actions. \\ \\
The relationship between the \textit{optimal value function} and the \textit{optimal action-value function} is, as their names suggest, easily obtained as
\begin{equation}
V^*(s)=\max_{a}\ Q^*(s,a)\ \forall{s}\in{S}
\end{equation} \\
and thus the \textit{optimal policy} for state $s$ can be derived by choosing the action $a$ that gives the maximum value:
\begin{equation}\label{eq:optimal-policy-s}
\pi^*(s)=\arg \max_{a}\ Q^*(s, a)\ \forall{s}\in{S}
\end{equation}
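As a small illustration of Eq. \ref{eq:optimal-policy-s}, the following Python sketch extracts the greedy action for a state, assuming the action values are stored in a plain dictionary (e.g., as a tabular method would produce); the names and numbers are illustrative only.
\begin{verbatim}
# Greedy policy: pi*(s) = argmax_a Q(s, a)
def greedy_action(Q, s, actions):
    # Q is a dict mapping (state, action) -> estimated value
    return max(actions, key=lambda a: Q[(s, a)])

Q = {("s0", "buy"): 1.3, ("s0", "sell"): 0.4}   # example values
print(greedy_action(Q, "s0", ["buy", "sell"]))  # -> "buy"
\end{verbatim}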
\subsection{Environment}
\label{sec:rl-environment}
There are two types of environments: In a \textit{deterministic environment}, both the state transition model and the reward model are deterministic functions. In this setup, if the agent in a given state $s_t$ repeats a given action $a$, the result will always be the same next state $s_{t+1}$ and reward $r_t$. In a \textit{stochastic environment}, there is uncertainty about the outcome of taking an action $a$ in state $s_t$, as the next state $s_{t+1}$ and received reward $r_t$ might not be the same each time. \textit{Deterministic environments are, in general, easier to solve as the agent learns to improve the policy without uncertainties in the MDP.}

\subsection{Agent}
\label{sec:rl-agent}
The goal of the agent is to solve the MDP by finding the optimal policy, which means finding the sequence of actions that leads to receiving the maximum possible reward. However, there are various approaches to this, which are commonly categorized (see \cite{rl-demysitifed2}) as follows. A \textit{value based agent} starts off with a random value function and then finds a new (improved) value function in an iterative process, until reaching the optimal value function (Eq. \ref{eq:optimal-value-function}). As shown in Eq. \ref{eq:value-function-policy}, one can easily derive the optimal policy from the optimal value function. A \textit{policy based agent} starts off with a random policy, then finds the value function of that policy and derives a new (improved) policy based on the previous value function, until it finds the optimal policy (Eq. \ref{eq:optimal-policy-s}). Each policy is guaranteed to be a strict improvement over the previous one (unless it is already optimal). As stated in Eq. \ref{eq:value-function}, given a policy, one can derive its value function. The \textit{actor-critic agent} is a combination of a value-based and a policy-based agent: both the policy and a value estimate for each state are stored. A \textit{model-based agent} attempts to approximate the environment using a model and then uses this model to suggest the best possible behavior.

\subsection{Deep Reinforcement Learning}
\label{sec:deep-reinforcement-learning}
\textit{``Reinforcement learning can be naturally integrated with artificial neural networks to obtain high-quality generalization''} \cite{deeprlcourse}. The term \textit{generalization} refers to the action-value function (Eq. \ref{eq:optimal-action-value-function}) and the fact that this value is otherwise estimated for each state separately, which becomes impractical for the large state spaces that occur in real-world scenarios. Deep reinforcement learning generally means approximating the value function, the policy, or the model of reinforcement learning via a neural network. Neural networks approximate a function in a non-linear fashion, which is generally preferred in reinforcement learning; the price is that the resulting estimate is only a local optimum, which is not always desirable. In our particular case, we use deep reinforcement learning in order to approximate the action-value function (Eq. \ref{eq:optimal-action-value-function}). Therefore, we represent the action-value function with weights $\theta$ as
\begin{equation}
Q(s, a; \theta) \approx Q^*(s,a)
\end{equation}
Given a state $s$, the neural network outputs $n$ linear output units (corresponding to $n$ actions), as shown in Figure \ref{fig:drl-qvalues}. The agent will then choose the action with the maximum Q-value.
\begin{figure}[H]
\centering
\makebox[\linewidth]{ \includegraphics[width=8cm]{drl-qvalues} }
\caption{Neural network outputs Q-values}
\label{fig:drl-qvalues}
\end{figure}
\hfill \\
In terms of the previously described end-to-end reinforcement learning pipeline, the use of a function approximator simplifies the process. We can omit the state estimation step and instead rely on raw features \cite{mnih2013playing}, as illustrated in Figure \ref{fig:drl-pipeline}:
\begin{figure}[H]
\centering
\makebox[\linewidth]{ \includegraphics[width=8cm]{drl-pipeline.png} }
\caption{Deep reinforcement learning end-to-end learning pipeline}
\label{fig:drl-pipeline}
\end{figure}
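To illustrate how a network with $n$ linear output units yields one Q-value per action and how the greedy action is then selected, the following minimal NumPy sketch performs a single forward pass; the layer sizes, weights, and feature vector are purely illustrative and do not correspond to the architecture used later in this work.
\begin{verbatim}
import numpy as np

def q_values(state_features, W1, b1, W2, b2):
    # One hidden layer with ReLU, followed by n linear outputs,
    # i.e., Q(s, a; theta) for every action a at once.
    h = np.maximum(0.0, W1 @ state_features + b1)
    return W2 @ h + b2             # shape: (n_actions,)

rng = np.random.default_rng(0)
d, hidden, n_actions = 8, 16, 3    # illustrative sizes
W1, b1 = rng.normal(size=(hidden, d)), np.zeros(hidden)
W2, b2 = rng.normal(size=(n_actions, hidden)), np.zeros(n_actions)

s = rng.normal(size=d)             # raw feature vector of a state
q = q_values(s, W1, b1, W2, b2)
best_action = int(np.argmax(q))    # greedy action selection
\end{verbatim}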
{ "alphanum_fraction": 0.7727766611, "avg_line_length": 72.468503937, "ext": "tex", "hexsha": "08c7f91f6ed5f4fa001adda7bead97f238b03769", "lang": "TeX", "max_forks_count": 35, "max_forks_repo_forks_event_max_datetime": "2022-03-01T23:17:00.000Z", "max_forks_repo_forks_event_min_datetime": "2019-02-08T02:00:31.000Z", "max_forks_repo_head_hexsha": "b2c2627ff0e86e27f6829170d0dac168d8e5783b", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "mikimaus78/ml_monorepo", "max_forks_repo_path": "ctc-executioner/report/chapter-2.tex", "max_issues_count": 2, "max_issues_repo_head_hexsha": "b2c2627ff0e86e27f6829170d0dac168d8e5783b", "max_issues_repo_issues_event_max_datetime": "2019-11-09T01:30:32.000Z", "max_issues_repo_issues_event_min_datetime": "2019-02-23T18:54:22.000Z", "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "mikimaus78/ml_monorepo", "max_issues_repo_path": "ctc-executioner/report/chapter-2.tex", "max_line_length": 303, "max_stars_count": 51, "max_stars_repo_head_hexsha": "b2c2627ff0e86e27f6829170d0dac168d8e5783b", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "mikimaus78/ml_monorepo", "max_stars_repo_path": "ctc-executioner/report/chapter-2.tex", "max_stars_repo_stars_event_max_datetime": "2022-03-16T09:07:03.000Z", "max_stars_repo_stars_event_min_datetime": "2019-02-01T19:43:37.000Z", "num_tokens": 8754, "size": 36814 }
\documentclass[mathserif,t]{beamer} \usepackage{pgfplots} \usetikzlibrary{positioning} \usetikzlibrary{fit} \usetikzlibrary{backgrounds} \usetikzlibrary{calc} \usetikzlibrary{shapes} \usetikzlibrary{mindmap} \usetikzlibrary{decorations.text} \pgfplotsset{compat=1.7} \usetheme{Boadilla} \usecolortheme{seagull} \newcommand{\tikzmark}[1]{\tikz[overlay,remember picture] \node (#1) {};} \setbeamertemplate{enumerate items}{(\arabic{enumi})} \setbeamertemplate{itemize items}[circle] \setbeamercovered{transparent=15} \newcounter{savedenum} \newcommand*{\saveenum}{\setcounter{savedenum}{\theenumi}} \newcommand*{\resume}{\setcounter{enumi}{\thesavedenum}} \newcommand{\blind}{0} \usepackage{amsmath,amsthm,amssymb,amsfonts} \usepackage{enumerate} \usepackage{graphicx} % Allows including images \usepackage{color,colortbl} \usepackage{booktabs} % Allows the use of \toprule, \midrule and \bottomrule in tables \usepackage{caption} \usepackage{algorithm} \usepackage{algpseudocode} \usepackage{bbm} \setbeamertemplate{bibliography item}{} \setbeamertemplate{theorems}[numbered] \newtheorem{thm}{Theorem} \newtheorem{lem}[thm]{Lemma} \newtheorem{cor}{Corollary} \newtheorem*{defi*}{Definition} \newcommand{\G}{c} \providecommand{\mb}[1]{\boldsymbol{#1}} \providecommand{\sct}[1]{{\normalfont\textsc{#1}}} \providecommand{\mt}[1]{\widetilde{#1}} \newcommand{\Real}{\mathbb{R}} \newcommand{\Mgc}{MGC} \newcommand{\mbx}{X} \newcommand{\mby}{Y} \newcommand{\GG}{c} \newcommand{\E}{\hat{E}} \definecolor{UniBlue}{RGB}{250,250,250}%{0,26,56} \definecolor{UniOrange}{RGB}{0,26,56}%{253,185,19} \definecolor{UniTitle}{RGB}{0,0,0}%{192,192,192} \definecolor{UniText}{RGB}{0,0,0}%{200,200,200} \definecolor{OutCirc}{RGB}{125,154,197}%{185,224,247} \setbeamerfont{title}{size=\LARGE} \setbeamerfont{section title}{size=\Huge} \setbeamercolor{title}{fg=UniTitle} \setbeamertemplate{frametitle}{\fontfamily{qtm}\LARGE\color{UniOrange}\bfseries\insertframetitle\vskip-6pt\par\hrulefill} \setbeamercolor{background canvas}{bg=UniBlue} \setbeamercolor{normal text}{fg=UniText} \setbeamercolor{structure}{fg=UniText} \setbeamercovered{invisible} \setbeamercolor{navigation symbols}{fg=UniOrange, bg=UniOrange} \setbeamercolor{palette sidebar secondary}{fg=UniOrange,bg=UniOrange} \setbeamercolor{section in sidebar shaded}{fg=UniOrange,bg=UniOrange} \setbeamercolor{footlinecolor}{fg=UniOrange,bg=UniOrange} \setbeamertemplate{navigation symbols}{} \setbeamertemplate{footline} { \leavevmode% \hbox{% \hfill \hskip5pt% \color{UniOrange} \insertshortauthor \hskip245pt% \usebeamercolor[fg]{navigation symbols}%\insertslidenavigationsymbol% \insertframenavigationsymbol% %\insertsubsectionnavigationsymbol% \insertsectionnavigationsymbol% %\insertdocnavigationsymbol% \hskip15pt% \insertshorttitle: \ \insertframenumber/\inserttotalframenumber} } \setbeamertemplate{section page} { \begingroup \begin{beamercolorbox}[sep=100pt,center]{section title} \usebeamerfont{section title}\insertsection\par \end{beamercolorbox} \endgroup } \title[MGC]{\fontfamily{qtm} \bfseries Dependency Discovery via\\ Multiscale Graph Correlation} \bigskip \bigskip \author[C. Shen]{\large\textcolor{UniTitle}{\textit{Cencheng Shen}}} % Your name \institute[JHU]{\footnotesize\color{UniTitle}\textit{University of Delaware}\\ % Your institution as it will appear on the bottom of every slide, may be shorthand to save space \bigskip \bigskip \bigskip \bigskip \textit{Collaborators: Carey E. Priebe, Joshua T. 
Vogelstein, Shangsi Wang, Ronak Mehta, Eric Bridgeford, Sambit Panda, Junhao Xiong, Youjin Lee, Qing Wang, Alex Badea, Xu Ting, Mauro Maggioni.\\ \medskip Acknowledgment: NSF DMS, DARPA SIMPLEX.\\ \medskip \medskip}} \date{\footnotesize\color{UniTitle}\footnotesize\textit{}} % Date, can be changed to a custom date \tikzset{ invisible/.style={opacity=0}, visible on/.style={alt=#1{}{invisible}}, alt/.code args={<#1>#2#3}{% \alt<#1>{\pgfkeysalso{#2}}{\pgfkeysalso{#3}} % \pgfkeysalso doesn't change the path }, } \AtBeginSection{\frame{\sectionpage}} \begin{document} \bibliographystyle{ieeetr} \begin{frame} \titlepage % Print the title page as the first slide \end{frame} %1. Intro; 2. it is linear regression and classification; 3. its origin, and why we start investigation; 4. explain title \setbeamertemplate{section in toc}{\inserttocsectionnumber.~\inserttocsection} \begin{frame} \frametitle{Overview} % Table of contents slide, comment this block out to remove it \tableofcontents % Throughout your presentation, if you choose to use \section{} and \subsection{} commands, these will automatically be printed on this slide as an overview of your presentation \end{frame} \section{Motivation} \begin{frame}{Motivation} Given paired data $(\mathcal{X}_{n},\mathcal{Y}_{n})=\{(x_{i},y_{i}) \in \Real^{p} \times \Real^{q}, \ \mbox{ for } i=1,\ldots,n\}$, \pause \begin{itemize}[<+->] \item Are they related? \item How are they related? \end{itemize} \pause \medskip %If the data has no dependency signal, it is futile to build a joint model or to predict one data from another. \begin{table} \centering \begin{tabular}{|c|c|} \hline \textbf{$X$} & \textbf{$Y$} \\ \hline % Oracle \Mgc & \textbf{50} & 60 & \textbf{70} & \textbf{135} \\ % \hline brain connectivity & creativity / personality \\ \hline brain shape & health \\ \hline gene / protein & cancer\\ \hline social networks & attributes \\ \hline anything & anything else \\ \hline \end{tabular} \end{table} \end{frame} \begin{frame}{Formal Definition of Independence Testing} \begin{align*} & (x_{i},y_{i}) \stackrel{i.i.d.}{\sim} F_{XY}, \ \ i=1, \ldots, n\\ & \\ & H_{0}: F_{XY}=F_{X}F_{Y},\\ & H_{A}: F_{XY} \neq F_{X}F_{Y}. \end{align*} \pause \medskip A dependence / correlation measure is any statistic that can be used to test the above hypothesis. \pause \medskip We desire a universally consistent test via some dependence measure, i.e., the testing power converges to $1$ as $n \rightarrow \infty$ against any dependent $F_{XY}$. \pause \medskip Without loss of generality, we shall assume $F_{XY}$ has finite second moments. %\pause %\medskip %For a given test statistic (MGC, Dcorr, HHG, HSIC, etc.), finite-sample testing is almost always carried out via a permutation test (unless the null distribution is known, which is generally not the case), and the null is rejected when the p-value is sufficiently small. %\pause %\medskip %For any scientist trying to identify some relationship from their data, they desire a test statistic of high finite-sample testing power (rather than asymptotically)! %The power of a test is defined as the probability that it correctly rejects the null when the null is indeed false, and has power equal to the type $1$ error level when the null is true. And a test is universally consistent if its power converges to $1$ as $n \rightarrow \infty$ whenever $f_{xy} \neq f_x f_y$. 
\end{frame} \begin{frame}{Benchmarks} \begin{tikzpicture}[mindmap, concept/.append style={fill={none}}, root concept/.style={concept color=OutCirc}, level 1 concept/.append style= {every child/.style={concept color=OutCirc},level distance = 32mm}, level 2 concept/.append style= {every child/.style={concept color=UniOrange},level distance = 19mm}, every node/.append style={align=center,scale=0.8}, ] \node [concept,font=\huge,color=OutCirc] {\textcolor{UniTitle}{Correlation Measures}} child[grow=0, visible on=<3->] {node[concept] {\large Linear} child[grow=80, visible on=<3->]{node[concept] {\large Pearson}} child[grow=30, visible on=<3->]{node[concept] {\large Rank}} child[grow=-20, visible on=<3->]{node[concept] {\large CCA}} child[grow=-70, visible on=<3->]{node[concept] {\large RV}} } child[grow=-90,visible on=<4->] {node[concept] {\large Non-linear} child[grow=-30,visible on=<4->]{node[concept] {\large MIC}} % child[grow=270,visible on=<3->]{node[concept] {\large HHG}} child[grow=210,visible on=<4->]{node[concept] {\large Mantel}} } child[grow=180,visible on=<5->] {node[concept] {\large Universal \\ Consistent} child[grow=110,visible on=<5-> ] {node[concept] {\large Dcorr}} child[grow=180,visible on=<5->] {node[concept] {\large HSIC}} child[grow=250,visible on=<5->] {node[concept] {\large HHG}} }; %\node at (0,0) [inner sep=9mm,decorate,circle,decoration= %{text along path,text={}}] {}; %\draw decorate[decoration={text along path,text={Equally Effective}}] %{(-3,0) arc (135:45:.5cm)}; \end{tikzpicture} \end{frame} \begin{frame}{Motivations} Modern data sets may be \textbf{high-dimensional, nonlinear, noisy, of limited sample size, structured, from disparate spaces}. Thus we desire a test that \pause \medskip \begin{itemize}[<+->] \item is consistent against all dependencies; \item has good finite-sample testing performance; \item is easy to understand and efficient to implement; \item *provides insights into the dependency structure. \end{itemize} \pause \medskip %Existing method has pros and cons with respect to each point. \\ % %\pause %\medskip To that end, we propose the \textbf{multiscale graph correlation} in [\textit{Shen et al.(2018)}]. 
\end{frame} \section{Methodology} \begin{frame}{Flowchart of \Mgc} \makebox[\textwidth][c]{% \begin{tikzpicture}[ outpt/.style={->,OutCirc,very thick}, outpt2/.style={-,OutCirc,thick}, >=stealth, every node/.append style={align=left}] \node (source) at (0,0){$(\mathcal{X}_{n},\mathcal{Y}_{n})$}; \node (source2)at (3,3){$d(\cdot,\cdot)$}; \pause \node (kaela)[right=0.8cm of source]{\begin{tabular}{@{}c}Computing \\ Distances \\ \& Centering: \\ $A, B \in \mathbb{R}^{n \times n}$\end{tabular}}; \draw[outpt](source)--(kaela); \draw[outpt](source2)--(kaela); \pause \node (dc)[below=0.8cm of kaela]{\begin{tabular}{@{}c} $Dcov =$ \\ $\sum\limits_{i,j=1}^{n} A_{ij}B_{ji}$ \end{tabular}}; \draw[outpt2](kaela)--(dc); \pause \node (accessfile) [right=0.9cm of kaela] {\begin{tabular}{@{}c}Incorporating \\ Locality: \\ $\{A^{k}, B^{l} \in \mathbb{R}^{n \times n}$, \\ for $k,l \in [n]\}$ \end{tabular}}; \draw[outpt](kaela)--(accessfile); \pause \node (dc2)[below=0.8cm of accessfile]{\begin{tabular}{@{}c} $Dcov^{k,l} =$ \\ $\sum\limits_{i,j=1}^{n} A_{ij}^{k}B_{ji}^{l}-\sum\limits_{i,j=1}^{n} A_{ij}^{k} \sum\limits_{i,j=1}^{n} B_{ij}^{l}$ \end{tabular}}; \draw[outpt2](accessfile)--(dc2); % Draw background \begin{pgfonlayer}{background} % Left-top corner of the background rectangle \path (kaela.west |- kaela.north)+(-0.5,0.5) node (a) {}; % Right-bottom corner of the background rectanle \path (accessfile.east |- accessfile.south)+(+0.5,-0.5) node (c) {}; % Draw the background \path[rounded corners, draw=OutCirc, dashed] (a) rectangle (c); \end{pgfonlayer} \pause \node (screen)[above right=1.2cm of accessfile]{All Local \\ Correlations \\ $\{Dcorr^{k,l}\}$\\$\in [-1,1]^{n \times n}$}; \draw[outpt](accessfile)--(screen.west); \pause \node (braille)[right =0.8cm of accessfile]{Smoothed \\ Maximum: \\ $c^{*} \in [-1,1]$, \\and Optimal \\ Scale $(k^*,l^*)$}; \draw[outpt](accessfile)--(braille); \pause \node (enlarge)[below =0.8cm of braille]{P-value by \\ Permutation \\ Test}; \draw[outpt](accessfile)--(enlarge.west); \begin{pgfonlayer}{background} % Left-top corner of the background rectangle \path (screen.west |- screen.north)+(-0.05,0.05) node (a) {}; % Right-bottom corner of the background rectanle \path (enlarge.east |- enlarge.south)+(0.3,0) node (c) {}; % Draw the background \path[rounded corners, draw=UniOrange,thick] (a) rectangle (c); \end{pgfonlayer} \end{tikzpicture} } \end{frame} \begin{frame}{Computing Distance and Centering} \textbf{Input:} $\mathcal{X}_{n}=[x_{1},\ldots,x_{n}]$ as the data matrix with each column representing one sample observation, and similarly $\mathcal{Y}_{n}$. A distance or kernel function $d(\cdot,\cdot)$, by default the Euclidean distance.\\ \pause \medskip \textbf{Distance Computation: } Let $\tilde{A}$ be the $n \times n$ Euclidean distance matrices of $\mathcal{X}_{n}$: \begin{align*} \tilde{A}_{ij}=d(x_i,x_j)=\|x_{i}-x_{j}\|_{2}, \end{align*} and similarly $\tilde{B}$ from $\mathcal{Y}_{n}$.\\ \pause \medskip \textbf{Centering:} Then we center $\tilde{A}$ and $\tilde{B}$ by columns, with the diagonals excluded: \begin{equation} \label{localCoef2} A_{ij}= \begin{cases} \tilde{A}_{ij}-\frac{1}{n-1}\sum_{s=1}^{n} \tilde{A}_{sj}, & \text{if $i \neq j$}, \\ 0, & \text{if $i=j$}; \end{cases} \end{equation} similarly for $B$. \end{frame} %\begin{frame}{Examples} %A few examples of $\G$: %\begin{itemize}[<+->] %\item The Pearson's product-moment correlation coefficient by taking $a_{ij}=x_i$ and $b_{ij}=y_i$. 
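\begin{frame}[fragile]{Illustrative Code: Distances, Centering, Ranks}
A minimal Python/NumPy sketch of the preprocessing steps above (Euclidean distances, column centering with the diagonal excluded, within-column ranks); names are illustrative and ties and the treatment of the self-distance are glossed over, so this is not the released MGC implementation.
\begin{verbatim}
import numpy as np

def preprocess(X):               # X: n x p data matrix
    diff = X[:, None, :] - X[None, :, :]
    D = np.linalg.norm(diff, axis=2)  # pairwise distances
    n = D.shape[0]
    A = D - D.sum(axis=0) / (n - 1)   # column centering
    np.fill_diagonal(A, 0.0)          # zero diagonal
    # R[i, j]: ascending rank of x_i w.r.t. x_j
    # (self-distance included, no tie handling)
    R = D.argsort(axis=0).argsort(axis=0) + 1
    return A, R
\end{verbatim}
\end{frame}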
%\item The Spearman and Kendall's rank correlations by setting $a_{ij}$ to be $rank(x_i)-rank(x_j)$ and $sign(x_i-x_j)$ respectively.
%\item The Mantel coefficient [\textit{Mantel (1967)}]\cite{Mantel1967} by using $a_{ij}=|x_i-x_j|_{2}$ (i.e. Euclidean distance).
%\item The distance correlation [\textit{Szekely et al.(2007)}]\cite{SzekelyRizzoBakirov2007} by using the doubly-centered distance entries for $a_{ij}$ and $b_{ij}$.
%\item The modified distance correlation [\textit{Szekely and Rizzo (2013)}] \cite{SzekelyRizzo2013a} by slightly tweaking $a_{ij}/b_{ij}$ of dcorr.
%\end{itemize}
%\end{frame}
\begin{frame}{Incorporating the Locality Principle}
\pause
\textbf{Ranking:} Define $\{R^{A}_{ij}\}$ as the ``rank'' of $x_i$ relative to $x_j$, that is, $R^{A}_{ij}=k$ if $x_i$ is the $k^{th}$ closest point (or ``neighbor'') to $x_j$, as determined by ranking the set $\{\tilde{A}_{1j},\tilde{A}_{2j},\ldots,\tilde{A}_{nj}\}$ in ascending order. Similarly define $R^{B}_{ij}$ for the $y$'s.
\pause
\medskip
For any $(k,l) \in [n]^2$, define the rank-truncated matrices $A^{k}$ and $B^{l}$ as
\begin{align*}
A_{ij}^{k} &=A_{ij} \mb{I}(R^{A}_{ij} \leq k), \\
B_{ij}^{l} &=B_{ij} \mb{I}(R^{B}_{ij} \leq l).
\end{align*}
\pause
\medskip
When ties occur, minimal rank is recommended, e.g., if $\mby$ only takes two values, $R^{B}_{ij}$ takes values in $\{1,2\}$ only. We assume no ties for ease of presentation.
\end{frame}
\begin{frame}{Local Distance Correlations}
\pause
\textbf{A Family of Local Correlations:} Let $\circ$ denote the entry-wise product and $\E(\cdot)=\frac{1}{n(n-1)}\sum_{i \neq j}^{n} (\cdot)$ denote the diagonal-excluded sample mean of a square matrix; then the sample local covariance, variance, and correlation are defined as:
\pause
\begin{align*}
dCov^{k,l}(\mathcal{X}_{n},\mathcal{Y}_{n}) &= \E(A^{k} \circ B^{l'})- \E(A^{k})\E(B^{l}),\\
dVar^{k}(\mathcal{X}_{n}) &=\E(A^{k} \circ A^{k'})- \E^2(A^{k}), \\
dVar^{l}(\mathcal{Y}_{n}) &=\E(B^{l} \circ B^{l'})- \E^2(B^{l}), \\
dCorr^{k,l}(\mathcal{X}_{n},\mathcal{Y}_{n}) &=dCov^{k,l}(\mathcal{X}_{n},\mathcal{Y}_{n}) / \sqrt{dVar^{k}(\mathcal{X}_{n}) \cdot dVar^{l}(\mathcal{Y}_{n})}.
\end{align*}
\pause
for $k,l=1,\ldots,n$. If $dVar^{k}(\mathcal{X}_{n}) \cdot dVar^{l}(\mathcal{Y}_{n}) \leq 0$, we set $dCorr^{k,l}(\mathcal{X}_{n},\mathcal{Y}_{n})=0$ instead.
\pause
\medskip
There are a maximum of $n^2$ different local correlations. At $k=l=n$, $dCorr^{k,l}(\mathcal{X}_{n},\mathcal{Y}_{n})$ equals the ``global'' distance correlation $dCorr(\mathcal{X}_{n},\mathcal{Y}_{n})$ by \textit{Szekely et al.(2007)}.
\end{frame}
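\begin{frame}[fragile]{Illustrative Code: One Local Correlation}
A minimal NumPy sketch of $dCorr^{k,l}$ as defined above, reading the prime as a transpose; $A$, $B$ are the centered distance matrices and $R_A$, $R_B$ the rank matrices. Names are illustrative, not from a released package.
\begin{verbatim}
import numpy as np

def local_dcorr(A, B, RA, RB, k, l):
    n = A.shape[0]
    Ak = A * (RA <= k)            # rank-truncated matrices
    Bl = B * (RB <= l)
    off = ~np.eye(n, dtype=bool)  # exclude the diagonal
    E = lambda M: M[off].mean()
    dcov = E(Ak * Bl.T) - E(Ak) * E(Bl)
    vx = E(Ak * Ak.T) - E(Ak) ** 2
    vy = E(Bl * Bl.T) - E(Bl) ** 2
    if vx * vy <= 0:
        return 0.0
    return dcov / np.sqrt(vx * vy)
\end{verbatim}
\end{frame}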
%\begin{frame}{MGC}
%\pause
%\textbf{MGC as optimal local correlation:} In $\{dCorr^{kl}(X_{n},Y_{n})\}$, we shall take the ``optimal'' local correlation as the \Mgc~statistic $\G^{*}(X_{n},Y_{n})$.
%\pause
%\medskip
%However, directly taking the maximum local correlation $\max_{(k,l) \in [n]^2}\{dCorr^{k,l}(X_{n},Y_{n})\}$ will yield a biased statistic under independence, i.e., the maximum is always larger than $0$ in expectation even under independent relationship.
%\pause
%\medskip
%Instead, we take a smoothed maximum by:
%\begin{itemize}[<+->]
%\item Pick a threshold $\tau \geq 0$;
%\item Compute the largest connected component $R=\{(k,l)$ such that $dCorr^{kl}(X_{n},Y_{n})>\max\{\tau, dCorr^{nn}(X_{n},Y_{n})\} \}$;
%\item Within the significant region $R$, set $\GG^{*}(X_{n},Y_{n})=\max_{ (k,l) \in R} \{dCorr^{k,l}(X_{n},Y_{n})\}$;
%\item If the number of elements in $R$ is less than $2n$, or the $\GG^{*}(X_{n},Y_{n})$~is no more than $dCorr^{nn}(X_{n},Y_{n})$, take $\GG^{*}(X_{n},Y_{n})=dCorr^{nn}(X_{n},Y_{n})$ instead.
%\end{itemize}
%\end{frame}
\begin{frame}{Smoothed Maximum $c^{*}(\mathcal{X}_{n},\mathcal{Y}_{n})$}
One would like to use the optimal local correlation for testing.\\
\pause
\medskip
But directly taking the maximum local correlation
\begin{align*}
\max_{(k,l) \in [n]^2}\{Dcorr^{k,l}(\mathcal{X}_{n},\mathcal{Y}_{n})\}
\end{align*}
will yield a biased statistic under independence, i.e., the maximum is always larger than $0$ in expectation even under an independent relationship!
\pause
\medskip
Instead, we take a smoothed maximum by finding a connected region in the local correlation map with significant local correlations -- if such a region exists, we use the maximum within that region.
\end{frame}
\begin{frame}{Smoothed Maximum}
Pick a threshold $\tau \geq 0$ (chosen via an approximate null distribution of $Dcorr$, which is a symmetric beta distribution and converges to $0$ as $n \rightarrow \infty$), and compute the set
\begin{align*}
\{(k,l) \mbox{ such that } Dcorr^{k,l}(\mathcal{X}_{n},\mathcal{Y}_{n})>\max\{\tau, Dcorr(\mathcal{X}_{n},\mathcal{Y}_{n})\} \},
\end{align*}
\pause
and calculate the largest connected component $R$ of the set.
\pause
\medskip
If there are sufficiently many elements in $R$ $(>2n)$, take the maximum correlation within $R$ as the \Mgc~statistic $c^{*}(\mathcal{X}_{n},\mathcal{Y}_{n})$,
\pause
and set the corresponding neighborhood pair as the optimal scale $(k^*,l^*)$.
%\pause
%\medskip
%$\tau$ has negligible effect on theoretical properties of \Mgc~but is somewhat important for limited-sample performance. More details on smoothing can be found in [\textit{Shen et al.(2018)}].
\end{frame}
\begin{frame}{Permutation Test}
To get a p-value by \Mgc~for any given data, we utilize the permutation test: randomly permute the index of the second data set $r$ times, compute the permuted \Mgc~statistic $c^{*}(\mathcal{X}_{n},\mathcal{Y}_{n}^{\pi})$ for each permutation $\pi$, and estimate
\begin{align*}
Prob\big(c^{*}(\mathcal{X}_{n},\mathcal{Y}_{n}^{\pi}) \geq c^{*}(\mathcal{X}_{n},\mathcal{Y}_{n})\big)
\end{align*}
as the p-value.
\pause
\medskip
This is a standard nonparametric testing procedure employed by Mantel, Dcorr, HHG, and HSIC, where the null distribution of the dependency measure cannot be exactly derived.
\end{frame}
\begin{frame}{Computation Complexity}
\begin{itemize}
\item Distance computation takes $\mathcal{O}(n^2 \max(p,q))$
\item Centering takes $\mathcal{O}(n^2)$
\item Ranking takes $\mathcal{O}(n^2 \log n)$
\item \textbf{All local correlations can be iteratively computed in $\mathcal{O}(n^2)$}
\item The smoothed maximum takes $\mathcal{O}(n^2)$
\item Storage requirement is $\mathcal{O}(n^2)$
\end{itemize}
\pause
Overall, \Mgc~can be computed in $\mathcal{O}(n^2 \max(p,q,\log n))$. Without the ranking process, the global correlation (Dcorr) waives the $\log n$ part and takes $\mathcal{O}(n^2 \max(p,q))$.
\pause
\medskip
The permutation test takes $\mathcal{O}(n^2 \max(r,p,q,\log n))$ for $r$ random permutations.
\pause \medskip On a standard PC with Matlab, testing $n=1000$ takes about $1$ minutes. \end{frame} \begin{frame}{Examples} \pause \makebox[\textwidth][c]{% \begin{tikzpicture}[ outpt/.style={->,OutCirc,very thick}, >=stealth, every node/.append style={align=left}] \node (source) at (0,0)[]{\includegraphics[width=.22\textwidth]{linear.png}}; \node (source2)[below=of source]{\includegraphics[width=.22\textwidth]{spiral.png}}; \pause \node (kaela)[right=of source] {\includegraphics[width=.28\textwidth]{Fig1B.png}}; \draw[outpt](source)--(kaela); \pause \node (result)[right=of kaela] {$Dcorr(\mathcal{X}_{n},\mathcal{Y}_{n}) =0.15$ \\ $MGC(\mathcal{X}_{n},\mathcal{Y}_{n})=0.15$ \\ p-vals: $<0.001$}; \draw[outpt](kaela)--(result); \pause \node (kaela2)[right=of source2] {\includegraphics[width=.28\textwidth]{Fig8B.png}}; \draw[outpt](source2)--(kaela2); \pause \node (result2)[right=of kaela2] {$Dcorr(\mathcal{X}_{n},\mathcal{Y}_{n}) =0.01$ \\ $MGC(\mathcal{X}_{n},\mathcal{Y}_{n})=0.13$ \\ p-vals: $0.3$ vs $<0.001$}; \draw[outpt](kaela2)--(result2); \end{tikzpicture} } \end{frame} \begin{frame}{\Mgc~is applicable to similarity / kernel matrix} %1. $MGC= Mantel=Dcorr=1$ against linear relationships; \\ %\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ $=0$ against independence.\\ %\pause \begin{thm}[Transforming kernel to distance] Given any kernel function $k(\cdot,\cdot)$, define an induced semi-metric as \begin{center} $d(i,j)=1-k(i,j) / \max_{i,j=1,\ldots,n}\{k(i,j)\}$. \end{center} %Then $d(\cdot,\cdot)$ is of strong negative type, and the resulting MGC is universally consistent. \end{thm} \pause \medskip Namely, given a sample kernel matrices $K_{n \times n}$, one can compute the induced distance matrix by \begin{center} $D=J-K/\max_{i,j \in [1,\ldots,n]^2}\{K(i,j)\}$, \end{center} and apply MGC or other distance-based correlation to the induced distance matrices.\\ \pause \medskip The kernel correlation HSIC is equivalent to distance correlation. \end{frame} %\begin{frame}{Advantages of \Mgc} %3. *The local correlations provide an encoding of the dependency structure. %\pause %\bigskip %4. Efficient implementation in $\mathcal{O}(n^2 log n)$. %\pause %\bigskip %MGC shares the same intrinsic idea as in nonlinear embedding, random forest, deep learning. %\end{frame} \section{Theoretical Properties} \begin{frame}{Basic Properties of Sample \Mgc} \begin{thm}[Well-behaved Correlation Measure] \pause 1. Boundedness: $c^{*}(\mathcal{X}_{n},\mathcal{Y}_{n}) \in [-1,1]$.\\ \pause \medskip 2. Symmetric: $c^{*}(\mathcal{X}_{n},\mathcal{Y}_{n}) =c^{*}(\mathcal{Y}_{n},\mathcal{X}_{n})$.\\ \pause \medskip 3. Invariant: $c^{*}(\mathcal{X}_{n},\mathcal{Y}_{n})$ is invariant to any distance-preserving transformations $\phi,\delta$ applied to $\mathcal{X}_{n}$ and $\mathcal{Y}_{n}$ each (i.e., rotation, scaling, translation, reflection).\\ \pause \medskip 4. 1-Linear: $c^{*}(\mathcal{X}_{n},\mathcal{Y}_{n})=1$ if and only if $F_{X}$ is non-degenerate and $(X, u Y)$ are dependent via an isometry for some non-zero constant $u$.\\ \end{thm} \end{frame} \begin{frame}{Consistency of Sample \Mgc} \begin{thm}[Consistency] \pause 1. 0-Indep: $c^{*}(\mathcal{X}_{n},\mathcal{Y}_{n}) \stackrel{n \rightarrow \infty}{\rightarrow}0$ if and only if independence.\\ \pause \medskip 2. Valid Test: Under the permutation test, Sample \Mgc~is a valid test, i.e., it controls the type 1 error level $\alpha$.\\ \pause \medskip 3. 
Consistency: At any type 1 error level $\alpha$, testing power $\beta(c^{*}(\mathcal{X}_{n},\mathcal{Y}_{n})) \stackrel{n \rightarrow \infty}{\rightarrow} 1$ against any dependent $F_{\mbx \mby}$.\\ \end{thm} \pause \medskip The distance correlation also shares the same properties. \end{frame} \begin{frame}{Defining Population \Mgc} %\begin{defi*} Suppose $(\mbx,\mby), (\mbx',\mby'), (\mbx'',\mby''), (\mbx''',\mby''')$ are \emph{iid} as $F_{XY}$. \pause Let $\mb{I}(\cdot)$ be the indicator function, define two random variables \begin{align*} \mb{I}_{\mbx,\mbx'}^{\rho_{k}} &=\mb{I}(\int_{B(\mbx,d(\mbx',\mbx))} dF_\mbx(u) \leq \rho_k) \\ \mb{I}_{\mby',\mby}^{\rho_{l}} &=\mb{I}(\int_{B(\mby',d(\mby'-\mby))} dF_\mby(u) \leq \rho_l) \end{align*} for $\rho_{k},\rho_{l} \in [0,1]$. \pause Further define \begin{align*} g^{\rho_{k}}_{\mbx} &=(d(\mbx,\mbx') - d(\mbx,\mbx'')) \mb{I}_{\mbx,\mbx'}^{\rho_{k}} \\ g^{\rho_{l}}_{\mby'} &=(d(\mby',\mby) - d(\mby',\mby''')) \mb{I}_{\mby',\mby}^{\rho_{l}} \end{align*} \pause The population local covariance can be defined as \begin{align*} %\label{eq:dcov2} Dcov^{\rho_{k}, \rho_{l}}(\mbx,\mby) = E(g^{\rho_{k}}_{\mbx} g^{\rho_{l}}_{\mby'}) - E(g^{\rho_{k}}_{\mbx}) E(g^{\rho_{l}}_{\mby'}). \end{align*} \pause Normalizing and taking a smoothed maximum yield population \Mgc. %\end{defi*} \end{frame} \begin{frame}{Sample to Population} \pause Under the Euclidean distance, the population version can be equivalently defined via an integral of characteristic functions of $F_{XY}-F_{X}F_{Y}$ with respect to a non-negative weight function $w(t,s)$. \pause \medskip For general metric or kernel function $d(\cdot,\cdot)$, the population version can also be defined as the integral of $d(X,X')d(Y,Y')$ with respect to $(F_{XY}-F_{X}F_{Y})(F_{X'Y'}-F_{X'}F_{Y'})$. \pause \medskip When the metric is of strong negative type or the kernel is characteristic, $c^{*}(X,Y)=0$ if and only if independence. For arbitrary metric or kernel, the if direction is still true but not the only if direction. %Under Euclidean distance, the population version can be equivalently defined via characteristic functions of $F_{XY}$: %\begin{align*} %Dcov^{\rho_{k}=1, \rho_{l}=1}(\mbx,\mby) = \int_{t,s} |g_{XY}(t,s)-g_{X}(t)g_{Y}(s)|^2 dw(t,s) %\end{align*} %with respect to a non-negative weight function $w(t,s)$ on $(t,s) \in \mathbb{R}^{p} \times \mathbb{R}^{q}$. %\pause %The weight function is defined as: %\begin{align*} % w(t,s) &= (d_{p}d_{q} |t|^{1+p}|s|^{1+q})^{-1}, %\end{align*} %where $d_{p}=\frac{\pi^{(1+p)/2}}{\Gamma((1+p)/2)}$ is a non-negative constant tied to the dimensionality $p$, and $\Gamma(\cdot)$ is the complete Gamma function.\\ %\pause %\medskip %Can be similarly adapted to the local correlation. \end{frame} \begin{frame}{Theoretical Advantages of \Mgc} \begin{thm}[Convergence, Mean and Variance] \pause 1. 0-Indep: When the metric is of strong negative type or the kernel is characteristic, $c^{*}(X,Y) =0$ if and only if independence.\\ \pause \medskip 2. Convergence: $c^{*}(\mathcal{X}_{n},\mathcal{Y}_{n}) \stackrel{n \rightarrow \infty}{\rightarrow} c^{*}(X,Y)$.\\ \pause \medskip 3. Almost Unbiased: $E(c^{*}(\mathcal{X}_{n},\mathcal{Y}_{n})) =c^{*}(X,Y)+\mathcal{O}(1/n)$.\\ \pause \medskip 4. Diminishing Variance: $Var(c^{*}(\mathcal{X}_{n},\mathcal{Y}_{n})) =\mathcal{O}(1/n)$.\\ \end{thm} \pause \medskip The last three properties also hold for any local correlation by $(\rho_{k},\rho_{l})=(\frac{k-1}{n-1},\frac{l-1}{n-1})$, as well as the distance correlation, i.e., $k=l=n$. 
%Same boundedness, symmetric, invariant, 1-linear properties as sample version.\\ \end{frame} \begin{frame}{Theoretical Advantages of \Mgc} %1. $MGC= Mantel=Dcorr=1$ against linear relationships; \\ %\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ $=0$ against independence.\\ %\pause \begin{thm}[Advantages of Population \Mgc~vs Dcorr] \pause 1. For any dependent $F_{XY}$, $c^{*}(X,Y) \geq Dcorr(X,Y)$. \\ \pause \medskip 2. There exists dependent $F_{XY}$ such that $c^{*}(X,Y)>Dcorr(X,Y)$.\\ \end{thm} \pause As \Mgc~and Dcorr share similar variance and same mean under the null, the first moment advantage in the alternative is translated to the testing power. \pause \begin{thm}[Optimal Scale of \Mgc~Implies Geometry Structure] \pause If the relationship is linear (or with independent noise), the global scale is always optimal and $c^{*}(X,Y)=Dcorr(X,Y)$.\\ \pause \medskip Conversely, the optimal scale being local, i.e., $c^{*}(X,Y)>Dcorr(X,Y)$, implies a non-linear relationship. \end{thm} \end{frame} \section{Simulations and Experiments} \begin{frame}{Visualizations of $20$ Simulation Settings} \pause \begin{figure}[ht] \centering \includegraphics[width=1.0\textwidth]{FigSimVisual2} %\caption{Visualization of $\mbx$ vs $\mby$ for the $20$ dependencies at $p=1$ and $n=50$, and compute sample \Mgc~/ DCorr / absolute Pearson's correlation for each.} \label{f:dependencies} \end{figure} \end{frame} \begin{frame}{Testing Power: Linear vs Nonlinear} Power is the probability of rejecting the null when the alternative is true. \pause \begin{figure}[!ht] \centering % \subfigure{ \includegraphics[width=0.5\textwidth,trim={1.5cm 0 0cm 0cm},clip]{FigNoiseT1} % } % \subfigure{ \includegraphics[width=0.5\textwidth,trim={1.5cm 0 0cm 0cm},clip]{FigNoiseT6} % } %\caption{Comparing the power of \Mgc, distance correlation, and Pearson's correlation for testing noisy linear relationship, and noisy quadratic relationships at $p=1$ and $n=50$. Under linear relationship, all three of them are almost the same with Pearson's correlation being negligibly better; while under quadratic relationship, \Mgc~is clearly the best. \textbf{\Mgc~almost loses none in linear / gaussian setting while gains enormously in general dependency setting.} \label{f:noise} \begin{align*} & n=30, p=q=1, \\ & X \sim Uniform(-1,1),\\ & \epsilon \sim Normal(0, noise), \\ & Y=X+\epsilon \mbox{ and } Y=X^{2}+\epsilon. \end{align*} \end{figure} \end{frame} %\begin{frame}{1D Simulation Powers} %\begin{figure}[htbp] %\includegraphics[width=0.9\textwidth]{Fig1DPowerAll} %\caption{ %Powers of different methods for $20$ different one-dimensional dependence structures for increasing sample size.} %\end{figure} %\end{frame} %\begin{frame}{HD Simulation Powers} %\begin{figure}[htbp] %\includegraphics[width=0.9\textwidth]{FigHDPowerAll} %\caption{ %Powers of different methods for $20$ different increasing-dimensional dependence structures, at $n=100$ and dimensionality increasing from $1$ onwards.} %\end{figure} %\end{frame} %\begin{frame}{Relative Power in 1D} %\pause %\begin{figure}[!ht] %\centering % \subfigure{ %\includegraphics[width=0.75\textwidth,trim={0cm 0.2cm 0cm 0.3cm},clip]{Fig1DPowerSummary.png} % } % \subfigure{ %\includegraphics[width=0.99\textwidth,trim={2cm 0 0cm 0cm},clip]{FigHDPowerSummarySize} % } %\caption{Required sample size of \Mgc~to achieve a power of $85\%$ in 1D and 10D at type 1 error level $5\%$, for each of the $20$ dependencies (except type 20 independent). 
The median size is reported in the far right column, and \Mgc~is overall the most superior method.} %\label{f:Summary1} %\end{figure} %\end{frame} %\begin{frame}{Relative Power in HD} %\pause %\begin{figure}[!ht] %\centering % \subfigure{ %\includegraphics[width=0.99\textwidth,trim={2cm 0 0cm 0cm},clip]{Fig1DPowerSummarySize} % } % \subfigure{ %\includegraphics[width=0.75\textwidth,trim={0cm 0cm 0cm 0.5cm},clip]{FigHDPowerSummary.png} % } %\caption{Required sample size of \Mgc~to achieve a power of $85\%$ in 1D and 10D at type 1 error level $5\%$, for each of the $20$ dependencies (except type 20 independent). The median size is reported in the far right column, and \Mgc~is overall the most superior method.} %\label{f:Summary2} %\end{figure} %\end{frame} \begin{frame}{Required Sample Size} \pause Required sample size $N_{\alpha,\beta}(c)$ to achieve a power of $\beta$ at type 1 error level $\alpha$ using a statistic $c$. We compute the required sample size $N_{\alpha=0.05,\beta=0.85}(c)$: \\ \medskip \pause in linear relationship, $40$ for all three methods; \\ in quadratic relationship, $80$ for \Mgc, $180$ for Dcorr, and $>1000$ for Pearson.\\ \medskip \pause Next we compute the size for each simulation, and summarize by the median over close-to-linear (type 1-5) and strongly non-linear relationships (type 6-19). \\ \medskip \pause We consider univariate (1D) and multivariate (10D) cases.\\ %Traditional linear correlations (Pearson/RV/CCA/ Spearman/Kendall) always perform the best in monotone simulations, so are the distance-based methods like Dcorr and \Mgc; HHG and HSIC are slightly worse, while MIC and Mantel are the worst. For non-monotone dependencies, traditional correlations fail to detect the existence of dependencies, while \Mgc~is the best approach followed by HHG and HSIC. \end{frame} \begin{frame}{Median Size Table} \begin{tabular}{|l||c|c|c|c|} \hline Testing Methods & 1D Lin & 1D Non-Lin & 10D Lin & 10D Non-Lin \\ \hline % Oracle \Mgc & \textbf{50} & 60 & \textbf{70} & \textbf{135} \\ % \hline \textcolor{UniOrange}{MGC} & \textbf{50} & \textbf{90} & 60 & \textbf{165} \\ \hline Dcorr & \textbf{50} & 250 & 60 & 515 \\ %\hline % Mantel & 70 & 180 & 165 & 270\\ \hline Pearson / RV / CCA & \textbf{50} & $>$1000 & \textbf{50} & $>$1000 \\ \hline HHG & 70 & \textbf{90} & 100 & 315 \\ \hline HSIC & 70 & 95 & 100 & 400 \\ %\hline %Spearman & \textbf{50} & n/a & $>$1000 & n/a \\ %\hline %Kendall & \textbf{50} & n/a & $>$1000 & n/a \\ \hline MIC & 120 & 180 & n/a & n/a \\ %\hline %CCA & \textbf{50} & \textbf{50} & $>$1000 & $>$1000 \\ \hline \end{tabular} \end{frame} \begin{frame}{Extracting Signal Brain Region from fMRI images} \pause We consider predicting the site and sex based on functional magnetic resonance image (fMRI) graphs. Two datasets used are SWU4 and HNU1, which have $467$ and $300$ samples respectively. \\ \medskip Each sample is an fMRI scan registered to the MNI152 template using the Desikan altas, which has $70$ regions. They are transformed to graph structure using the NeuroData’s MRI Graphs pipeline \footnote{\url{https://github.com/neurodata/ndmg}}. \medskip We compute the dependency measure between each brain region and sex. Rank the brain region via magnitude of the measure, and include all significant ($p-val<0.05$) brain regions. Then run leave-one-out cross validation with $K$-Nearest Neighbor classifier to verify the results. Repeat it for the site property. 
%Subsequent literature searches reveal that neurogranin is a potentially valuable biomarker because it is exclusively expressed in brain tissue among normal tissues and has not been linked with any other cancer type. %\pause %\medskip %More details and other real data experiments can be found in \cite{ShenEtAl2016}. \end{frame} %\begin{frame}{P-value of Each Feature} %\pause %\begin{figure}[htbp] %\includegraphics[width=0.6\textwidth]{FigReal1.png} %\caption{X-axis is the p-value of each peptide between normal and pancreatic. Y-axis is the p-value between pancreatic and all other types. \Mgc~uniquely identifies Neurogranin.} %\label{f:realA} %\end{figure} %\end{frame} \begin{frame} \begin{figure}[!ht] \centering \includegraphics[width=4.3in]{brain0.jpg} \label{fig:study} \end{figure} \end{frame} \begin{frame} \begin{figure}[!ht] \centering \includegraphics[width=3.5in]{brain.png} \label{fig:study} \end{figure} \end{frame} \begin{frame} \begin{figure}[!ht] \centering \includegraphics[width=3.5in]{batch_sex_cor_error.png} \label{fig:study} \caption{A total of $22$ regions are recognized for site difference, which maximizes the MGC statistic and almost minimizes the leave-one-out cross validation error. It is no longer the case for sex, for which neither the MGC nor the error are too significant for any size of subgraph.} \end{figure} \end{frame} \section{Summary} \begin{frame}{Summary} \pause Distance-based correlation is valid and universally consistent for testing independence. \Mgc~utilizes the locality principle to achieve better testing power and sheds insight into the dependency structure. \pause \medskip \begin{itemize}[<+->] \item Using a proper distance or kernel ensures the universal consistency. \item Compute all local correlations iteratively and finds the optimal one can help the testing power when sample size is limited. \item The optimal scale gives information on linear vs nonlinear dependency. \item It can be used in a variety of applications to replace the Pearson's correlation. \end{itemize} %\pause %\medskip %In this talk we went over the tip of the iceberg; there are a lot more fascinating details and applications in the manuscripts. \end{frame} \begin{frame}{Advantages of \Mgc} \pause 1. Performant under any joint distribution of finite second moments: \pause \begin{itemize}[<+->] \item Equals $0$ asymptotically if and only if independence. \item \textcolor{UniOrange}{Amplify the dependency signal while mostly avoiding the sample bias.} \item Superior finite-sample performance over all benchmarks, against linear / nonlinear / noisy / high-dimensional relationships. \end{itemize} \pause \medskip 2. It works for: \pause \begin{itemize}[<+->] \item Low- and high-dimensional data. \item Euclidean and structured data (e.g., images, networks, shapes). \item Any dissimilarity / similarity / kernel matrix. \end{itemize} \pause \medskip 3. Intuitive to understand and efficient to implement in $\mathcal{O}(n^2 log n)$. %\pause %\medskip %MGC shares the same intrinsic idea as in nonlinear embedding, random forest, multiple kernel learning, deep learning. \end{frame} \begin{frame}{Some Recent Advances in Computation} In practice: \pause \medskip \medskip 1. Distance correlation and MGC can now be tested without resorting to permutation (similar to Pearson's t-test). \pause \medskip \medskip 2. When $p=q=1$ and using Euclidean distance, there is a special fast implementation of distance correlation. 
The running time becomes $O(n \log n)$ and storage requirement becomes $O(n)$, making it ideal and scalable to millions and billions of observations. (less than $10$ seconds for $1$ million observations on a standard PC using Matlab) \pause \medskip \medskip Thus when $n$ is small (say less than a few thousands), \Mgc~is the better choice; whereas distance correlation can better handle extremely large data. \end{frame} \begin{frame}{Open Source Packages} Python package in \url{https://github.com/neurodata/mgcpy/} and forthcoming in scikit-learn\\ \bigskip R package in \url{https://github.com/neurodata/MGC/} and CRAN\\ \bigskip Matlab code \url{https://github.com/neurodata/mgc-matlab} \end{frame} %\begin{frame}{Current Works} %\pause %\begin{itemize}[<+->] %\item The sample method, algorithmic details, simulation advantages and real applications are demonstrated in [\textit{Shen et al.(2017a)}]\cite{ShenEtAl2016}. %\item Population \Mgc~and most mathematical properties appear in [\textit{Shen et al.(2017b)}]\cite{ShenEtAl2018}. %\item \Mgc~is infused with diffusion maps for testing between graph vertices and attributes [\textit{Lee et al.(2017)}]\cite{Lee2017}. %\item \Mgc~is utilized for iterative signal subgraph extraction in [\textit{Wang et al.(2018)}]\cite{Wang2017}. %\if1\blind{ %\item The local correlation map characterizes the dependency structure \cite{ShenEtAl2019}. %\item \Mgc~is an ideal choice for K-sample testing \cite{ShenEtAl2019b}. %}\fi %\end{itemize} %\end{frame} %\begin{frame}{Future Directions} %\pause %\begin{itemize}[<+->] %\item Feature selection / dimension reduction. %\item Magnitude of \Mgc~versus prediction and classification error. %\item Better classification / regression in multiple graph setting. %\item Rank or Kernel \Mgc; interpretation of the optimal local scale under RKHS framework. %\item Relative efficiency among \Mgc, Dcorr, Mantel, and HSIC. %\item Faster \Mgc~testing. %, sub-sampling performance guarantee in big data, and approximated null distribution to derive p-value without permutation test. %\item Real data applications. %: neurodata vs phenotypes, genotypes vs phenotypes, social networks vs attributes, plane sensor data vs mechanical issue, etc. %\end{itemize} %\end{frame} %\section{Method} %\begin{frame}{Distance Matrices} %\pause %\textbf{Input:} Given pairs of observations $(x_{i},y_{i}) \in \Real^{p} \times \Real^{q}$ for $i=1,\ldots,n$, denote $X_{n}=[x_{1},\ldots,x_{n}]$ as the data matrix with each column representing one sample observation, and similarly $Y_{n}$. \\ %\pause %\medskip %\textbf{Distance Computation: } Let $\tilde{A}$ be the $n \times n$ Euclidean distance matrices of $X_{n}$: %\begin{align*} %\tilde{A}_{ij}=\|x_{i}-x_{j}\|_{2}, %\end{align*} %and similarly $\tilde{B}$.\\ %\pause %\medskip %Alternatively, one can directly input two distance / dissimilarity matrices. %For $p=q=1$, the sample Pearson's covariance equals %\begin{equation} %cov(X,Y)=\frac{1}{n-1}\sum_{i=1}^{n}(x_{i}-\overline{x})(y_{i}-\overline{y}). %\end{equation} %with $\overline{x}$ and $\overline{y}$ being the sample mean. %\pause %\medskip %The variance can be similarly defined as $cov(X,X)$ and $cov(Y,Y)$, and correlation follows as %\begin{equation} %corr(X,Y)=\frac{cov(X,Y)}{\sqrt{cov(X,X) cov(Y,Y)}} \in [-1,1]. %\end{equation} %The RV coefficient (or canonical correlation) generalizes the Pearson's correlation into dimensionality higher than $1$. 
%\end{frame} %\begin{frame}{Transforming the Distance Matrices} % \pause %\textbf{Centering:} Then we center $\tilde{A}$ and $\tilde{B}$ by columns, with the diagonals excluded: %\begin{equation} %\label{localCoef2} % A_{ij}= % \begin{cases} % \tilde{A}_{ij}-\frac{1}{n-1}\sum_{s=1}^{n} \tilde{A}_{sj}, & \text{if $i \neq j$}, \\ % 0, & \text{if $i=j$}; % \end{cases} %\end{equation} %similarly for $B$. %\pause %\medskip %The distance covariance statistic by \textit{Szekely et al.(2007)} equals the mean of the entri-wise product of $A$ and $B^{T}$, i.e., %\begin{equation} %dcov(X,Y)=\frac{1}{(n-1)^2}\sum_{i,j=1}^{n}A_{ij} B_{ji}. %\end{equation} %Similarly one can define distance variance, and then distance correlation in $[-1,1]$. %While Multiscale Generalized Correlation (\Mgc) only consider local distances, i.e., calculate the correlation between two sparse matrices based on nearest-neighbor. %\end{frame} %\begin{frame}{Examples} %A few examples of $\G$: %\begin{itemize}[<+->] %\item The Pearson's product-moment correlation coefficient by taking $a_{ij}=x_i$ and $b_{ij}=y_i$. %\item The Spearman and Kendall's rank correlations by setting $a_{ij}$ to be $rank(x_i)-rank(x_j)$ and $sign(x_i-x_j)$ respectively. %\item The Mantel coefficient [\textit{Mantel (1967)}]\cite{Mantel1967} by using $a_{ij}=|x_i-x_j|_{2}$ (i.e. Euclidean distance). %\item The distance correlation [\textit{Szekely et al.(2007)}]\cite{SzekelyRizzoBakirov2007} by using the doubly-centered distance entries for $a_{ij}$ and $b_{ij}$. %\item The modified distance correlation [\textit{Szekely and Rizzo (2013)}] \cite{SzekelyRizzo2013a} by slightly tweaking $a_{ij}/b_{ij}$ of dcorr. %\end{itemize} %\end{frame} %\begin{frame}{Incorporating the Locality Principle} %\pause %\textbf{Ranking:} Define $\{R^{A}_{ij}\}$ as the ``rank'' of $x_i$ relative to $x_j$, that is, $R^{A}_{ij}=k$ if $x_i$ is the $k^{th}$ closest point (or ``neighbor'') to $x_j$, as determined by ranking the set $\{\tilde{A}_{1j},\tilde{A}_{2j},\ldots,\tilde{A}_{nj}\}$ by ascending order. Similarly define $R^{B}_{ij}$ for the $y$'s. %\pause %\medskip %For any $(k,l) \in [n]^2$, define the rank truncated matrices $A^{k}, B^{l}$, and the joint distance matrix $C^{kl}$ as %\begin{align*} %A_{ij}^{k} &=A_{ij} \mb{I}(R^{A}_{ij} \leq k), \\ %B_{ji}^{l} &=B_{ji} \mb{I}(R^{B}_{ji} \leq l), \\ %C^{kl}_{ij} &= A_{ij}^{k} \times B_{ji}^{l}, %\end{align*} %where the subscript of $B$ is purposely switched. %\pause %\medskip %When ties occur, minimal rank is used, e.g., if $\mby$ only takes two value, $R^{B}_{ij}$ takes value in $\{1,2\}$ only. We assume no ties for each of presentation. %\end{frame} %\begin{frame}{Local Distance Correlations} %\pause %\textbf{A Family of Local Correlations:} %Finally, we compute the sample local covariance, variance, and correlation between the sample observations $X_{n}$ and $Y_{n}$ as follows: %\pause %\begin{align*} %dCov^{kl}(X_{n},Y_{n}) &= \E(C^{kl}_{ij})- \E(A^{k}_{ij})\E(B^{l}_{ij}),\\ %dVar^{k}(X_{n}) &=\E(A^{k}_{ij} A^{k}_{ji})- \E^2(A^{k}_{ij}), \\ %dVar^{l}(Y_{n}) &=\E(B^{l}_{ij} B^{l}_{ji})- \E^2(B^{l}_{ij}), \\ %dCorr^{kl}(X_{n},Y_{n}) &=dCov^{kl}(X,Y) / \sqrt{dVar^{k}(X) \cdot dVar^{l}(Y)}. %\end{align*} %for $k,l=1,\ldots,n$, and $\E(\cdot)=\frac{1}{n(n-1)}\sum_{i \neq j}^{n} (\cdot)$ denotes the diagonal-excluded sample mean of a square matrix. If $dVar^{k}(X_{n}) \cdot dVar^{l}(Y_{n}) \leq 0$, we set $dCorr^{kl}(X_{n},Y_{n})=0$ instead. %\pause %\medskip %There are a maximum of $n^2$ different local correlations. 
At $k=l=n$, $dCorr^{kl}(X_{n},Y_{n})$ equals the ``global'' distance correlation $dCorr(X_{n},Y_{n})$ by \textit{Szekely et al.(2007)}. %\end{frame} %\begin{frame}{MGC} %\pause %\textbf{MGC as optimal local correlation:} In $\{dCorr^{kl}(X_{n},Y_{n})\}$, we shall take the ``optimal'' local correlation as the \Mgc~statistic $\G^{*}(X_{n},Y_{n})$. %\pause %\medskip %However, directly taking the maximum local correlation $\max_{(k,l) \in [n]^2}\{dCorr^{k,l}(X_{n},Y_{n})\}$ will yield a biased statistic under independence, i.e., the maximum is always larger than $0$ in expectation even under independent relationship. %\pause %\medskip %Instead, we take a smoothed maximum by: %\begin{itemize}[<+->] %\item Pick a threshold $\tau \geq 0$; %\item Compute the largest connected component $R=\{(k,l)$ such that $dCorr^{kl}(X_{n},Y_{n})>\max\{\tau, dCorr^{nn}(X_{n},Y_{n})\} \}$; %\item Within the significant region $R$, set $\GG^{*}(X_{n},Y_{n})=\max_{ (k,l) \in R} \{dCorr^{k,l}(X_{n},Y_{n})\}$; %\item If the number of elements in $R$ is less than $2n$, or the $\GG^{*}(X_{n},Y_{n})$~is no more than $dCorr^{nn}(X_{n},Y_{n})$, take $\GG^{*}(X_{n},Y_{n})=dCorr^{nn}(X_{n},Y_{n})$ instead. %\end{itemize} %\end{frame} %\begin{frame}{Permutation Test} %\pause %The choice of threshold $\tau$ is determined via the approximate distribution of distance correlation under independence. $\tau$ has negligible effect on theoretical properties of \Mgc~but is somewhat important for limited-sample performance. More details on smoothing can be found in [\textit{Shen et al.(2017b)}]\cite{ShenEtAl2018}. %\pause %\medskip %To get a p-value by \Mgc~for any given data, we utilize the permutation test, i.e., randomly permute the second data set, and take the p-value as the percentage that the original \Mgc~is no larger than the permuted \Mgc~statistic. %\pause %\medskip %This is a common nonparametric testing procedure employed by all of Mantel, Dcorr, HHG, HSIC. %\end{frame} %\begin{frame}{Computation Complexity} %Distance computation takes $\mathcal{O}(n^2 \max(p,q))$, centering takes $\mathcal{O}(n^2)$, ranking takes $\mathcal{O}(n^2 log(n))$, \textbf{all local correlations can be iteratively computed in $\mathcal{O}(n^2)$}, and the smoothing step takes $\mathcal{O}(n^2)$. %\pause %\medskip %Overall, \Mgc~can be computed in $\mathcal{O}(n^2 \max(p,q,log(n)))$, which is comparable to DCorr, HHG, and HSIC. %\pause %\medskip %The permutation test takes $\mathcal{O}(n^2 \max(r,p,q,log(n)))$ for $r$ random permutations. %\end{frame} %\begin{frame}{Illustration of MGC vs DCorr} %\begin{figure}[htbp] %\includegraphics[width=1.0\textwidth]{Fig1All.png} %\end{figure} %\end{frame} %\section{Theory} %\begin{frame}{Population Form} %\pause %Assume that $(x_{i},y_{i}) \stackrel{i.i.d.}{\sim} F_{\mbx \mby}$ for all $i$, we can define the population local covariance by $dCov^{\rho_k,\rho_l}(\mbx,\mby)$ for $\rho_{k},\rho_{l} \in [0,1]$ via the characteristic functions of the underlying random variables, or Euclidean distances between pairs of i.i.d. random variables. %\pause %\medskip %Similarly one can cast local variances / correlations / \Mgc~into the population form. %\pause %\medskip %The population \Mgc~is directly connected to the sample version, and facilitates a number of desirable properties. %See [\textit{Shen et al.(2017b)}]\cite{ShenEtAl2018} for more details. 
%\end{frame} %\begin{frame}{Population Form} %\pause %\begin{defi*} %Let $\mb{I}(\cdot)$ be the indicator function, define two random variables %\begin{align*} %\mb{I}_{\mbx,\mbx'}^{\rho_{k}} &=\mb{I}(Prob\{B(\mbx,\|\mbx'-\mbx\|)\} \leq \rho_{k}) \\ %\mb{I}_{\mby',\mby}^{\rho_{l}} &=\mb{I}(Prob\{B(\mby',\|\mby-\mby'\|)\} \leq \rho_{l}) %\end{align*} %with respect to the balls $B(\mbx,\|\mbx'-\mbx\|)$ and $B(\mby',\|\mby-\mby'\|)$ centered at $\mbx$ and $\mby'$ respectively. %Suppose $(\mbx,\mby),(\mbx',\mby'),(\mbx'',\mby''),(\mbx''',\mby''')$ are i.i.d. as $F_{\mbx \mby}$, and define %\begin{align*} %d^{\rho_{k}}_{\mbx} &=(\| \mbx-\mbx' \| - \|\mbx-\mbx''\|) \mb{I}_{\mbx,\mbx'}^{\rho_{k}}, \\ %d^{\rho_{l}}_{\mby'} &=(\| \mby'-\mby \| - \|\mby'-\mby'''\|) \mb{I}_{\mby',\mby}^{\rho_{l}}. %\end{align*} %Then the population local covariance equals %\begin{align} %\label{eq:dcov2} %dCov^{\rho_k, \rho_l}(\mbx,\mby) = E(d^{\rho_{k}}_{\mbx} d^{\rho_{l}}_{\mby'}) - E(d^{\rho_{k}}_{\mbx}) E(d^{\rho_{l}}_{\mby'}). %\end{align} %\end{defi*} %\end{frame} %\begin{frame}{Properties of Population Local Correlation} %\pause %\begin{thm} %\label{thm2} %The population local correlation satisfies the following: %\begin{description} %\item [(a)] For any $(\rho_k,\rho_l) \in [0,1] \times [0,1]$, $dCorr^{\rho_{k},\rho_{l}}(\mbx,\mby)=dCorr^{\rho_{l},\rho_{k}}(\mby,\mbx) \in [-1,1]$. %\item [(b)] If $\mbx$ is non-degenerate and $(\mbx, \mby)$ are dependent via a linear transformation (i.e., scaling, translation, rotation, reflection), then $dCorr^{\rho_{k},\rho_{l}}(\mbx,\mby)=1$ for all $\rho_k=\rho_l \in (0,1]$. %\item [(c)] Under fixed marginals, $dCov^{\rho_{k},\rho_{l}}(\mbx,\mby)$ is a scalar multiple of $dCorr^{\rho_{k},\rho_{l}}(\mbx,\mby)$ regardless of the joint distribution, for any fixed $(\rho_k,\rho_l) \in [0,1] \times [0,1]$. %\item [(c)] At $(\rho_k,\rho_l)=(1,1)$, $dCorr^{\rho_{k},\rho_{l}}(\mbx,\mby) = dCorr(\mbx,\mby)$. %\item [(d)] For any $(\rho_k,\rho_l) \in [0,1] \times [0,1]$, $dCorr^{\rho_{k},\rho_{l}}(\mbx,\mby) = 0$ under the null (i.e., $\mbx$ is independent of $\mby$). %\end{description} %\end{thm} %\pause %\medskip %(a-c) are also satisfied by the sample version $dCov^{kl}(X_{n},Y_{n})$, while (d) holds asymptotically for the sample version. These properties are also inherited by \Mgc~as a smoothed maximal local correlation. %\end{frame} %\begin{frame}{Convergence property} %\pause %We prove that the sample version converges to the population version as $n$ increases, and establish the convergence rate. %\pause %\medskip %\begin{thm} %\label{thm4} %Suppose each column of $X_{n}$ and $Y_{n}$ are i.i.d. as $\mbx$ and $\mby$ respectively with finite second moments, sample local covariance satisfies %\begin{align*} %E(dCov^{kl}(X_{n},Y_{n})) &= dCov^{\rho_{k},\rho_{l}}(\mbx,\mby) +\mathcal{O}(1/n) \\ %Var(dCov^{kl}(X_{n},Y_{n})) &= \mathcal{O}(1/n)\\ %dCov^{kl}(X_{n},Y_{n}) &\stackrel{n \rightarrow \infty}{\rightarrow} dCov^{\rho_{k},\rho_{l}}(\mbx,\mby), %\end{align*} %where $\rho_{k}=\frac{k-1}{n-1}$ and $\rho_{l}=\frac{l-1}{n-1}$. In particular, the convergence is uniform for all sample local covariances, and also holds for the local correlations. %\end{thm} %\end{frame} %\begin{frame}{Consistency of MGC} %We can similarly define the population version of \Mgc, prove sample \Mgc~converges to the population \Mgc, and then testing consistency.\\ %\pause %\medskip %\begin{thm} %\label{thm8} %Suppose each column of $X_{n}$ and $Y_{n}$ are i.i.d. 
as $\mbx$ and $\mby$ of finite second moments, then $\GG^{*}(X_{n},Y_{n}) \geq 0$ asymptotically with equality if and only if independence. Therefore, at any type $1$ error level $\alpha>0$, \Mgc~is a valid test statistic that is consistent against all possible alternatives under the permutation test. %\end{thm} %\end{frame} %\begin{frame}{Advantage of MGC over Dcorr} %\pause %Whenever there exist some local correlations that are larger than global distance correlation (1D non-monotone relationships), we prove that sample \Mgc~is larger than Dcorr as sample size increases.\\ %\pause %\medskip %\begin{thm} %\label{thm7} %Suppose each column of $X_{n}$ and $Y_{n}$ are i.i.d. as continuous $\mbx$ and $\mby$ respectively, and %\begin{align*} %\max_{ (\rho_k,\rho_l) \in [0,1] \times [0,1]} \{dCorr^{\rho_{k},\rho_{l}}(\mbx,\mby)\} > Dcorr(\mbx, \mby). %\end{align*} %Then for $n$ sufficiently large, it holds that $\GG^{*}(X_{n}, Y_{n}) > Dcorr(X_{n}, Y_{n})$ for any threshold choice $\tau \rightarrow 0$. %\end{thm} %\pause %\medskip %Together with the smoothing step that mitigates the sample bias of \Mgc~under the null, the advantage on test statistic is translated to better finite-sample testing power of \Mgc~for nonlinear relationships. \\ %\end{frame} %tba: add PIE and WIKI GF data %\begin{frame}[allowframebreaks] %\frametitle{References} %\tiny %\bibliographystyle{ieeetr} %\bibliography{references} %\end{frame} \begin{frame}%[allowframebreaks] \frametitle{References} \small 1. \textcolor{UniOrange}{C. Shen}, C. E. Priebe, and J. T. Vogelstein, ``From distance correlation to the multiscale graph correlation," Journal of the American Statistical Association, 2019.\\ \bigskip 2. J. T. Vogelstein, E. Bridgeford, Q. Wang, C. E. Priebe, M. Maggioni, and \textcolor{UniOrange}{C. Shen}, ``Discovering and Deciphering Relationships Across Disparate Data Modalities," eLife, 2019.\\ \bigskip 3. Y. Lee, \textcolor{UniOrange}{C. Shen}, and J. T. Vogelstein, ``Network dependence testing via diffusion maps and distance-based correlations," Biometrika, 2019.\\ \bigskip 4. S. Wang, \textcolor{UniOrange}{C. Shen}, A. Badea, C. E. Priebe, and J. T. Vogelstein, ``Signal subgraph estimation via iterative vertex screening," under review.\\ \bigskip 5. \textcolor{UniOrange}{C. Shen} and J. T. Vogelstein, ``The Exact Equivalence of Distance and Kernel Methods for Hypothesis Testing," under review.\\ \if1\blind { \bigskip 5. \textcolor{UniOrange}{C. Shen} and J. T. Vogelstein, ``Characterizing dependency structure by the MGC image," \textit{in prep}.\\ \bigskip 6. \textcolor{UniOrange}{C. Shen}, Y. Lee, C. E. Priebe, and J. T. Vogelstein, ``K-sample testing via multiscale graph correlation," \textit{in prep}. }\fi %\bibliographystyle{ieeetr} % \end{frame} \addtocounter{framenumber}{-1} \begin{frame}<0> \bibliography{MGCbib} \end{frame} %\bibliography{MGCbib} %------------------------------------------------ %---------------------------------------------------------------------------------------- \end{document}
\vsssub \subsubsection{The NetCDF input field preprocessor } \label{sec:ww3prnc} \vsssub \proddefH{ww3\_prnc}{w3prnc}{ww3\_prnc.ftn} \proddeff{Input}{ww3\_prnc.nml}{Namelist configuration file.}{10} (App.~\ref{sec:config062}) \proddefa{ww3\_prnc.inp}{Traditional configuration file.}{10} (App.~\ref{sec:config061}) \proddefa{mod\_def.ww3}{Model definition file.}{11} \proddefa{'user input'\opt}{See example below.}{user} \proddeff{Output}{standard out}{Formatted output of program.}{6} \proddefa{level.ww3\opt}{Water levels file.}{12} \proddefa{current.ww3\opt}{Current fields file.}{12} \proddefa{wind.ww3\opt}{Wind fields file.}{12} \proddefa{ice.ww3\opt}{Ice fields file.}{12} \proddefa{data0.ww3\opt}{Assimilation data (`mean').}{12} \proddefa{data1.ww3\opt}{Assimilation data (`1-D spectra').}{12} \proddefa{data2.ww3\opt}{Assimilation data (`2-D spectra').}{12} \vspace{\baselineskip} \vspace{\baselineskip} \noindent See note at the end of the previous section (\ref{sec:ww3prep}) for tools that can be used to pack input files in custom programs. \pb
\chapter{Wave and optics}

Differential equation? Diffusion? Oscillation? Wave behavior?

This chapter has two purposes: introduce differential equations to the reader, and prepare the reader for quantum mechanics.

\section{Wave}

% https://en.wikipedia.org/wiki/Wave
("Wave", Wikipedia):
A \emph{wave} is an oscillation accompanied by a transfer of energy?
A wave is a disturbance that transfers energy through matter or space?

\section{Detecting waves}

We can detect a wave by a diffraction slit.
If it's a wave, it diffracts.
We assume the converse: if it diffracts, it's very likely a wave.

\emph{Undulation} is an old term for \emph{wave}.

\section{Oscillation of a loaded spring}

If a spring is loaded, pulled, and released, then it will oscillate.
The equation of motion can be derived from Hooke's law of spring restoring force (\S\ref{sec:hooke-s-law}).

Wave equation
Second-order differential equation
Water wave
D'Alembert's waves?

How do we describe oscillation? Periodic motion? Harmonic motion?

How do we describe diffusion?
How do we derive the wave equation from the diffusion equation?

% https://en.wikipedia.org/wiki/Fick%27s_laws_of_diffusion
% https://en.wikipedia.org/wiki/Continuity_equation
% https://en.wikipedia.org/wiki/Diffusion_equation#Derivation
The diffusion equation is derived from the continuity equation and Fick's laws of diffusion.
Assume a homogeneous (made of the same thing everywhere) and isotropic (behaving the same everywhere) medium.

Diffusion: the rate of diffusion is proportional to the gradient (Fick's first law)?
Discretely, the change at $x$ over a time step \(\tau\) is proportional to how much the neighbors differ from the value at $x$:
\[ f(x,t+\tau) - f(x,t) = c \cdot \frac{[f(x - h, t) - f(x,t)] + [f(x + h, t) - f(x,t)]}{2} \]
Taylor-expanding the right-hand side in \(h\), the first-order terms cancel and the leading term is \(\tfrac{1}{2} c h^{2} \, \partial_x^2 f\).
Dividing both sides by \(\tau\) and letting \(h, \tau \to 0\) with \(D = c h^{2}/(2\tau)\) held fixed gives the diffusion (heat) equation
\[ \frac{\partial f}{\partial t} = D \, \frac{\partial^{2} f}{\partial x^{2}} \]
and, in a homogeneous isotropic medium in several dimensions,
\[ \frac{\partial f}{\partial t} = D \, \nabla^{2} f. \]
Note that this equation is first-order in time, while the wave equation is second-order in time.

How do we describe waves?
How do we describe waves on a string?
Pulse on a string? Pulse on a chain of springs? Replace the springs with more, smaller springs?

% https://en.wikipedia.org/wiki/D%27Alembert%27s_formula
A function \(f\) has \emph{period} \(p\) iff \(f(x+p) = f(x)\) for all \(x\).

Let \(f(x,t)\) be the \emph{amplitude} of the wave at position \(x\) and time \(t\).
Let the oscillator be at position \(0\).
Let \(g\) be an unknown function.
Flow: \(f(x,t + dt) - f(x,t) = g(c,f(x,t),f(x-dx,t),f(x+dx,t))\)?
A first-order-in-time update such as \(f(x,t + dt) - f(x,t) = [f(x-dx,t)-f(x,t)] + [f(x+dx,t)-f(x,t)]\) gives diffusion, not a wave;
a wave needs the second time difference,
\(f(x,t+dt) - 2f(x,t) + f(x,t-dt) \propto f(x-dx,t) + f(x+dx,t) - 2f(x,t)\),
whose continuum limit is the wave equation \(\partial_t^2 f = v^2 \partial_x^2 f\).

\section{Velocities}

Propagation velocity
Phase velocity
Group velocity

\section{Light wave}

\section{Fermat's principle of least time}

Light takes the path that takes the least time.

\section{Snell\textendash{}Descartes law of refraction}

% https://en.wikipedia.org/wiki/Snell%27s_law
% https://en.wikipedia.org/wiki/Snell%27s_law#History
\begin{equation}
\frac{\sin \theta_1}{\sin \theta_2} = \frac{v_1}{v_2} = \frac{\lambda_1}{\lambda_2} = \frac{n_2}{n_1}
\end{equation}

Descartes 1637 \emph{Dioptrics}, Huygens 1678: Huygens\textendash{}Fresnel principle.

Snell's law can be derived from Fermat's principle?
Snell's law can be derived from Huygens\textendash{}Fresnel principle?
% https://en.wikipedia.org/wiki/Huygens%E2%80%93Fresnel_principle

\section{Optics}

\emph{Wavenumber} is?
\emph{Wavelength} is?
\emph{Frequency} is?
\emph{Phase speed} is?
\emph{Group velocity} is?
\emph{Dispersion relation} is?
\emph{Doppler effect} is? Expanding universe?
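One way to answer the question above (Snell's law from Fermat's principle), as a sketch:
put the interface on the $x$-axis, the source at $(0,a)$ in medium 1 (speed \(v_1\)), the destination at $(d,-b)$ in medium 2 (speed \(v_2\)), and the crossing point at $(x,0)$.
The travel time is
\[ T(x) = \frac{\sqrt{x^2+a^2}}{v_1} + \frac{\sqrt{(d-x)^2+b^2}}{v_2} \]
Fermat's principle makes the travel time stationary, so
\[ T'(x) = \frac{x}{v_1\sqrt{x^2+a^2}} - \frac{d-x}{v_2\sqrt{(d-x)^2+b^2}} = \frac{\sin\theta_1}{v_1} - \frac{\sin\theta_2}{v_2} = 0 \]
which is Snell's law, since \(n_i = c/v_i\) gives
\[ \frac{\sin\theta_1}{\sin\theta_2} = \frac{v_1}{v_2} = \frac{n_2}{n_1}. \]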
\section{Reflection} \section{Diffraction} \section{Diffusion} \section{Dispersion} \section{Interference} \section{Superposition} \section{Fresnel spot} \section{Transversal wave} \section{Longitudinal wave} \section{Young's double-slit experiment} \section{Isochronic oscillation of a pendulum} \section{Camera obscura} \section{Newton's 1672 prism splits white light into colors?} \section{Young's 1803 double-slit experiment}
\startcomponent ma-cb-en-enumerations \product ma-cb-en \chapter{Numbered definitions} \index{numbered definition} \Command{\tex{defineenumeration}} \Command{\tex{setupenumerations}} With \type{\defineenumeration} you can number text elements like remarks or questions. If you want to make numbered remarks in your document you use: \shortsetup{defineenumeration} For example: \startbuffer[a] \defineenumeration [remark] [location=top, text=Remark, inbetween=\blank, after=\blank] \stopbuffer \typebuffer[a] Now the new commands \type{\remark}, \type{\subremark}, \type{\resetremark} and \type{\nextremark} are available and you can type remarks like this: \startbuffer[b] \remark In the early medieval times Hasselt was a place of pilgrimage. The {\em Heilige Stede} (Holy Place) was torn down during the Reformation. In 1930, after 300 years the {\em Heilige Stede} was reopened. \subremark Nowadays the {\em Heilige Stede} is closed again but once a year an open air service is held on the same spot. \par \stopbuffer \typebuffer[b] \start \getbuffer[a]\getbuffer[b] \stop You can reset numbering with \type{\resetremark} or \type{\resetsubremark} or increment a number with \type{\nextremark} or \type{\nextsubremark}. This is normally done automatically per chapter, section or whatever. You can set up the layout of \type{\defineenumeration} with: \shortsetup{setupenumerations} You can also vary the layout of {\bf Remark} and {\bf Subremark} in the example above by: \starttyping \setupenumeration[remark][headstyle=bold] \setupenumeration[subremark][headstyle=slanted] \stoptyping If a number becomes obsolete you can type: \starttyping \remark[-] \stoptyping If the remark contains more than one paragraph you will have to use the command pair \type{\startremark} $\cdots$ \type{\stopremark} that becomes available after defining {\bf Remark} with \type{\defineenumeration[remark]}. So the example above would look like this: \startbuffer[c] \startremark In the early medieval times Hasselt was a place of pilgrimage. The {\em Heilige Stede} (Holy Place) was torn down during the Reformation. After 300 years in 1930 the {\em Heilige Stede} was reopened. Nowadays the {\em Heilige Stede} is closed again but once a year an open air service is held on the same spot. \stopremark \stopbuffer \typebuffer[c] \start \getbuffer[a]\getbuffer[c] \par \stop \stopcomponent
\section{Spans, Linear Independence and Steinitz Exchange Lemma}
In this section, we shall characterise the properties of the dimension and basis of a vector space.
\begin{definition}[Span of a Family of Vectors]
Let $V$ be a vector space over $F$ and $S\subset V$. We define the span of $S$ to be
$$\langle S\rangle=\operatorname{span}(S)=\left\{\sum_{i=1}^n\lambda_is_i:n\in\mathbb N,\lambda_i\in F,s_i\in S\right\}$$
\end{definition}
That is, $\langle S\rangle$ consists of all possible (finite) linear combinations of elements of $S$.
By convention, we say $\langle \varnothing\rangle=\{0\}$.
Note also that the span of $S$ is essentially the minimal subspace of $V$ containing $S$.
\begin{example}
1. Take $V=\mathbb R^3$ and
$$S=\left\{\begin{pmatrix} 1\\ 0\\ 0 \end{pmatrix},\begin{pmatrix} 0\\ 1\\ 2 \end{pmatrix},\begin{pmatrix} 3\\ -2\\ -4 \end{pmatrix}\right\}$$
then
$$\langle S\rangle=\left\{\begin{pmatrix} a\\ b\\ 2b \end{pmatrix}:a,b\in\mathbb R\right\}$$
2. Take $V=\mathbb R^n$ and let $e_i$ be the vector in $V$ that only has $1$ at the $i^{th}$ entry and zero elsewhere, then $\langle \{e_i\}_{i=1}^n\rangle=V$.\\
3. Let $V=\mathbb R^X$ and $S_x:X\to\mathbb R$ be such that $S_x(y)=1_{x=y}$. Then $\langle \{S_x\}_{x\in X}\rangle$ is the set of functions $f\in\mathbb R^X$ that have finite support.
\end{example}
\begin{definition}
Let $V$ be a vector space over $F$ and $S\subset V$. We say $S$ spans $V$ if $\langle S\rangle =V$.
\end{definition}
\begin{example}
Take $V=\mathbb R^2$, then any set of two non-parallel vectors would span $V$.
\end{example}
\begin{definition}
A vector space $V$ over a field $F$ is finite dimensional if there is a finite $S\subset V$ that spans $V$.
\end{definition}
\begin{example}
Let $V=\mathbb P[x]$ be the set of polynomials over $\mathbb R$ and $V_n=\mathbb P_n[x]$ the set of real polynomials with degree at most $n$.
Then $V_n=\langle\{1,x,\ldots,x^n\}\rangle$ is finite dimensional, but $V$ is not finite dimensional as any finite set of polynomials must be contained in $V_n$ where $n$ is the maximal degree of polynomials in that set.
\end{example}
As $\mathbb N$ is well-ordered, there must be a minimum number of vectors that can possibly span $V$.
We then focus on how to capture this minimality.
\begin{definition}[(Linear) Independence]
Let $V$ be a vector space over $F$. We say $\{v_1,\ldots,v_n\}\subset V$ is (linearly) independent (or is a free family) if for any $\lambda_1,\ldots,\lambda_n\in F$
$$\sum_{i=1}^n\lambda_iv_i=0\implies\forall i,\lambda_i=0$$
On the other hand, this set is not linearly independent if there exist $\lambda_1,\ldots,\lambda_n\in F$ not all zero such that $\sum_{i=1}^n\lambda_iv_i=0$.
\end{definition}
\begin{example}
Let $V=\mathbb R^3$ and
$$v_1=(1,0,0)^\top,v_2=(0,1,0)^\top,v_3=(1,1,0)^\top,v_4=(0,1,1)^\top$$
Then $\{v_1,v_2\}$ is linearly independent.
Note that $v_3\in\langle\{v_1,v_2\}\rangle$, so $\{v_1,v_2,v_3\}$ is not linearly independent.
On the other hand, $v_4\notin\langle\{v_1,v_2\}\rangle$, which as one can verify means that $\{v_1,v_2,v_4\}$ is linearly independent.
\end{example}
\begin{remark}
If the family $\{v_i\}_{1\le i\le n}$ is linearly independent, then none of the $v_i$ is zero.
\end{remark}
\begin{definition}[Basis]
A subset $S\subset V$ is a basis if it is linearly independent and $\langle S\rangle=V$.
\end{definition}
\begin{remark}
When $S$ spans $V$, we say that $S$ is a generating family of $V$.
So a basis is just a linearly independent generating family.
\end{remark}
\begin{example}
1.
Take $V=\mathbb R^n$, then the family $\{e_i\}_{1\le i\le n}$ where $e_i$ is the vector having $1$ at $i^{th}$ entry and zero otherwise is a basis.\\ 2. Take $V=\mathbb C$ over $\mathbb C$, then $\{a\}$ is a basis for any $a\neq 0$.\\ 3. Take also $V=\mathbb C$ but over $\mathbb R$, then $\{1,i\}$ is a basis.\\ 4. Take $V=\mathbb P[x]$ be the set of polynomials in $\mathbb R$ and $S=\{x^n:n\ge 0\}$. Then $S$ is a basis. Worth noting that $|S|=\infty$ in this case. \end{example} \begin{lemma} If $V$ is a vector space over $F$, then $\{v_1,\ldots,v_n\}$ is a basis of $V$ if and only if for any vector $v\in V$, there is a unique decomposition $$v=\sum_{i=1}^n\lambda_iv_i$$ \end{lemma} \begin{remark} If the conditions are true, then the tuple $(\lambda_1,\ldots,\lambda_n)$ (ordered via the ordering one chose on $v_i$) is called the coordinate of $v$ in the basis $(v_i)$. \end{remark} \begin{proof} Trivial. \end{proof} \begin{lemma} If $S$ is a finite set that spans $V$, then a subset of $S$ is a basis of $V$. \end{lemma} \begin{proof} If $S$ is independent, then we are done. Otherwise, there is some $\lambda\neq 0$ and $\lambda_w$ such that there is $v\in S$ with $$\lambda v+\sum_{w\in S\setminus\{v\}}\lambda_ww=0\implies v=\frac{1}{\lambda}\sum_{w\in S\setminus\{v\}}\lambda_ww\in\langle S\setminus\{v\}\rangle$$ Therefore $S\setminus\{v\}$ also spans $V$. We can repeat this process and, by the well-ordering of $\mathbb N$, will reach a basis. \end{proof} \begin{theorem}[Steinitz Exchange Lemma]\label{steinitz} Let $V$ be a finite dimensional vector space over $F$, $\{v_1,\ldots,v_m\}\subset V$ linearly independent, $\{w_1,\ldots,w_n\}\subset V$ a generating set, then:\\ 1. $m\le n$.\\ 2. Up to relabeling, $\{v_1,\ldots,v_m,w_{m+1},\ldots,w_n\}$ spans $V$. \end{theorem} \begin{proof} Suppose $\{v_1,\ldots,v_l,w_{l+1},\ldots,w_n\}$ spans $V$ for some $l<m$, then $$\exists\alpha_i,\beta_i\in F, v_{l+1}=\sum_{i\le l}\alpha_iv_i+\sum_{i>l}\beta_iw_i$$ But $\{v_i\}$ is linearly independent, so one of the $\beta_i$ is nonzero. By relabelling $\beta_{l+1}\neq 0$, then $w_{l+1}\in\langle\{v_1,\ldots,v_l,v_{l+1},w_{l+2}\ldots,w_n\}\rangle$, therefore the set of vectors $\{v_1,\ldots,v_l,v_{l+1},w_{l+2}\ldots,w_n\}$ also spans $V$. The theorem is then obvious by induction. \end{proof} \begin{corollary}\label{dim_well_defined} Let $V$ be a finite dimensional vector space, then any two bases of $V$ have the same cardinality. \end{corollary} \begin{proof} Immediate. \end{proof} This corollary allows us to give a proper definition of the dimension of a vector space. Before we step right into that, another corollary of Theorem \ref{steinitz} can help us to capture important properties of a finite dimensional vector space that will come in handy in further discussions of basis. \begin{corollary} Let $V$ be a vector space with $\dim V=n$, then:\\ 1. Any independent set of vectors has size at most $n$. The size is exactly $n$ iff this set is a basis.\\ 2. Any spanning set has size at least $n$. The size is exactly $n$ iff this set is a basis. \end{corollary} \begin{proof} Obvious. \end{proof}
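To see the exchange procedure of Theorem \ref{steinitz} at work, consider a small concrete case.
\begin{example}
Take $V=\mathbb R^3$ with the spanning family $\{w_1,w_2,w_3\}=\{e_1,e_2,e_3\}$ and the independent family $v_1=(1,1,0)^\top$, $v_2=(0,1,1)^\top$.
Since $v_1=e_1+e_2$ has a nonzero coefficient on $e_1$, we may exchange $e_1$ for $v_1$: indeed $e_1=v_1-e_2$, so $\{v_1,e_2,e_3\}$ still spans $V$.
Next, write $v_2=0\cdot v_1+e_2+e_3$; by independence of $\{v_1,v_2\}$ the coefficients on the remaining $w$'s cannot all vanish, and here the coefficient on $e_2$ is nonzero, so we exchange $e_2$ for $v_2$ and obtain the spanning family $\{v_1,v_2,e_3\}$.
Thus $m=2\le n=3$ and, up to relabelling, $\{v_1,v_2,w_3\}$ spans $V$, exactly as the theorem states; in this case it is even a basis.
\end{example}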
\documentclass[11pt,a4paper,leqno]{extarticle} \usepackage[margin=1in]{geometry} \usepackage[utf8]{inputenc} \usepackage{booktabs} % for toprule, midrule and bottomrule \usepackage{adjustbox} \usepackage{amsmath} \usepackage{bbold} \usepackage{etoolbox} \usepackage{setspace} % for \onehalfspacing and \singlespacing macros \usepackage[hidelinks]{hyperref} \usepackage{array} \usepackage{graphicx} \usepackage{setspace} \usepackage{caption} \usepackage{pdflscape} \usepackage{caption} \usepackage{tabularx} \usepackage{authblk} \usepackage{float} \usepackage{siunitx} \usepackage{titlesec} \usepackage{pgfplots} \usepackage[authoryear]{natbib} \usepackage{scrextend} \usepackage{nicefrac} \usepackage{enumitem} \usepackage{multirow} \usepackage{xcolor} \usepackage{cleveref} \usepackage{varwidth} \usepackage{gensymb} % section headings \renewcommand{\thesection}{\Roman{section}.\hspace{-0.5em}} \renewcommand\thesubsection{\Alph{subsection}.\hspace{-0.5em}} \renewcommand\thesubsubsection{\hspace{-1em}} \newcommand{\subsubsubsection}[1]{\begin{center}{\textit{#1}}\end{center}} \titleformat{\section} {\bf\centering\large}{\thesection}{1em}{} \titleformat{\subsection} {\itshape\centering}{\thesubsection}{1em}{} \titleformat{\subsubsection} {\bf}{\thesubsubsection}{1em}{} % section referencing \crefformat{section}{\S#2#1#3} \crefformat{subsection}{\S#2#1#3} \crefformat{subsubsection}{\S#2#1#3} \crefrangeformat{section}{\S\S#3#1#4 to~#5#2#6} \crefmultiformat{section}{\S\S#2#1#3}{ and~#2#1#3}{, #2#1#3}{ and~#2#1#3} % multiline cells \newcommand{\specialcell}[2][c]{% \begin{tabular}[#1]{@{}c@{}}#2\end{tabular}} % proper caption centering \DeclareCaptionFormat{centerproper}{% % #1: label (e.g. "Table 1") % #2: separator (e.g. ": ") % #3: caption text \begin{varwidth}{\linewidth}% \centering #1#2#3% \end{varwidth}% } % caption set up \captionsetup[table]{ font = {sc}, labelfont = {bf} } % caption set up \captionsetup[figure]{ font = {sc}, labelfont = {bf} } % math claims \newtheorem{theorem}{Theorem} \newtheorem{corollary}[theorem]{Corollary} \newtheorem{proposition}{Proposition} \newenvironment{proof}[1][Proof]{\noindent\textbf{#1:} }{\ \rule{0.5em}{0.5em}} % hyperlinks \definecolor{darkblue}{RGB}{0,0,150} \hypersetup{ colorlinks=true, linkcolor = darkblue, urlcolor = darkblue, citecolor = darkblue, anchorcolor = darkblue } % bibliography \makeatletter \renewenvironment{thebibliography}[1] {\section{References}% \@mkboth{\MakeUppercase\refname}{\MakeUppercase\refname}% \list{}% {\setlength{\labelwidth}{0pt}% \setlength{\labelsep}{0pt}% \setlength{\leftmargin}{\parindent}% \setlength{\itemindent}{-\parindent}% \@openbib@code \usecounter{enumiv}}% \sloppy \clubpenalty4000 \@clubpenalty \clubpenalty \widowpenalty4000% \sfcode`\.\@m} {\def\@noitemerr {\@latex@warning{Empty `thebibliography' environment}}% \endlist} \makeatother % etoolbox \AtBeginEnvironment{quote}{\singlespacing} \begin{document} \title{\singlespacing{\textbf{Efficient Pollution Abatement in Electricity Markets with Intermittent Renewable Energy}}} \author[]{Saketh Aleti\thanks{Saketh Aleti: Graduate Student, Department of Economics, Duke University, 419 Chapel Drive, 213 Social Sciences Bldg., Durham, NC 27708-0097, USA (email: [email protected]).} \, and Gal Hochman\thanks{ Gal Hochman: Professor, Department of Agriculture, Food \& Resource Economics, Rutgers University, 116 Cook Office Building, 55 Dudley Road, New Brunswick, NJ 08901, USA (email: [email protected]).}} % \date{\vspace{-1em}} \maketitle % Syntax: \begin{addmargin}[<left 
% indentation>]{<indentation>}
\begin{addmargin}[0.5in]{0.5in}
\textit{In this paper, we present a model of the electricity sector where generation technologies are intermittent. The economic value of an electricity generation technology is given by integrating its production profile with the market price of electricity. We parametrize this model empirically and generate numerical results. The introduction of intermittency results in a non-constant elasticity of substitution between renewable and fossil energy. As a result, the efficacy and welfare effects of carbon taxes and renewable subsidies vary geographically. Subsidizing research into battery technology can mitigate this distributional side effect while complementing traditional policies used to promote renewable energy. } \\

\noindent\textbf{Key words:} renewable energy, intermittency, pollution, environment

\noindent\textbf{JEL Classifications:} Q28, Q41, Q48, Q52, Q55, Q58

\end{addmargin}

\section{Introduction}

Renewable energy technologies have seen considerable adoption over the last few decades \citep{EIArenew}. Unlike the alternatives, wind and solar power are unique in that the amount of energy they supply is intermittent. Consequently, designing economically efficient policies to promote their adoption is not straightforward given that they cannot easily substitute for fossil fuel technologies such as coal power. Moreover, traditional approaches such as the levelized cost of electricity (LCOE) fail to capture the true economic value of intermittent technologies, because they neglect to account for variation in output and prices over time \citep{Joskow2011}.

Some of the literature has approached this problem by constructing numerical models that find the cheapest renewable technology set while accounting for intermittent supply. For instance, \citet{MN2006} model uncertain renewable output with intertemporal generation constraints, while \citet{NCK2007} model the temporal and spatial characteristics of wind output to optimize its deployment in the UK. Other papers study how intermittent technologies affect the market itself; \citet{AC2012} study the interaction between intermittent renewables and traditional reliable sources of energy in decentralized markets, and \citet{Chao2011} models alternative pricing mechanisms for intermittent renewable energy sources. Additionally, \citet{Boren2012} reviews the effects of present public policies used to promote renewables and the challenges posed by intermittency.

Our model comes closest to that of \citet{HH}, who build a peak-load pricing model where the availability of renewable capacity varies stochastically. This stochastic variability models intermittency and negatively affects the adoption of renewables. Furthermore, they simplify their analysis by setting aside complications such as outage costs and rationing rules. Their model finds an S-shaped adoption curve for renewables as they get cheaper. In addition, they find that a Pigouvian tax can properly internalize the costs of fossil fuels in a setting with perfect competition. Like \citeauthor{HH}, we model the equilibrium of a market with dynamic prices and access to both renewable and fossil energy. However, our results differ in key ways, because we take a different approach to modeling intermittency. To highlight this difference, we must first contrast the terms \textit{intermittency} and \textit{reliability}.
By intermittency, we mean predictable changes in output related to physical constraints. For instance, while wind energy output varies over time, if we know the wind speed and angle, we may precisely derive the quantity of energy that a wind farm generates.\footnote{ \citet{Foley2012} provide a review of the literature on the forecasting of wind power. Over time, forecasts have gotten much more accurate, allowing electricity grids to manage wind power intermittency ahead of time.} At the same time, forecasts can never be perfectly accurate, so we may still see unexpected variation in electricity output. This unpredictability is better captured by the notion of reliability; specifically, we refer to the definition provided by the US Department of Energy's \citet{ORNL} -- ``Power reliability can be defined as the degree to which the performance of the elements in a bulk system results in electricity being delivered to customers within accepted standards and in the amount desired.'' Reliability thus concerns the stochastic or unpredictable variation in the output of a technology. Unlike intermittency, this cannot be planned around with 100\% certainty. A practical example may be a wind turbine's systems failing. While we may know the chances of this occurring, it's not always possible to know when it will occur ahead of time; consequently, this may result in a temporary reduction in the quantity and quality of the electricity delivered -- a loss of reliability. In short, when \citeauthor{HH} model renewable output as equal to a base capacity multiplied by a uniform random variable, their treatment is closer to one of reliability than of intermittency.

On the other hand, we model intermittency as defined above by allowing the output of renewable energy to differ between periods according to a known function. For parsimony, we assume that the output of each technology does not vary stochastically; that is, we do not study reliability. Next, we model the electricity sector using a representative firm that chooses and builds capacity from a set of electricity-generating technologies to maximize profit; some of these technologies are intermittent while others have constant output. Then, we consider a representative consumer who purchases varying quantities of electricity in each period in order to maximize utility. People prefer to smooth their electricity consumption over time, so we model our consumer's preferences using a CES function of electricity consumption differentiated by period.\footnote{ The use of a CES function in this way has been explored earlier by \citet{Schwarz}, \citet{Herriges}, \citet{KS1994}, \citet{Aubin}, and \citet{Moha2016}. Their papers empirically estimate the parameters for this function; we provide a more detailed discussion of the empirical literature in our \hyperref[sec:methodology]{Methodology}.} These two sides of the market reach an equilibrium through adjustments in the prices of electricity in each period. Overall, we make many of the same assumptions that \citet{HH} do; we assume dynamic pricing,\footnote{ \citeauthor{HH} motivate models incorporating the dynamic pricing of electricity. They argue that such approaches to pricing will become the norm with further technological advances and coming regulatory changes.} no load rationing, and positive prices.

Then, we parametrize our model empirically. We fit the parameters of the consumer's CES utility function using electricity consumption and price data for each US state.
That is, we estimate the intertemporal elasticity of substitution for electricity consumption; this parameter plays a particularly important role in our model, since it captures the effects of intermittency on demand. Next, to model the supply side, we narrow our framework to a two-period, two-technology setting to focus on the substitutability between renewable and fossil energy. We proxy for renewable and fossil energy using solar and coal; we parametrize each accordingly. Finally, we implement our model numerically and provide suggestions for policy and future models. Our results show that the elasticity of substitution between renewable and fossil energy is not constant. This is important because a significant amount of literature has assumed a CES structure between renewable and fossil energy (see \citet{Pap}); this assumption has been motivated, in part, by the need to capture imperfect substitutability between these two generation technologies as a result of intermittency. But, our model finds that the elasticity of substitution between renewable and fossil energy is far from constant as a \textit{consequence} of intermittency. Specifically, this elasticity appears to vary with the quantity of each energy technology. Hence, we argue against assuming a CES structure. Alternatively, we find that a VES production function can be used to approximate the relationship that our model finds between the elasticity of substitution and the quantities of each technology. That is, this relationship is roughly linear which is exactly what a VES function assumes. Moreover, a VES function is analytically tractable, so it can be implemented in other frameworks without making them overly complicated. Additionally, our results have multiple important implications for environmental/energy policy. Firstly, we find that the welfare burden of a carbon tax and the effect of subsidies on renewable adoption both vary geographically. This variation is a consequence of differences in the intermittency and availability of renewable energy by location. Moreover, this can create a trade-off between equitably and efficiently mitigating climate change. Hence, policymakers should account for geographic variation in energy markets to ensure they handle this equity-efficiency trade-off appropriately. Secondly, we find strong motivation for research subsidies aimed at improving battery technology. That is, better energy storage can greatly increase the elasticity of substitution between renewable and fossil energy. Also, batteries can lessen the distributional side effects of carbon taxes and renewable subsidies by reducing intermittency. Hence, research into improving batteries can complement other policies by making them more equitable and by increasing their impact on the adoption of renewables. Finally, we revisit the results of \citet{Ace2012} and qualitatively discuss the implications of our model in their setting. We now discuss our model in greater detail. \section{Model} \subsection{Consumers} \label{sec:consumers} Consumers purchase a quantity of electricity $Z_t$ in each period $t$. Furthermore, they demand a greater quantity of electricity in certain periods; for instance, consumers need more electricity during the middle of the day more than at night. At the same time, consumers are willing to shift their consumption from one period to another in response to a shift in prices. Overall, these characteristics can be captured using a standard CES utility function. 
Furthermore, since CES preferences are homothetic, we may aggregate the consumers into a single representative consumer. This consumer has the utility function
\begin{equation}
U = \left( \sum_t \alpha_t Z_t^\phi \right)^{1/\phi}
\end{equation}
where $\sigma = 1/(1-\phi)$ is the intertemporal elasticity of substitution for electricity consumption. To ensure that indifference surfaces are convex, we require $\phi < 1$ or, equivalently, $\sigma > 0$; additionally, since electricity consumption increases utility, we must have $\alpha_t > 0$ for all $t$. For simplicity, we also normalize $\sum_t \alpha_t = 1$, so that the $\alpha_t$ can be interpreted as period weights; consequently, we then have $\alpha_t \in (0,1)$. The budget constraint is given by
\begin{equation}
I = \sum_t p_t Z_t
\end{equation}
where $p_t$ is the price of electricity in period $t$ and $I$ is income. Our representative consumer maximizes utility against this budget constraint; the first order conditions of this problem imply:
\begin{align}\label{eq:demand}
Z_t &= \left(\frac{\alpha_t}{p_t} \right)^\sigma \frac{I}{P} \\
P &= \sum_t \alpha_t^\sigma p_t^{1-\sigma}
\end{align}
where $P$ is the price index. Note that this model naturally does not allow for blackouts in equilibrium, since the price of electricity in any period gets arbitrarily large as the quantity of energy consumed in that period approaches 0. Furthermore, note that prices must be positive; although this is sometimes violated in reality, we do not believe that this assumption significantly affects our analysis.

\subsection{Firms}

Secondly, we have firms maximizing profit by picking an optimal set of energy inputs. In reality, electricity markets are fairly competitive, so we can model the set of firms by using a single representative firm that sets marginal revenue equal to marginal cost. We let $X_i$ represent the quantity of energy technology $i$, and we define its output per unit in period $t$ as $\xi_{i,t}$. So, for example, if $i$ is solar power, $X_i$ would be the number of solar panels and $\xi_{i,t}$ may be kWh generated per solar panel in period $t$. Consequently, the energy generated in period $t$, $Z_t$, is given by $\sum_i \xi_{i,t} X_i$. To simplify notation, we have
\begin{align*}
X \equiv \begin{pmatrix} X_1\\ \vdots\\ X_n \end{pmatrix} ,\;
Z \equiv \begin{pmatrix} Z_1\\ \vdots\\ Z_m \end{pmatrix} ,\;
p \equiv \begin{pmatrix} p_1\\ \vdots\\ p_m \end{pmatrix} ,\;
\xi \equiv \begin{pmatrix} \xi_{1,1} & \dots & \xi_{1,m}\\ \vdots & \ddots & \\ \xi_{n,1} & & \xi_{n,m} \end{pmatrix}
\end{align*}
where we have $n$ technologies, $m$ periods, and $Z \equiv \xi^T X$.

A key element of this model is that $X$ does not vary over time. This is important because the time scale of our model must be fairly granular to study the effects of intermittency. So, for instance, we may have $t \in \{ 1, \dots, 24\}$ representing each hour of the day. Over such short time scales, the intermittency of renewables like solar and wind can significantly affect their economic value. Additionally, this allows us to assume that producers cannot modify the quantity of the technologies they deploy. And, thirdly, for certain technologies, this time frame is short enough to assume that $\xi$ is exogenous; that is, technologies like coal power cannot significantly modify their output within a day, so $\xi$ can be treated as a given set of constants.
These latter two assumptions, that $X$ cannot change over time and that $\xi$ is exogenous, are important for parsimony, because they prevent more complicated dynamics from entering our model. So, by assuming a sufficiently short time frame, we can continue our study of intermittency in a relatively simpler setting.

Next, our representative firm sets $X$ once to maximize total profit while facing the cost function $C(X_1, \dots, X_n)$. \citet{HH} note that past literature has argued for concave cost functions in the energy market due to effects such as economies of scale and learning-by-doing; on the other hand, standard cost functions are generally convex. So, like \citeauthor{HH}, we take an intermediate approach by using a linear cost function. Specifically, we have $C(X) \equiv \sum_i c_i X_i \equiv c^T X$ where $c_i$ is the cost per unit of $X_i$. Total profit is given by
\begin{equation}
\Pi = p^T Z - c^T X
\end{equation}
To simplify the algebra, we set the number of technologies equal to the number of periods $(n=m)$. Additionally, we further require that the output per unit of each technology is non-negative in each period and distinct, in the sense that the output per unit of one technology is not a linear combination of those of the other technologies in our set. This then implies that $\xi$ is of full rank and therefore invertible. Now, maximizing profit, we find the first order condition:
\begin{equation}
\frac{\partial \Pi}{\partial X} = 0 \implies p = \xi^{-1} c
\end{equation}
Combining this FOC with the demand equation (\cref{eq:demand}) allows us to find the equilibrium. Generally, the equilibrium results for any number of technologies ($n$) can be obtained analytically, but they are difficult to interpret due to the number of parameters involved.

\subsection{Equilibrium}

For more tractable results, we consider a simpler scenario where $n = m = 2$ and $\sigma = 1$ (Cobb-Douglas); this particular case is described in greater detail in \hyperref[sec:cobbdoug]{Appendix A.A}. These parameters simplify the model enough to allow us to derive comparative statics and determine which sets of parameters lead to edge cases. Furthermore, as we will discuss in our \hyperref[sec:results]{results}, assuming $\sigma=1$ is not far from its empirical estimate.

To start, we discuss the conditions required on the exogenous parameters to avoid edge cases. But first, we define two recurring terms in our analysis: cost efficiency and output efficiency. For some arbitrary period $t$ and technology $i$, we use cost efficiency to refer to $\xi_{i,t}/c_i$ and output efficiency to refer to $\xi_{i,t}$. So, for example, with two technologies $i$ and $j$ and an arbitrary period $t$, technology $i$ is more cost efficient than technology $j$ in period $t$ when $\xi_{i,t}/c_i > \xi_{j,t}/c_j$. Additionally, we say a technology is economical if its equilibrium quantity is strictly positive. In order to avoid an edge case, we require both technologies to be economical; we give the conditions to ensure this for each technology in Proposition 1.

\begin{proposition}
Assume that, for all technologies $i$ and periods $t$, we have $\xi_{i,t} > 0$, $\alpha_t > 0$, and $c_i > 0$. For technology $j$ to be economical, it must meet three conditions with respect to some arbitrary period $s$. Firstly, it must be more cost efficient than any of the other technologies in period $s$. Secondly, it must have a relative advantage in output efficiency in the same period $s$.
And, thirdly, there must be a sufficient amount of demand for energy in this period $s$.
\end{proposition}

The first condition concerns cost efficiency and is fairly intuitive. Consider its contrapositive: if a technology does not have an advantage in cost efficiency in any period, it will not be used; this is fairly obvious. Alternatively, if a technology is the most cost efficient in every single period, it will be the only technology used. The second condition regarding output efficiency actually stems from the invertibility of $\xi$. If $\xi$ were not invertible, then either at least one technology produces nothing in any period, or the output profile of at least one technology is a linear combination of those of the other technologies. In the latter case, some technologies are not economical because their output can be replicated by the other technologies in a more cost effective way. That is, suppose $\xi$ consisted of 3 technologies but was of rank $2$. In this case, we may represent any of these technologies as a linear combination of the other two; call this combination the synthetic version of technology $i$. It is not possible for every synthetic technology $i$ to be more expensive than its original; for at least one $i$, we must have a synthetic technology being cheaper or equal in price to the actual technology. In the first case, synthetic technology $i$ is cheaper, so we do not use the actual technology $i$. In the latter case, we may still eliminate technology $i$ or not use the other two technologies depending on the demand. Overall, in any case, $\xi$ not invertible means that at least one technology is not used. Finally, the demand condition is straightforward; even if a technology is optimal in a certain period, if consumers do not sufficiently demand electricity during that period, then there is little reason to use that technology.

We may also derive the comparative statics for this simplified scenario.

\begin{proposition}
Suppose we are not in an edge case, so that the conditions of Proposition 1 hold for each technology. The equilibrium quantity of a technology is increasing with its output efficiency and decreasing with its cost; at the same time, it is decreasing with the output efficiency of the other technologies and increasing with their cost. Also, suppose that some technology $i$ is the most cost efficient in period $t$. Then, its equilibrium quantity is increasing with respect to the demand parameter $\alpha_t$ and decreasing with respect to the demand parameters of the other periods. Furthermore, again assuming technology $i$ is the most cost efficient in period $t$, the comparative statics of $Z_t$ and $X_i$ are equivalent.
\end{proposition}

The comparative statics with respect to $X$ and its output efficiency and cost are not surprising. On the other hand, the statics for $Z$ may not be immediately obvious. Instead, they follow from the fact that we have $Z \equiv \xi^T X$. That is, suppose we have an arbitrary technology $i$ that is the most cost efficient source of electricity in period $t$. If consumers demanded that 100\% of their energy arrive in period $t$, then relying solely on technology $i$ for energy would be the most economical solution. Consequently, it seems intuitive that the comparative statics of $X_i$ follow through to $Z_t$. This intuition just happens to apply even when other technologies are employed and there is demand in multiple periods. The closed-form expressions sketched below make this link explicit.
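To make these conditions and statics concrete, here is a closed-form sketch of the $n=m=2$, $\sigma=1$ case, read directly off the supply condition $p = \xi^{-1}c$ and the unit-elastic demand $Z_t = \alpha_t I / p_t$ (with the normalization $\sum_t \alpha_t = 1$, the aggregator $P$ equals one); \hyperref[sec:cobbdoug]{Appendix A.A} contains the complete treatment. Writing $D \equiv \xi_{1,1}\xi_{2,2} - \xi_{1,2}\xi_{2,1}$ and labeling the technologies so that technology $1$ has the relative output advantage in period $1$ (so $D > 0$),
\begin{align*}
p_1 &= \frac{\xi_{2,2} c_1 - \xi_{1,2} c_2}{D}, \qquad
p_2 = \frac{\xi_{1,1} c_2 - \xi_{2,1} c_1}{D}, \\
X_1 &= \frac{\xi_{2,2} Z_1 - \xi_{2,1} Z_2}{D}
     = I\left[ \frac{\xi_{2,2}\,\alpha_1}{\xi_{2,2} c_1 - \xi_{1,2} c_2} - \frac{\xi_{2,1}\,\alpha_2}{\xi_{1,1} c_2 - \xi_{2,1} c_1} \right], \\
X_2 &= \frac{\xi_{1,1} Z_2 - \xi_{1,2} Z_1}{D}
     = I\left[ \frac{\xi_{1,1}\,\alpha_2}{\xi_{1,1} c_2 - \xi_{2,1} c_1} - \frac{\xi_{1,2}\,\alpha_1}{\xi_{2,2} c_1 - \xi_{1,2} c_2} \right].
\end{align*}
Positive prices require each technology to be the more cost efficient one in its own period (the first condition of Proposition 1), and $X_1 > 0$ further requires $\alpha_1$ to be sufficiently large relative to $\alpha_2$ (the demand condition); the comparative statics in Proposition 2 can then be read directly off these expressions.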
Similarly, the comparative statics for the share parameters of the utility function, $\alpha$, travel in the opposite direction. A rise in $\alpha_t$ would directly raise the optimal quantity of $Z_t$; hence, whichever technology is most cost effective at producing in period $t$ would be used more. We provide a more detailed and formal discussion of the comparative statics and edge cases in the appendix. \section{Empirical Methodology} \label{sec:methodology} In order to better understand the practical implications of our model, we empirically estimate its parameters and study its implications numerically. We are particularly interested in estimating $\sigma$, the intertemporal elasticity of substitution for electricity consumption. In our \hyperref[sec:Discussion]{discussion}, we explain in detail why $\sigma$ is of interest; to summarize, $\sigma$ implicitly determines how well renewables can substitute for fossil energy. Specifically, if $\sigma > 1$, then electricity consumption in different periods are substitutes; consequently, fossil energy and renewable energy are highly substitutable. On the other hand, if $\sigma< 1$, electricity consumption in different periods are complements; so, fossil and renewable energy are far less substitutable due to the effects of intermittency. The other parameters in our model, $c$, $\xi$, and $\alpha$, are of secondary interest, since they are easier to obtain directly. Now, recall the demand equation from earlier $$Z_t = \left(\frac{\alpha_t}{p_t} \right)^\sigma \frac{I}{P}$$ where $P$ is the price index and $I$ is income. Retail customers pay fixed rates each month for electricity, hence $p_t$ is constant within each month; and, we expect that income $I$ does not vary significantly on a daily basis. Consequently, all variation in intramonthly demand is due to the share parameter $\alpha$ and the elasticity $\sigma$. But, this creates a problem; since retail consumers do not face prices that vary each hour, we cannot estimate $\sigma$ on an hourly basis. A number of other papers have approached this problem using data from real-time pricing experiments. \footnote{ There has also been a large literature that directly estimates the price elasticity of electricity demand without imposing a CES functional form. These papers include \citet{Wolak}, \citet{Zarnikau}, \citet{Woo}, \citet{Zhou}, \citet{Reiss}, \citet{Fan}, and \citet{Deryugina}. These papers estimate own-price elasticities, while some also estimate cross-price elasticities for electricity consumption at different times. Because they do not impose a CES structure, we cannot obtain estimates of $\sigma$ from this literature. } In such experiments, consumers of electricity face prices that vary on an hourly basis; this makes it possible to estimate $\sigma$. Papers that use these experiments include \citet{Schwarz}, \citet{Herriges}, and \citet{KS1994}. The latter two papers estimate $\sigma$ to be around $0.1$ while the paper by \citeauthor{Schwarz} obtains estimates around $0.04$. All three papers study real-time electricity pricing programs for industrial consumers using similar methodologies. Additionally, \citet{Aubin} also provide estimates of the $\sigma$ but using a different methodology; their results find an elasticity of substitution below 0. Under a CES structure, this is problematic, because it would imply upward-sloping demand curves. 
Finally, \citet{Moha2016} also empirically estimates the share parameters for a CES function of this form, but they do not estimate the elasticity of substitution. Overall, the past literature has estimated $\sigma$ by running regressions on the CES demand equation, however we are concerned that this approach suffers from endogeneity. That is, producers may intertemporally substitute electricity generation which essentially means that there is a supply equation affecting prices. For instance, during the oil crisis of 1973, refineries increased gasoline stocks expecting future prices to be higher \citep{genie}. The existence of such behavior implies that estimates of $\sigma$ would be biased downwards unless we properly control for endogeneity. This is particularly important because whether $\sigma$ is closer to 0.1 or 1 significantly changes the practical implications of our model. So, we take a different approach by using a supply instrument to identify the CES demand parameters. Specifically, we use coal prices which affect the supply of electricity but not the demand. Furthermore, we estimate $\sigma$ on a monthly basis. This decision is primarily due to data limitations, since we do not have access to the proprietary data on real-time pricing experiments which the past literature has used. Although we are interested in understanding intertemporal substitution over a shorter time scale (since intermittency plays a larger role in shorter periods), estimates of $\sigma$ on a monthly basis may still be applicable on a smaller time frame. For instance, \citet{Schwarz} estimate $\sigma$ on a daily and hourly basis and find fairly close results; similarly, \citet{Herriges} also find no significant difference in their estimates of $\sigma$ for these two intervals. That is, while a daily basis is 24 times larger than an hourly basis, the estimates for $\sigma$, surprisingly, do not appear to change. Hence, we expect our estimates of $\sigma$ on a monthly basis to not be far from estimates on shorter time scales. At the same time, our estimates of $\sigma$ will likely be larger than that of the literature, because we are controlling for endogeneity. We now define our econometric methodology in detail. \subsection{Theory} We begin with the demand equation from our general model: \begin{align*} Z_t &= \left(\frac{\alpha_t}{p_t} \right)^\sigma \frac{I}{P} \\ P &= \sum_t \alpha_t^\sigma p_t^{1-\sigma} \end{align*} For any pair of electricity outputs $Z_t$ and $Z_s$, we have: $$\frac{Z_t}{Z_s} = \left(\frac{\alpha_t \, p_s}{\alpha_s \, p_t} \right)^\sigma \\$$ Taking logs on both sides and letting $i$ represent different observations, we may rewrite this in a form more suitable for estimation. \begin{align*} \ln (Z_{t, i} / Z_{ s, i}) &= -\sigma \ln (P_{t,i} / P_{s,i}) + \sigma \ln (\alpha_{t,i} / \alpha_{s,i}) \end{align*} Our data differentiates consumption for each state in the US, so we let $i$ refer to a particular state. Additionally, most consumers pay monthly fixed rates for electricity, so we can, at most, estimate this equation on a monthly basis; hence, $t$ and $s$ refer to different months. Also, note that each observation only corresponds to a single state $i$; this is because consumers within each state can substitute consumption across time, but consumers in different states do not substitute consumption with one another. In order to estimate this $\sigma$, we further modify this equation. 
Firstly, note that we cannot observe the demand shifter $\alpha_{t,i}$ directly, so we must replace the $\alpha$ terms with a set of controls that may be responsible for shifts in demand. So, still in general terms, our regression equation is now
\begin{align*}
\ln (Z_{t,i} / Z_{s,i}) &= -\sigma \ln (P_{t,i} / P_{s,i}) + \gamma_{t,i} A_{t,i} + \gamma_{s,i} A_{s,i} + u_i
\end{align*}
where $A$ represents a set of controls for changes in demand while $u_i$ is a normal error term. Note that the control $A_{t,i}$ replaces $\sigma \ln(\alpha_{t,i})$ and likewise for the period $s$ term; this substitution is valid because $\ln(\alpha_{t,i}) \in \mathbb{R}$ and the $\sigma$ term is simply absorbed into the estimated coefficient $\gamma_{t,i}$.

For the demand controls themselves, we consider degree days and the difference in months between periods $t$ and $s$. Firstly, we use degree days rather than temperature due to the aggregation of the data. A degree day is defined as the difference between the average temperature for a day and a base temperature -- our data uses 65 \degree F (18 \degree C). Cooling degree days (CDDs) and heating degree days (HDDs) further split this measure into deviations above and below the base temperature. That is, if the average temperature of a day is $x$ \degree F, its CDD is $\max \{0, x-65\}$ and its HDD is $\max \{0, 65-x\}$. Since these measures are linear, CDDs and HDDs can be aggregated without losing information. This does not hold true for temperature; averaging temperature over a month causes daily variation to be lost. Secondly, the demand for electricity may rise over time. Hence, we include, as a control, the difference in months between times $t$ and $s$; this is represented by $\Delta_{t,s}$. Finally, this panel requires us to consider fixed effects for each state, so we use a fixed effects panel regression. In total, the demand equation is:
\begin{align*}
\ln (Z_{t,i} / Z_{s,i}) &= -\sigma \ln (P_{t,i} / P_{s,i}) + \gamma_{t,i} A_{t,i} + \gamma_{s,i} A_{s,i} + \eta \Delta_{t,s} + u_i \\
&= -\sigma \ln (P_{t,i} / P_{s,i}) + \gamma_{t,i} \left( CDD_{t,i} + HDD_{t,i} \right) \\
&\qquad + \gamma_{s,i} \left( CDD_{s,i} + HDD_{s,i} \right) + \eta \Delta_{t,s} + u_i
\end{align*}
Still, this equation may suffer from bias, since producers can also substitute production over time. To address endogeneity concerns, we define the following supply equation
\begin{align*}
\ln (Z_{t,i} / Z_{s,i}) &= \beta \ln (P_{t,i} / P_{s,i}) + \xi \ln (C_{t,i} / C_{s,i}) + v_{i}
\end{align*}
where $C_{t,i}$ is the average cost of coal used for electricity generation in state $i$ at time $t$ and $v_i$ is a normal error term. Coal prices are independent of the electricity demand error term $u_i$, since residential consumers generally do not use coal for electricity generation; on the other hand, shocks in the price of coal are linked with the supply of electricity. Hence, coal price is a theoretically valid instrument. In total, the reduced form equation is given by:
\begin{equation}
\ln (P_{t,i} / P_{s,i}) = \left( \beta + \sigma \right)^{-1} \left( \gamma_{t,i} A_{t,i} + \gamma_{s,i} A_{s,i} + \eta \Delta_{t,s} - \xi \ln (C_{t,i} / C_{s,i}) + u_{i} - v_i \right)
\end{equation}
where $A_{t,i}$ consists of CDDs and HDDs at time $t$.

Finally, we also consider a semiparametric specification. That is, we allow the error terms $u_i$ and $v_i$ to be non-normal and place the demand controls and instruments in unknown functions.
So, overall, we have: \begin{align} \ln (Z_{ t, i} / Z_{ s, i}) &= -\sigma \ln (P_{t,i} / P_{s,i}) + f \left( A_{t,i}, A_{s,i}, \Delta_{t,s} \right) + u_i \\ \ln (Z_{ t, i} / Z_{ s, i}) &= \beta \ln (P_{t,i} / P_{s,i}) + g \left( \ln (C_{t,i} / C_{s,i}) \right) + v_{i} \end{align} where $f$ and $g$ are unknown, bounded functions. We restrict $\mathrm{cov}(u_i, v_i) = 0$ but allow for the controls and instruments to be correlated. The advantage of this specification is that we can account for the controls or instrument having any nonlinear effects on the regressands. In order to estimate these equations, we use a procedure based on \citet{Newey} that we describe in further detail in \hyperref[sec:AppendixC]{Appendix C}. \noindent \subsection{Data} We collect monthly data from the \cite{EIANetgen} on retail electricity prices and consumption for the 48 contiguous US states from 2010 to 2018. Also from the EIA, we obtain data on the average cost of coal for electricity generation for each state and month.\footnote{ The coal price data set contains a large number of missing values due to privacy concerns; however, we do not expect that these missing values are correlated with the data itself.} We deflate both electricity and coal prices over time using the PCEP Index provided by the \cite{USBEA}. Finally, we collect data on HDDs and CDDs from NOAA's Climate Divisional Database (\citeyear{nCLIMDIV}) for the same panel. Then, we merge these three data sets and trim 1\% of outliers for a total of $818$ state-month observations. We provide descriptive statistics for this data in \autoref{table:stats}. We use this preliminary data set to construct the data required for our regressions. That is, each observation in our estimation equation belongs to a set $(t,s,i)$ consisting of two time periods and a state. Hence, we construct each row in our regression data set using unique combinations of $t,s$ where $t \neq s$ for each state $i$. This gives us a total of 6817 observations. All in all, each observation in our regression data set consists of the following variables: state ($i$), date 1 ($t$), date 2 ($s$), the log difference in electricity consumption between dates 1 and 2, the log difference in the price of electricity, the log difference in the price of coal, the number of CDDs for each date, the number of HDDs for each date, and the difference in months between dates 1 and 2. \vspace{0.15in} \begin{center} [INSERT Table 1: Descriptive Statistics] \end{center} \vspace{0.15in} \section{Results} \label{sec:results} Based on the OLS results reported in \autoref{table:ols}, we estimate the intertemporal elasticity of substitution for electricity consumption $\hat{\sigma} = 1.305$ ($|t| > 24$) when accounting for all degree day covariates and state fixed effects. Additionally, we find that accounting for cooling and heating degree days captures a large amount of variation in demand for electricity; in the fixed effects regressions, adding these controls raises adjusted $R^2$ from 8.5\% to 51.8\%. Also, as expected, the coefficients on the degree day covariates are symmetrical; that is, the coefficient on $CDD_{t}$ is approximately the same as the negative of that on $CDD_{s}$, and the same applies to $HDD_{t}$ and $HDD_{s}$. 
\footnote{It is important to note that the reason they are not perfectly equal is because our regression data consists of each unique \textit{combination} of $(t,s)$ for each $i$; that is, if we have an arbitrary observation $(t,s,i)$ in our data, then $(s,t,i)$ does not also appear. Including these observations would not affect $\sigma$ and they also don't add any further information to our regression. However, they would make the magnitude of the estimated coefficients for CDD$_t$ and CDD$_s$ equivalent (and likewise for HDDs). } Furthermore, we found that electricity consumption seems to rise more in response to CDDs rather than HDDs. Next, we find that the sign on $\Delta_{t,s}$ is positive in all regressions, so electricity consumption rises over time independent of price. Specifically, fit (6) finds a coefficient of $0.000655$ which implies that electricity consumption rises, on average, by $\approx 0.7\%$ per year. Finally, adding state fixed effects seems to raise the estimates of $\sigma$ for all fits. \vspace{0.15in} \begin{center} [INSERT Table 2: OLS Regression Results] \end{center} \vspace{0.15in} To account for endogeneity in the OLS results, we provide results for our IV specification in \autoref{table:iv}. Here, we find a much larger estimate of $\hat{\sigma} = 5.818$ ($|t| > 11.1$) when considering all covariates and fixed effects. F-Statistics on all three specifications are significantly larger than 10, which suggests that the instruments are not weak \citep{SS1997}. These results greatly differ from the literature's estimates probably as a consequence of controlling for endogeneity. That is, ignoring variation on the supply side would bias OLS estimates for the effect of price on demand downwards; by controlling for this, we find larger estimates of $\sigma$ which is expected. Next, with respect to the demand controls, we find results similar to those of OLS. The magnitudes of the coefficients on DDs do not appear to be significantly different. Furthermore, we again find that CDDs affect electricity consumption more than HDDs. But, this time, the coefficients on heating degree days do not appear to be significant. Additionally, in contrast with the OLS results, we find a much larger estimate for the coefficient on $\Delta_{t,s}$ of $0.003$; this implies that electricity consumption seems to rise by about 3.65\% per year. So, overall, our IV results suggest much higher estimates of $\sigma$. \vspace{0.15in} \begin{center} [INSERT Table 3: IV (2SLS) Regression Results] \end{center} \vspace{0.15in} Finally, we control for nonlinear effects using a partially linear IV regression reported in \autoref{table:pariv}. Here, we find much smaller results than 2SLS when controlling for all possible covariates. Specifically, in fit (3), we have $\hat{\sigma} = 0.8847$ ($|t| > 20$). The estimates of $\sigma$ with fewer controls are much larger. However, both the time and degree day controls were highly significant in the OLS and IV results; thus, it seems appropriate to include both controls in this regression. That is, fit (3) is likely the most appropriate specification. Additionally, all of these estimates are significantly different from the 2SLS results which suggests that the 2SLS results are not robust. In other words, our controls and instrument likely have non-linear effects on price and quantity which cannot be captured by linear models. Alternatively, this difference may also be because the true error term is not Gaussian. 
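For reference, the linear 2SLS benchmark reported in \autoref{table:iv} can be reproduced along the lines of the following sketch. It uses the \texttt{linearmodels} Python package, and the file and column names are hypothetical placeholders for the constructed pairwise data set; the replication code linked in the acknowledgments is authoritative.
\begin{verbatim}
import pandas as pd
from linearmodels.iv import IV2SLS

# Hypothetical file and column names for the (t, s, i) pairwise data set.
df = pd.read_csv("regression_panel.csv")

controls = df[["cdd_t", "cdd_s", "hdd_t", "hdd_s", "months_apart"]]
state_fe = pd.get_dummies(df["state"], prefix="st",
                          drop_first=True).astype(float)
exog = pd.concat([controls, state_fe], axis=1).assign(const=1.0)

# dlog_consumption = ln(Z_t/Z_s); dlog_price = ln(P_t/P_s) is instrumented
# by dlog_coal = ln(C_t/C_s). The coefficient on dlog_price estimates -sigma.
iv = IV2SLS(dependent=df["dlog_consumption"], exog=exog,
            endog=df["dlog_price"], instruments=df["dlog_coal"])
res = iv.fit(cov_type="robust")
print(res.summary)
\end{verbatim}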
In any case, we believe that our third semiparametric fit, $\hat{\sigma} = 0.8847$, is the most robust estimate of $\sigma$. \vspace{0.15in} \begin{center} [INSERT Table 4: Partially Linear IV Regression Results] \end{center} \vspace{0.15in} We also consider additional robustness checks for the estimate in fit (3). Firstly, we check if outliers are affecting the results meaningfully. We run the fit (3) regression but trim a larger number of outliers. We find that trimming an additional 1\%, 5\%, or 10\% of outliers does not appear to significantly change the estimate of $\hat{\sigma}$ or meaningfully affect its standard error. Secondly, we test whether any particular states are driving the results. We re-run fit (3) on 48 subsamples; in each subsample, one of the 48 states in our data set is dropped. We plot the results in \autoref{fig:regstaterobust} in the Appendix. The mean and median of the estimates for $\sigma$ are $0.8873$ and $0.8860$; additionally, 95\% of the estimates lie in $(0.8333, 0.9343)$. All of these estimates are highly significant -- the average $|t|$ value is $19.88$ and the smallest $|t|$ is $18.52$. And, although it seems that some of the estimates are statistically significantly different from the full sample estimate of $\hat{\sigma} = 0.8847$ (standard error $= 0.044$), the magnitude of the difference does not appear to be economically meaningful. That is, the largest estimate of $\hat{\sigma}$ is 0.9793, which is only 0.0946 larger than the full sample estimate, while the smallest estimate is 0.8174, which is 0.0674 smaller than the full sample estimate. These differences are not meaningful in the practical application of our model. To elaborate on this and the full economic implications of $\sigma$, we now discuss what these estimates mean in the context of renewable energy policy. \clearpage \section{Discussion} \label{sec:Discussion} \subsection{The Elasticity of Substitution between Renewable and Fossil Energy} To start, we consider how $\sigma$ affects the substitutability of fossil and renewable energy. This is important because sufficient substitutability between these two technologies is required to transition into a greener economy in the future. For instance, \citet{Ace2012} provide a model where they argue that, ``When the two sectors [clean and dirty energy] are substitutable but not sufficiently so, preventing an environmental disaster requires a permanent policy intervention. Finally, when the two sectors are complementary, the only way to stave off a disaster is to stop long-run growth." However, note that while our model sets the intertemporal elasticity of substitution for electricity consumption $\sigma$ to a fixed value, we make no direct assumptions about the elasticity of substitution between different energy technologies. Instead, this latter elasticity is indirectly defined by the substitutability of electricity consumption between periods. To illustrate this, we parametrize and estimate our model numerically. Firstly, let technology 1 be coal power and technology 2 be solar power. These two technologies exemplify consistent fossil energy and intermittent renewable energy. Furthermore, let period $t$ represent peak hours and period $s$ represent off-peak hours. We assume that, holding prices equal, consumers prefer that approximately 60\% of their energy arrive in period $t$ and the remaining 40\% arrive in period $s$; that is, we have $\alpha_t = 0.6$ and $\alpha_s = 0.4$. 
Since the union of both periods makes up only a single day, our model's assumption of an exogenous and constant $\xi$ is fairly reasonable in this context. Next, we normalize all quantity units to a MWh basis. Hence, we let $\xi$ represent the percent of capacity utilized in each period; we assume coal uses 100\% of its capacity in both periods, while solar can access 100\% during peak hours and only 10\% during the off-peak. Finally, we set the cost parameters, given in \$ per MWh, equal to LCOE\footnote{ The LCOE for a generation technology is equal to the sum of its lifetime costs divided by its lifetime energy output. \citet{Joskow2011} explains the flaws of comparing generation technologies solely on the basis of LCOE; but, our use of this measure is unrelated to his critique. That is, he argues that the economic value an intermittent technology should also account for when it produces electricity and the prices of electricity in those periods (see Table 2 of his paper). Our model does exactly this; but, we still need to use LCOE to parametrize the cost of developing capacity. } estimates for 2023 from the \citet{EIALCOE}; specifically, we use estimates for "Solar PV" and "Coal with 30\% CCS" from Table 1b. In total, we have the following parameters. \begin{center} \noindent \textbf{Model Parameters:} $\alpha_t = 0.6$, $\alpha_s = 0.4$, $\xi_1 = (1, \, 1)$, $\xi_2 = (1, \, 0.1)$, $c_1 = 104.3$, $c_2 = 60$. \label{Params: Example A} \end{center} We begin by exploring the implied elasticity of substitution between these two technologies. Recall, for any two commodities $i$ and $j$, the elasticity of substitution $e_{i,j}$ is given by: $$e_{i,j} = \frac{\partial \log (X_i/ X_j)}{\partial \log (c_j/c_i)}$$ where $c$ is their prices. This relationship is of particular interest, because many applied and theoretical models studying renewable and fossil energy impose a CES production structure between the two (see \citet{Pap}). That is, they assume that $e$ is a fixed constant between renewable and fossil energy. In our model, this is not the case; while directly deriving the equation for $e_{1,2}$ in our model is analytically intractable, we numerically estimate how it varies with $\log (c_2/c_1)$ and $\sigma$. \vspace{0.15in} \begin{center} [INSERT Figure 1: The Elasticity of Substitution between Solar and Coal] \end{center} \vspace{0.15in} We plot our numerical estimates of $e_{1, 2}$, the elasticity of substitution between solar and coal power, in \autoref{fig:eosnum}. We generated these results by computing the optimal quantities of each technology given a range of relative prices $c_2/c_1$. Then, we numerically differentiated $\log(X_1/X_2)$ with respect to $\log(c_2/c_1)$ to estimate the elasticity of substitution between solar and coal. This numerically computed elasticity of substitution between solar and coal power, $e_{solar,coal}$, is shown in the lower subplot. We repeat this process with different values of $\sigma$, the elasticity of substitution in the consumer's utility function. Specifically, we use our estimate of $\hat{\sigma} = 0.8847\, (0.044)$ and its 95\% confidence interval $(0.7985, 0.9709)$. Finally, after finding the optimal values of our quantities $X$, we filter all observations of our numerical simulation that correspond to edge cases -- solutions where $X, Z \leq 0$. The first relationship we see in \autoref{fig:eosnum} is that $e_{1,2}$ varies non-linearly with the relative costs of each technology; in particular, it appears to take on a hockey stick shape. 
This shape is a result of both intermittency and costs. Intermittency reduces substitutability as overall energy generation becomes more intermittent. In other words, intermittency is a bigger issue for consumers when their energy supply is not consistent. This causes $e_{1,2}$ to fall when the majority of energy generated comes from solar; naturally, the latter occurs when $\log(c_2/c_1)$ is small (solar is relatively cheap). At the same time, quantities become more sensitive to costs when costs greatly differ.\footnote{We explore this point further in Appendix B in which we consider a numerical example where technologies are minimally intermittent. That is, we again consider a two-period, two-technology example, but assume that $\xi_1$ and $\xi_2$ are nearly constant over time. We plot our results in \autoref{fig:eosrange} and find that the elasticity of substitution takes on a u-shape. } That is, $e_{1,2}$ rises when the relative prices of solar and coal are very different. When these two effects interact, we see that the elasticity of substitution between solar and coal takes on a hockey stick shape. Additionally, note that the elasticity of substitution $e_{1,2}$ between solar and coal becomes larger and more non-linear as $\sigma$ rises. Firstly, the reason why it becomes larger is fairly intuitive. When a consumer's intertemporal elasticity of substitution for electricity consumption $\sigma$ is large, they will be more sensitive to changes in the price of electricity and more willing to substitute their electricity consumption across periods. As a result, the intermittency of solar becomes less of an issue for consumers, but its relative price with respect to coal will play a bigger role. Consequently, this implies that the elasticity of substitution $e_{1, 2}$ will be larger. Hence, in the lower subplot of \autoref{fig:eosnum}, we see that $e_{1, 2}$ rises with $\sigma$ when holding the relative prices of each technology constant. Secondly, we find that $e_{1,2}$ becomes more u-shaped as $\sigma$ rises. This is because larger values of $\sigma$ weaken the disincentive created by intermittency. Consequently, the shape of the elasticity of substitution $e_{1,2}$ becomes more dependent on the relative costs. As argued earlier, when costs differ greatly between technologies, the elasticity of substitution $e_{1,2}$ is higher. \subsection{Implications for Environmental Policy} Overall, our results suggest that the elasticity of substitution between renewable and fossil energy is not constant. Furthermore, it appears to fall with the level of intermittency. What implications does this result have? \subsubsection{Revisiting Past Models} To start, we consider how a variable elasticity of substitution affects past models of the energy sector. As an example, consider the results of \citet{Ace2012}. Firstly, they find that the short-run cost of policy intervention is increasing with the elasticity of substitution between clean and dirty technologies.\footnote{ See Corollary 1 of their paper for a proof and intuitive explanation.} Additionally, their paper states that the cost of delaying intervention is increasing with the elasticity of substitution. Secondly, Acemoglu et al. argue that, when the discount rate and the elasticity of substitution between clean and dirty energy ($e$) are sufficiently low, a disaster cannot be avoided under laissez-faire.\footnote{ See Proposition 9 of their paper.} And, finally, Acemoglu et al. 
find that, ``when the elasticity of substitution is high\dots\, a relatively small carbon tax is sufficient to redirect R\&D towards clean technologies." Based on our results, each of these statements has a complementary interpretation in terms of intermittency. For instance, the first statement suggests that the cost of policy intervention is decreasing with the intermittency of clean technologies; the cost of delaying intervention is decreasing with intermittency as well. This is because the elasticity of substitution between renewables and fossil fuels becomes small when renewables are more intermittent. So, in other words, delaying policy intervention will be the most costly in regions with access to clean, non-intermittent energy (such as hydro and geothermal energy). Secondly, when the discount rate is sufficiently low and the intermittency of clean energy is sufficiently high, a disaster cannot be avoided under laissez-faire. The intuition here is that, even if clean energy became relatively cheap, its intermittency would prevent it from adequately substituting for fossil energy. Finally, Acemoglu et al. argue that the size of a carbon tax should be inversely proportional to the elasticity of substitution and should decrease over time. Recall that, in our model, the elasticity of substitution $e$ changes with relative prices; when fossil fuels are relatively cheap, $e$ is large. Hence, this implies we need a relatively small carbon tax early on to spur research. As technological change makes renewables cheaper, fossil fuels will become relatively more expensive so $e$ will fall; thus, the carbon tax will need to increase over time. These dynamics (a rising carbon tax) contrast with the results of Acemoglu et al., who numerically show that an optimal carbon tax should decrease over time when $e$ is sufficiently large. A potential resolution to these conflicting prescriptions may be to take an intermediate route and maintain a constant carbon tax over time. \vspace{0.15in} \begin{center} [INSERT Figure 2: The Price Elasticity of Demand for Coal Power] \end{center} \vspace{0.15in} \subsubsection{Carbon Taxes and their Distributive Consequences} Our results also suggest that carbon taxes can have important distributional consequences. Earlier, we showed that the elasticity of substitution between renewable and fossil fuels $(e)$ is non-constant and falls with the intermittency of present generation. This further implies that the elasticity of demand for generation technologies is non-constant as well. Specifically, the magnitude of the elasticity of demand for non-intermittent energy first falls and then rises with respect to its own price. Using the earlier numerical example of solar and coal power, we show this explicitly in \autoref{fig:coalelas}. As the price of coal power rises significantly beyond its current price, demand gets more elastic because its price becomes the primary factor disincentivizing its use. But, initially, a rise in the cost of coal power causes its demand to become more inelastic. This is because a rise in the price of coal shifts generation towards solar; consequently, generation becomes more intermittent and demand for coal becomes less price sensitive since it is needed to help smooth electricity generation. This relationship should be of concern for policy makers considering a pollution tax. 
Consumers in regions without access to clean, non-intermittent energy will have the most inelastic demand for fossil fuels; hence, they will bear the greatest welfare losses from a tax on pollution/carbon. On the other hand, consumers in regions that have access to dispatchable, renewable energy such as hydropower will see smaller welfare losses; the demand for fossil fuels in these regions will not be as inelastic, because dirty generation can be replaced with non-intermittent, clean generation. All in all, if a carbon tax were implemented federally and its revenue were distributed equally, it may nevertheless function as an inequitable, between-state welfare transfer due to differences in the availability of renewable energy technology. This same argument would apply to carbon quotas. \vspace{0.15in} \begin{center} [INSERT Figure 3: The Effect of Battery Storage on the Elasticity of Substitution between Solar and Coal] \end{center} \vspace{0.15in} \subsubsection{The Case for Subsidizing Battery Research} However, there are alternative policies that can mitigate this distributional side effect. One such policy is a research subsidy for improving battery technologies. Reducing battery costs, improving their storage capacity, and reducing their energy loss can allow intermittent renewables to far more easily substitute for traditional, fossil energy. To understand the magnitude of this effect, we again provide a numerical example using solar and coal. We aim to understand how batteries affect substitutability, so consider a parsimonious model where batteries are used to shift a portion of solar energy output from its high-output period to its low-output period.\footnote{Because we are only interested in the effects of batteries on $e$, we ignore the energy losses and costs of battery storage. Furthermore, if our goal instead were to economically optimize battery storage and deployment, we would have to rewrite our model as a dynamic optimization problem and solve using optimal control theory. However, we set aside these complications and consider a second-best solution where batteries are used to shift a fraction of solar output from its high-output period to its low-output period. But, since we are shifting relatively marginal fractions of energy output, this is likely equivalent to the first-best solution. } Specifically, we initially parametrized solar with $\xi_{2} = (1, 0.1)$, implying that it functions at 100\% of its potential capacity during the peak and at 10\% of its potential capacity during the off-peak; this is far from matching consumer demand. So, we represent solar output with batteries using $\xi_2 = (0.95, 0.15)$ and $\xi_2 = (0.90, 0.20)$; this is equivalent to transferring 5\% and 10\% of peak-period solar output to the off-peak period. We plot our results in \autoref{fig:battery}. It is immediately clear that making solar output more consistent through the day results in a large increase in the elasticity of substitution $e_{1,2}$. Moreover, $e_{1,2}$ no longer tapers off around 4 as solar becomes the dominant source of electricity. Rather, with batteries, cost plays a much larger role in determining the optimal quantity of each technology even when solar is relatively cheap. Overall, shifting even a small fraction of solar output using batteries can significantly improve solar power's substitutability with coal. 
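To make the numerical procedure behind \autoref{fig:eosnum} and \autoref{fig:battery} concrete, the following sketch traces $e_{1,2}$ over a grid of relative costs by numerical differentiation, for the baseline solar profile and the two battery-shifted profiles above. It is a minimal illustration that assumes an interior equilibrium with zero-profit capacity pricing (as in the Cobb-Douglas case of Appendix A) and simply filters out corner solutions; it is not a substitute for the full replication code.
\begin{verbatim}
import numpy as np

def solve_model(c, xi, alpha, sigma, income=1.0):
    """Interior-solution sketch: two periods, two technologies.

    c     : per-unit capacity costs (c1, c2)
    xi    : xi[i, t] = output of one unit of technology i in period t
    alpha : CES share parameters (alpha_t, alpha_s)
    sigma : intertemporal elasticity of substitution
    """
    xi, c, alpha = map(np.asarray, (xi, c, alpha))
    p = np.linalg.solve(xi, c)              # zero profit: xi @ p = c
    if np.any(p <= 0):
        return None                         # no interior equilibrium here
    P = np.sum(alpha**sigma * p**(1.0 - sigma))
    Z = (alpha / p)**sigma * income / P     # CES demand in each period
    X = np.linalg.solve(xi.T, Z)            # Z_t = sum_i xi[i, t] * X_i
    return X if np.all(X > 0) and np.all(Z > 0) else None

def elasticity_path(xi2, sigma=0.8847, c1=104.3, n=400):
    """e_{1,2} = d log(X1/X2) / d log(c2/c1), by finite differences."""
    log_rel_cost = np.linspace(-0.8, 0.2, n)        # illustrative grid
    log_rel_qty = np.full(n, np.nan)
    for j, lrc in enumerate(log_rel_cost):
        X = solve_model((c1, c1 * np.exp(lrc)),
                        [(1.0, 1.0), xi2], (0.6, 0.4), sigma)
        if X is not None:
            log_rel_qty[j] = np.log(X[0] / X[1])
    return log_rel_cost, np.gradient(log_rel_qty, log_rel_cost)

# Baseline solar profile and the two battery-shifted profiles from the text.
for xi2 in [(1.0, 0.10), (0.95, 0.15), (0.90, 0.20)]:
    grid, e12 = elasticity_path(xi2)
\end{verbatim}
The battery scenarios enter only through the shifted availability profile $\xi_2$, exactly as in the parametrization above.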
\vspace{0.15in} \begin{center} [INSERT Figure 4: The Effect of Battery Storage on the Elasticity of Demand for Coal Power] \end{center} \vspace{0.15in} Consequently, batteries can complement reductions in the cost of intermittent renewables and mitigate the distributional side effects of a carbon tax. As shown in \autoref{fig:battery}, the elasticity of substitution between solar and coal rises significantly as solar becomes less intermittent; this implies that a change in the relative price of solar would have a far greater effect on solar adoption if batteries were employed with solar. Hence, if policy makers aim to promote the use of renewables, they should subsidize research that reduces the cost of renewables as well as research that improves battery technology. Using both policy instruments can be more effective than either alone. Additionally, the second benefit of batteries is that they mitigate the distributional problems of a carbon tax. This is because, by reducing the intermittency of renewables, batteries make demand for fossil fuels less inelastic. We specifically show this elasticity would change with another numerical example in \autoref{table:storelas}; like \autoref{fig:battery}, we model batteries that shift solar output towards the off-peak. We then estimate the elasticity of demand for coal power around its initial price; this elasticity is decreasing at an increasing rate with the percent shift in solar output. The practical implications of this are straightforward. Researching and implementing battery technology can reduce the welfare losses from a carbon tax. Moreover, regions without clean, consistent renewables would benefit in particular, because they will be able to combine intermittent technologies with better energy storage to help transition away from fossil fuel energy. In short, subsidizing battery research can reduce some of the unintended distributional consequences of carbon taxes while mitigating environmental damage. \subsubsection{Learning-by-Doing and Intermittency} Lastly, our results suggest that renewable subsidies aimed at correcting the positive externalities of ``learning-by-doing" should also vary geographically. This is because the adoption rate of a renewable technology depends on its intermittency and the pre-existing technologies used for generation. For instance, hydropower plants can easily substitute for fossil fuel technologies, because hydropower is non-intermittent. Consequently, developing a larger initial capacity of hydropower to promote learning-by-doing can be done with a relatively small subsidy. On the other hand, suppose a region's only source of renewable energy is wind power. In this case, the intermittency of wind power reduces its elasticity of substitution with fossil fuel technologies. Thus, relatively larger subsidies are required to raise the capacity of wind power. Furthermore, the intermittency of renewable technologies (especially wind and solar) is linked to the weather which varies geographically. Hence, two regions with access to the same intermittent renewables at the same cost may still experience differing levels of intermittency based on their location. So, optimal renewable subsidies may also vary geographically simply due to differences in the weather. Thirdly, as discussed earlier, the elasticity of substitution between clean and dirty energy rises when prices differ greatly. 
This implies that regions with relatively expensive renewables (relatively cheap fossil energy) can benefit more from subsidies; again, this can vary geographically since the costs of both fossil fuels and renewables vary geographically. So, in total, correcting the externality from learning-by-doing should take into account the geographic differences in the availability of renewable technologies, weather conditions that may influence renewable output, and the relative prices of renewable and fossil energy. This implies that optimal renewable subsidies should differ geographically but only to the extent that the spillovers of learning-by-doing balance out differences in welfare gains. That is, recall that subsidies for renewables have two benefits: promoting learning-by-doing and raising consumer and producer surplus. The first effect can spur innovation that affects technology in all regions while the latter effect is mostly local. Now, consider two edge-case policy solutions targeting one benefit but ignoring the other. The first extreme solution may be to maximize learning by doing while ignoring the effects of subsidies on welfare. This solution would suggest that we subsidize renewables wherever they can best substitute for fossil fuels. The other extreme solution would be to distribute subsidies almost equally to ensure welfare gains are equitable; this assumes policymakers do not want to concentrate welfare gains for some regions but not others. Since we know the effect of subsidies on renewable adoption varies geographically, these two extreme solutions must differ; in other words, there's likely a trade-off between efficiently and equitably promoting renewables. Consequently, ignoring an edge case, an intermediate choice between these two approaches is a valid second-best solution for policymakers looking to balance efficiency and equity. Alternatively, a first-best solution would simply maximize efficiency with the first approach and then make welfare outcomes more equitable with direct monetary transfers. And finally, like before with carbon taxes, improving battery technology can also help mitigate the distributional side effects of efficient renewable subsidies. \subsection{Adjusting Future Models} Given that traditional assumption of a CES relationship between renewable and fossil energy appears to be inaccurate, we offer two alternative approaches for future models. The first approach is to directly integrate our model within a given framework. The second approach is to assume a variable elasticity of substitution (VES) structure\footnote{ The VES function was originally introduced by \citet{VES}.} between clean and dirty energy. These two approaches have their own advantages/disadvantages and serve different purposes. \subsubsection{Direct Integration} The first approach is better suited for numerical models. For example, our model of the energy sector can be embedded within a Computational General Equilibrium (CGE) model. CGEs, specifically ones designed to study energy and environmental policy, tend to assume a CES relationship between generation technologies and treat electricity as a homogeneous good. On the other hand, our representation of the energy sector makes more realistic assumptions and directly models the source of imperfect substitutability between generation technologies -- intermittency. Consequently, embedding our model within a CGE would improve its predictive accuracy by giving it more realistic microfoundations. 
Furthermore, implementation can be done with relatively few modifications to a standard CGE. To start, CGEs already use CES utility to model consumer preferences. In order to integrate our method of modeling electricity consumption, a CGE practitioner need only rewrite preferences as: \begin{equation} U = \left( \sum_{i \neq elc} \alpha_{i} Z_{i}^\eta + \alpha_{elc} \left( \sum_{t=1}^T \alpha_{elc,t} Z_{elc, t}^\phi \right)^{\eta/\phi} \right)^{1/\eta} \end{equation} where the intertemporal elasticity of substitution for electricity consumption is given by $\sigma = 1/(1-\phi)$, the elasticity of substitution between all consumer goods is given by $1/(1-\eta)$, the consumption of each good is given by $Z_{i}$, and the consumption of electricity in each period $t$ is given by $Z_{elc, t}$. This can be simplified to: \begin{align} U &= \left( \left( \sum_{i \neq elc} \alpha_{i} Z_{i}^\eta \right) + \alpha_{elc} Z_{elc}^\eta \right)^{1/\eta} \\ Z_{elc} &= \left( \sum_{t=1}^T \alpha_{elc,t} Z_{elc, t}^\phi \right)^{1/\phi} \end{align} where $Z_{elc}$ is an aggregate electricity good. Next, we consider the production functions for electricity in each period. These are simply linear, since the total electricity produced in a period is equal to the sum of electricity produced by each technology in that period. That is, we have \begin{equation} Z_{elc,t} = \sum_i \xi_{i,t} X_i \end{equation} where $X_i$ is a generation technology and $\xi_{i,t}$ is its output in period $t$. The parameters for $\xi_{i,t}$ must be estimated using additional data. For fossil fuels, $\xi_i$ can remain constant over time. For intermittent renewables, $\xi_{i,t}$ may be set to empirical estimates of the average output per unit of $X_i$ in period $t$.\footnote{ It is important to note that each period $t$ should describe a short time frame. For instance, each period may represent an hour of a day, or, as in our examples, we may have two periods that represent peak and off-peak. If larger periods are chosen, the underlying assumptions behind our model are no longer relevant. That is, technologies like coal and nuclear can change their output over longer periods of time to respond to changes in price; on the other hand, as discussed earlier, we assume here that output in each period is exogenous.} Additionally, the supply-side equations for building capacity in each technology can be modeled using the usual CGE approach; total cost is given by the sum of costs for each input factor and total quantity is given by a CES of the input factors used to build capacity $(X_i)$. Finally, since these functional forms are equivalent\footnote{ Linear production functions are simply a special case of CES production functions where the elasticity of substitution is arbitrarily high.} to those used in standard CGEs, they can be calibrated using the usual procedures and a social accounting matrix. So, overall, implementing our model within a CGE can be done in a fairly simple way. \subsubsection{VES Approximation} \vspace{0.15in} \begin{center} [INSERT Figure 5: The VES Approximation of the Elasticity of Substitution between Solar and Coal] \end{center} \vspace{0.15in} The second approach is to use a variable elasticity of substitution function to model the relationship between clean and dirty energy. A VES function would approximately capture the dynamics of $e$, the elasticity of substitution between clean and dirty technology, while keeping a theoretical model relatively simple. 
The latter is because assuming a VES structure between clean and dirty energy only requires considering one more parameter than a traditional CES function. To be more precise, the VES function is defined in \citet{VES} as \begin{align} Z &= \gamma X_1^{\omega(1-\delta \rho)} \left( X_2 + (\rho - 1) X_1 \right)^{\omega \delta \rho} \\ e &= 1 + \beta (X_1 / X_2) \\ \beta &= (\rho - 1) / ( 1- \delta \rho ) \end{align} \vspace{-4ex} $$\gamma > 0, \quad \omega > 0, \quad0 < \delta < 1, \quad 0 \leq \delta \rho \leq 1 , \quad (X_2/X_1) > -\beta $$ where $\omega, \delta, \rho$, and $\gamma$ are parameters. Additionally, $Z$ is the output, $X_1$ and $X_2$ are the inputs, and $e$ is the elasticity of substitution between $X_1$ and $X_2$. In our case, we can think of $Z$ as electricity output while $X_1$ and $X_2$ represent dirty and clean technology. Note that $e$ is a linear function of $X_1/X_2$; this relationship allows a VES function to approximate the effects of intermittency. To show why, we plot $e$ against its VES approximation $\hat{e}$ for solar and coal in \autoref{fig:ves}. This approximation was fit by running OLS against the numerical results for $e$ while holding the intercept fixed at 1.\footnote{ There also exists a VES function described by \citet{VES} with the form $e = \beta_0 + \beta (X_1/X_2)$. This function may provide a better approximation of $e$, but there is no tractable production function that underlies it for $\beta_0 \neq 1$. Hence, we only consider the case where $\beta_0 = 1$, since the entire point of taking a VES approximation, in this case, is for analytical tractability. } We can see that, as either the capacity of solar or coal dominates the market, the approximation becomes weaker. But, overall, it seems to mimic the shape of the true elasticity of substitution $e$ implied by our model of intermittency. Hence, a VES function is a reasonable alternative to a CES function -- it better models intermittency while also maintaining parsimony. One potential objection to using the VES function is that it assumes the elasticity of substitution $e$ approaches 1 when the majority of generation comes from renewables ($X_1/X_2 \to 0$). And, a VES structure assumes linear $e$ which does not seem to be the case in \autoref{fig:ves}. However, both these assumptions may still be fairly reasonable in the right context. That is, suppose renewable energy was highly intermittent and that the consumer's elasticity of substitution $\sigma = 1$. Firstly, this results in renewable and fossil energy entering a Cobb-Douglas relationship ($e = 1$) as $X_1/X_2 \to 0$ which supports the VES assumption of an intercept of 1. Secondly, no matter the relative prices, whenever the renewable technology is highly intermittent, $e$ becomes perfectly linear with respect to $X_1/X_2$; this supports the VES assumption of linearity. The proofs for both are given in \hyperref[sec:asympeos]{Appendix A.C}. Hence, in these cases, a VES approximation can perfectly match $e$. Additionally, even when consumer's elasticity of substitution is not $1$, $e$ may still be approximately linear; this is shown in \autoref{fig:ves_int} where we consider our estimated $\hat{\sigma} = 0.8847$. In short, the assumptions made by a VES structure may nevertheless be accurate in certain cases. Overall, using a VES function is a reasonable alternative to directly modeling intermittency. It allows theorists to derive policy implications from intermittency without relying too heavily on numerical models. 
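As a point of reference, the constrained fit used for \autoref{fig:ves} amounts to a no-intercept regression of $e - 1$ on $X_1/X_2$. A minimal sketch, with the numerical $(X_1/X_2, e)$ pairs taken as given (for example, from a simulation such as the one sketched earlier), is:
\begin{verbatim}
import numpy as np

def fit_ves_slope(ratio_x1_x2, e_numeric):
    """Estimate beta in e = 1 + beta * (X1/X2), intercept fixed at 1."""
    x = np.asarray(ratio_x1_x2, dtype=float)
    y = np.asarray(e_numeric, dtype=float) - 1.0   # subtract fixed intercept
    mask = np.isfinite(x) & np.isfinite(y)         # drop filtered corner cases
    x, y = x[mask], y[mask]
    return np.sum(x * y) / np.sum(x * x)           # no-constant least squares

# Example usage (arrays come from the numerical results for e_{1,2}):
# beta_hat = fit_ves_slope(x1_over_x2, e12)
\end{verbatim}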
Furthermore, it keeps theory simple since the elasticity of substitution can be represented as a linear function of a single parameter $\beta$. And, this parameter can be estimated econometrically, so theoretical models assuming a VES structure can offer empirically-based results. Finally, it may even be of use for CGE modelers. Its structure is simple enough to be calibrated with a given set of data, while its implementation can produce simple but reasonably accurate models of energy sectors with intermittency. \section{Conclusion} In this paper, we have offered a framework for understanding the economics of intermittent renewable energy that bears in mind the critique offered by \citet{Joskow2011}. That is, our model evaluates the economic feasibility of a technology by integrating its production profile with the market value of electricity in equilibrium. Moreover, this equilibrium is reached using a fairly realistic model of consumer behavior. The empirical parameterization of our model and its numerical results suggest that the elasticity of substitution between renewable and fossil energy is non-constant. We have shown that this has important implications for policy and future models. With respect to policy, we have shown that (i) the welfare effects of carbon taxes and renewable subsidies depend on the intermittency of renewables and thus vary geographically; (ii) the effect that renewable subsidies have on promoting the adoption of renewables varies geographically; (iii) the combination of (i) and (ii) can create a trade-off between efficiently and equitably preventing climate change; (iv) subsidizing battery research can complement other policies by increasing the substitutability of renewable and fossil energy; (v) improving batteries can mitigate the unintentional distributional consequences of carbon taxes and research subsidies. Additionally, we have qualitatively evaluated the results of \citet{Ace2012} in a setting where renewables and fossil fuels have a non-linear elasticity of substitution. Based on their framework, we have further argued that: (vi) the short-run cost of policy intervention is decreasing with intermittency; (vii) the cost of delaying intervention is decreasing with intermittency; (viii) when the discount rate is sufficiently low and the intermittency of renewables is sufficiently high, a disaster cannot be avoided under laissez-faire, so policy intervention is required; (ix) when renewables are moderately intermittent, an optimal carbon tax should remain approximately constant over time. Given how strongly the elasticity of substitution between renewable and fossil energy varies with relative price, future models of intermittency should not assume a CES structure. So, we have shown how to integrate our framework within a numerical model, such as a CGE; this may improve predictive accuracy by providing more realistic microfoundations. On the other hand, theoretical models would be better served by a more analytically tractable alternative. Therefore, we suggest theoretical models use a VES production function to approximate the effect of intermittency on the elasticity of substitution between renewable and fossil energy. Under certain conditions, our own model simplifies to a VES case; hence, the VES production function can reasonably approximate our full model in a more tractable way. While constructing our model, we made several simplifications for tractability. 
For instance, we set aside modeling reliability -- stochastic or otherwise unpredictable variation in output. An aim for future research may be to develop a model of clean and dirty energy that incorporates both intermittency and reliability in a multi-period setting. This could involve integrating our framework with that of \citet{HH}. Additionally, another avenue for future research may be to explicitly model the effects of battery technology and how storage interacts with intermittency and reliability. Such models can offer more accurate predictions/suggestions for policymakers and bring theory closer to reality. \section{Acknowledgments} This work was supported by the Northeast Sun Grant \#5701-RU-SDSU-G640. We would also like to thank Prof. Roger Klein and the participants of the NAREA 2019 Workshop for helpful comments.\\\\ \noindent \textbf{Transparency and Openness Promotion Statement}: The data and code for replication are available \href{https://github.com/SA3291/Energy-Intermittency-Paper.git}{here}. \begin{thebibliography}{9} \bibitem[Acemoglu et al.(2012)]{Ace2012} Acemoglu, D., Aghion, P., Bursztyn, L., \& Hemous, D. 2012. ``The Environment and Directed Technical Change." \textit{American Economic Review} 102(1): 131–166. https://doi.org/10.1257/aer.102.1.131 \bibitem[Adelman(1995)]{genie} Adelman, M. A. 1995. \textit{The Genie Out of the Bottle: World Oil since 1970}. MIT Press. \bibitem[Ambec and Crampes(2012)]{AC2012} Ambec, S., \& Crampes, C. 2012. ``Electricity provision with intermittent sources of energy." \textit{Resource and Energy Economics} 34(3): 319–336. https://doi.org/10.1016/j.reseneeco.2012.01.001 \bibitem[Aubin et al.(1995)]{Aubin} Aubin, C., Fougere, D., Husson, E., \& Ivaldi, M. 1995. ``Real-time Pricing of Electricity for Residential Customers: Econometric Analysis of an Experiment." \textit{Journal of Applied Econometrics} 10(S1): S171-S191. doi:10.1002/jae.3950100510 \bibitem[Borenstein(2012)]{Boren2012} Borenstein, S. 2012. ``The Private and Public Economics of Renewable Electricity Generation." \textit{Journal of Economic Perspectives} 26(1): 67–92. https://doi.org/10.1257/jep.26.1.67 \bibitem[Chao(2011)]{Chao2011} Chao, H. 2011. ``Efficient Pricing and Investment in Electricity Markets with Intermittent Resources." \textit{Energy Policy} 39(7): 3945–3953. https://doi.org/10.1016/j.enpol.2011.01.010 \bibitem[Delarue et al.(2010)]{Delarue} Delarue, E., De Jonghe, C., Belmans, R., and D’Haeseleer, W. 2010. ``Applying Portfolio Theory to the Electricity Sector: Energy versus Power." \textit{Energy Economics} 33(1): 12–23. https://doi.org/10.1016/j.eneco.2010.05.003 \bibitem[Deryugina et al.(2017)]{Deryugina} Deryugina, T., Mackay, A., and Reif, J. 2017. ``The Long-Run Dynamics of Electricity Demand: Evidence from Municipal Aggregation." \textit{NBER Working Paper Series} https://doi.org/10.3386/w23483 \bibitem[EIA(2019a)]{EIANetgen} [dataset] Energy Information Administration. 2019a. ``Net generation, United States, all sectors, annual. Electricity Data Browser." https://www.eia.gov/electricity/data/browser \bibitem[EIA(2019b)]{EIALCOE} Energy Information Administration. 2019b. ``Levelized Cost and Levelized Avoided Cost of New Generation." \textit{Annual Energy Outlook 2019} https://www.eia.gov/outlooks/aeo/pdf/electricity\_generation.pdf \bibitem[EIA(2019c)]{EIArenew} Energy Information Administration. 2019c. ``U.S. Renewable Electricity Generation has Doubled since 2008." 
\textit{Today in Energy} https://www.eia.gov/todayinenergy/detail.php?id=38752 \bibitem[Fan and Hyndman(2011)]{Fan} Fan, S., \& Hyndman, R. 2011. ``The price elasticity of electricity demand in South Australia." \textit{Energy Policy} 39(6): 3709–3719. https://doi.org/10.1016/j.enpol.2011.03.080 \bibitem[Foley et al.(2012)]{Foley2012} Foley, A., Leahy, P., Marvuglia, A., and Mckeogh, E. 2012. ``Current methods and advances in forecasting of wind power generation." \textit{Renewable Energy} 37(1): 1–8. https://doi.org/10.1016/j.renene.2011.05.033 \bibitem[Geroski(2000)]{Geroski2000} Geroski, P. 2000. ``Models of technology diffusion." \textit{Research Policy}. https://doi.org/10.1016/S0048-7333(99)00092-X \bibitem[Helm and Mier(2019)]{HH} Helm, C. and Mier, M. 2019. ``On the Efficient Market Diffusion of Intermittent Renewable Energies." \textit{Energy Economics} 80: 812–830. https://doi.org/10.1016/j.eneco.2019.01.017 \bibitem[Herriges et al.(1993)]{Herriges} Herriges, J., Baladi, S., Caves, D., and Neenan, B. 1993. ``The Response of Industrial Customers to Electric Rates Based Upon Dynamic Marginal Costs." \textit{The Review of Economics and Statistics} 75(3): 446-454. https://doi.org/10.2307/2109458 \bibitem[Joskow(2011)]{Joskow2011} Joskow, P. 2011. ``Comparing the Costs of Intermittent and Dispatchable Electricity Generating Technologies." \textit{American Economic Review} 101(3): 238–241. https://doi.org/10.1257/aer.101.3.238 \bibitem[King and Shatrawka(1994)]{KS1994} King, K. and Shatrawka, P. 1994. ``Customer Response to a Permanent Time-Varying Pricing Program in the United Kingdom." Madison, WI: Laurits R. Christensen Associates. \bibitem[Mohajeryami et al.(2016)]{Moha2016} Mohajeryami, S., Moghaddam, I., Doostan, M., Vatani, B., and Schwarz, P. 2016. ``A Novel Economic Model for Price-Based Demand Response." \textit{Electric Power Systems Research} 135: 1–9. https://doi.org/10.1016/j.epsr.2016.03.026 \bibitem[Musgens and Neuhoff(2006)]{MN2006} Musgens, F., and Neuhoff, K. 2006. ``Modelling Dynamic Constraints in Electricity Markets and the Costs of Uncertain Wind Output." \bibitem[Neuhoff et al.(2007)]{NCK2007} Neuhoff, K., Cust, J. and Keats, K. 2007. ``Implications of intermittency and transmission constraints for renewables deployment." \textit{Cambridge Working Papers in Economics 0711}. \bibitem[Newey(1990)]{Newey} Newey, W. 1990. ``Efficient Instrumental Variables Estimation of Non-linear Models." \textit{Econometrica} 58(4): 809–837. https://doi.org/10.2307/2938351 \bibitem[ORNL(2004)]{ORNL} Oak Ridge National Laboratory 2004. ``Measurement Practices for Reliability and Power Quality. a Toolkit of Reliability Measurement Practices." U.S. Department of Energy. https://info.ornl.gov/sites/publications/Files/Pub57467.pdf \bibitem[Papageorgiou et al.(2017)]{Pap} Papageorgiou, C., Saam, M., and Schulte, P. 2017. ``Substitution Between Clean and Dirty Energy Inputs: A Macroeconomic Perspective." \textit{Review of Economics and Statistics} 99(2): 201–212. https://doi.org/10.1162/REST\_a\_00623 \bibitem[Reiss and White(2005)]{Reiss} Reiss, P., and White, M. 2005. ``Household Electricity Demand, Revisited." \textit{The Review of Economic Studies} 72(3): 853-883. http://www.jstor.org/stable/3700676 \bibitem[Revankar(1971)]{VES} Revankar, N. 1971. ``A Class of Variable Elasticity of Substitution Production Functions." \textit{Econometrica} 39(1): 61–71. https://doi.org/10.2307/1909140 \bibitem[Schwarz et al.(2002)]{Schwarz} Schwarz, P., Taylor, T., Birmingham, M., and Dardan, S. 
2002. ``Industrial Response to Electricity Real-Time Prices: Short Run and Long Run." \textit{Economic Inquiry} 40(4): 597–610. https://doi.org/10.1093/ei/40.4.597 \bibitem[Shahriari and Blumsack(2018)]{SB2018} Shahriari, M., and Blumsack, S. 2018. ``The Capacity Value of Optimal Wind and Solar Portfolios." \textit{Energy} 148: 992–1005. https://doi.org/10.1016/j.energy.2017.12.121 \bibitem[Silverman(1986)]{Silverman} Silverman, B. 1986. \textit{Density Estimation for Statistics and Data Analysis}. London: Chapman and Hall. \bibitem[Zhou and Teng(2013)]{Zhou} Zhou, S. and Teng, F., 2013. ``Estimation of urban residential electricity demand in China using household survey data." \textit{Energy Policy} 61: 394–402. \bibitem[Staiger and Stock(1997)]{SS1997} Staiger, D. and Stock, J. 1997. Instrumental Variables Regression with Weak Instruments. \textit{Econometrica} 65(3): 557–586. https://doi.org/10.2307/2171753 \bibitem[US BEA(2019)]{USBEA} U.S. Bureau of Economic Analysis. 2019. ``Personal consumption expenditures: chain-type price index." \textit{FRED, Federal Reserve Bank of St. Louis}. https://fred.stlouisfed.org/series/PCEPI \bibitem[Vos et al.(2019)]{nCLIMDIV} [dataset] Vos, R.S., Applequist, S., Squires, M., Durre, I., Menne, M.J., Williams, C.N., Fenimore, C., Gleason, K., Arndt, D. 2019. ``NOAA's Gridded Climate Divisional Dataset (CLIMDIV)", NOAA National Climatic Data Center. doi:10.7289/V5M32STR \bibitem[Wolak and Patrick(2001)]{Wolak} Wolak, F. and Patrick, R. 2001. ``The Impact of Market Rules and Market Structure on the Price Determination Process in the England and Wales Electricity Market." \textit{NBER Working Paper Series}. https://doi.org/10.3386/w8248 \bibitem[Woo et al.(1996)]{Woo} Woo, C., Chow, P., and Horowitz, I. 1996. ``Optional Real-Time Pricing of Electricity for Industrial Firms." \textit{Pacific Economic Review} 1(1): 79–92. https://doi.org/10.1111/j.1468-0106.1996.tb00175.x \bibitem[Zarnikau(1990)]{Zarnikau} Zarnikau, J. 1990. ``Customer Responsiveness to Real-Time Pricing of Electricity." \textit{The Energy Journal} 11(4): 99. https://doi.org/10.5547/ISSN0195-6574-EJ-Vol11-No4-6 \end{thebibliography} \clearpage \section{Appendix A: Supplementary Proofs} \subsection{Cobb-Douglas Case with Two Periods \& Two Technologies} \label{sec:cobbdoug} In this section, we consider a simpler case of our general model to better understand its implications. Firstly, we restrict the utility function to its Cobb-Douglas form which is simply the case where the elasticity of substitution $\sigma = 1$. Secondly, we limit the number of periods and technologies to 2. And, thirdly, we normalize the prices such that our representative consumer's income $I$ is $1$. \subsubsection{Equilibrium Results} Firstly, our demand equations simplify to: \begin{align} Z_t &= \alpha_t / p_t \\ Z_s &= \alpha_s / p_s \end{align} where $t$ and $s$ are our two periods. Next, solving for the FOC condition for profit maximization, we have: \begin{align*} p &= \xi^{-1} c \\ p &= \begin{pmatrix} -\dfrac{c_{1}\,\xi _{\mathrm{2s}}-c_{2}\,\xi _{\mathrm{1s}}}{\xi _{\mathrm{1s}}\,\xi _{\mathrm{2t}}-\xi _{\mathrm{1t}}\,\xi _{\mathrm{2s}}} \\[2ex] \dfrac{c_{1}\,\xi _{\mathrm{2t}}-c_{2}\,\xi _{\mathrm{1t}}}{\xi _{\mathrm{1s}}\,\xi _{\mathrm{2t}}-\xi _{\mathrm{1t}}\,\xi _{\mathrm{2s}}} \end{pmatrix} \end{align*} And, substituting back into our demand equations, we find the equilibrium quantities for $Z$ and $X$. 
$$ Z = \begin{pmatrix} \dfrac{\alpha _{t}\,\left(\xi _{\mathrm{1s}}\,\xi _{\mathrm{2t}}-\xi _{\mathrm{1t}}\,\xi _{\mathrm{2s}}\right)}{c_{2}\,\xi _{\mathrm{1s}} - c_{1}\,\xi _{\mathrm{2s}}} \\[2ex] \dfrac{\alpha _{s}\,\left(\xi _{\mathrm{1s}}\,\xi _{\mathrm{2t}}-\xi _{\mathrm{1t}}\,\xi _{\mathrm{2s}}\right)}{c_{1}\,\xi _{\mathrm{2t}}-c_{2}\,\xi _{\mathrm{1t}}} \end{pmatrix} \implies X = \begin{pmatrix} \dfrac{\alpha _{t}\,\xi _{\mathrm{2s}}}{c_{1}\,\xi _{\mathrm{2s}}-c_{2}\,\xi _{\mathrm{1s}}}+\dfrac{\alpha _{s}\,\xi _{\mathrm{2t}}}{c_{1}\,\xi _{\mathrm{2t}}-c_{2}\,\xi _{\mathrm{1t}}} \\[2ex] -\dfrac{\alpha _{t}\,\xi _{\mathrm{1s}}}{c_{1}\,\xi _{\mathrm{2s}}-c_{2}\,\xi _{\mathrm{1s}}}-\dfrac{\alpha _{s}\,\xi _{\mathrm{1t}}}{c_{1}\,\xi _{\mathrm{2t}}-c_{2}\,\xi _{\mathrm{1t}}} \end{pmatrix} $$ Furthermore, we derive restrictions on the parameters $\xi$ and $c$ by assuming $Z, X > 0$. These restrictions are detailed in \autoref{tab:paramrest}. There are two possible sets of symmetrical restrictions. The first set, Case 1, assumes that technology 2 is more cost effective in period $t$, while the second set, Case 2, assumes that technology 1 is more cost effective in period $t$. If a given set of parameters does not fall into either case, we are left with an edge case where one of the technologies is not used. Additionally, these inequalities compare two types of efficiency -- output efficiency and cost efficiency; we define output efficiency as electricity output per unit of input and cost efficiency as electricity output per dollar of input. We refer to the last set of restrictions as mixed, because they relate both cost and output efficiency. \vspace{0.15in} \begin{center} [INSERT Table 7: Parameter Restrictions for $Z, X > 0$] \end{center} \vspace{0.15in} \hfill\\ \begin{proof} We aim to derive conditions on $\xi$ and $c$ required to have positive $Z$ and $X$, so we begin by assuming $X, Z > 0$. Second, since the equations so far are symmetrical, note that there are two symmetrical sets of potential restrictions that we could impose on the parameters. Thus, we first assume the inequality $c_1 \xi_{2t} - c_2 \xi_{1t} > 0$ to restrict ourselves to one of the two cases. This assumption results in the denominator of $Z_s$ being positive. Hence, we must also have $\xi_{1s}\xi_{2t} - \xi_{2s}\xi_{1t} > 0 $ for $Z_s > 0$. This same term appears in the numerator for $Z_t$; hence, its denominator must also be positive: $c_2 \xi_{1s} - c_1 \xi_{2s} > 0$. Now, rewriting these inequalities, we have: \begin{align*} c_1 \xi_{2t} - c_2 \xi_{1t} > 0 &\implies \xi_{2t}/c_2 > \xi_{1t}/c_1 \\ c_2 \xi_{1s} - c_1 \xi_{2s} > 0 &\implies \xi_{1s}/c_1 > \xi_{2s}/c_2 \\ \xi_{1s}\xi_{2t} - \xi_{2s}\xi_{1t} > 0 &\implies \xi_{1s}/\xi_{1t} > \xi_{2s}/\xi_{2t} \\ &\implies \xi_{1t}/\xi_{1s} < \xi_{2t}/\xi_{2s} \end{align*} Note that the latter two restrictions can be derived from the former two. Additionally, we implicitly assume that we have $\xi > 0$. However, this assumption is not necessary, since the invertibility of $\xi$ only requires $\xi_{1t} \xi_{2s} > 0$ or $\xi_{1s} \xi_{2t} > 0$. Instead, we may leave the latter two inequalities in the form $\xi_{1s}\xi_{2t} > \xi_{2s}\xi_{1t}$, which remains valid when values of $\xi$ are equal to $0$. Lastly, the mixed efficiency restrictions come from $X > 0$. 
To start, for $X_1$, we have: \begin{align*} X_1 > 0 &\implies (\alpha_t \xi_{2s})(c_1 \xi_{2t} - c_2\xi_{1t}) + (\alpha_s \xi_{2t})(c_1 \xi_{2s} - c_2 \xi_{1s}) < 0\\ &\implies (\alpha_t \xi_{2s})(c_1 \xi_{2t} - c_2\xi_{1t}) < (\alpha_s \xi_{2t})(c_2 \xi_{1s} - c_1 \xi_{2s}) \\ &\implies (\xi_{2s}/\xi_{2t}) < (\alpha_s (c_2 \xi_{1s} - c_1 \xi_{2s}))/(\alpha_t(c_1 \xi_{2t} - c_2\xi_{1t})) \\ &\implies (\xi_{2s}/\xi_{2t}) < (\alpha_s (\xi_{1s}/c_1 - \xi_{2s}/c_2))/(\alpha_t(\xi_{2t}/c_2 - \xi_{1t}/c_1)) \end{align*} Similarly, for $X_2$, note that only the numerators differ; $\xi_{2s}$ is replaced with $-\xi_{1s}$ and $\xi_{2t}$ is replaced with $-\xi_{1t}$. Hence, we have \begin{align*} X_2 > 0 &\implies (\alpha_t \xi_{1s})(c_1 \xi_{2t} - c_2\xi_{1t}) + (\alpha_s \xi_{1t})(c_1 \xi_{2s} - c_2 \xi_{1s}) > 0\\ &\implies (\xi_{1s}/\xi_{1t}) > (\alpha_s (\xi_{1s}/c_1 - \xi_{2s}/c_2))/(\alpha_t(\xi_{2t}/c_2 - \xi_{1t}/c_1)) \end{align*} To double check, note that combining the inequalities from $X_1>0$ and $X_2 > 0$ leads to $\xi_{2s}/\xi_{2t} < \xi_{1s}/\xi_{1t}$. This is precisely the earlier result obtained from $Z > 0$. Again, it is important to note that we assume $\xi > 0$ to simplify the inequalities for $X_1 > 0$ and $X_2 > 0$. Otherwise, we may leave the inequalities in their original forms and they are still valid when $\xi_{1t} \xi_{2s} > 0$ or $\xi_{1s} \xi_{2t} > 0$. \\ \hfill \end{proof} Let us consider the set of restrictions belonging to Case 1. The first inequality, our initial assumption, states that technology 2 is relatively more cost effective in period $t$. The second inequality claims technology 1 is relatively more cost effective in period $s$. The implications are fairly straightforward; if a technology is to be used, it must have an absolute advantage in cost efficiency in at least one period. The third condition states that the relative output efficiency of technology 2 is greater than that of the first technology in period $t$. And, the fourth condition makes a symmetrical claim but for the technology 1 and period $s$. These latter two restrictions regarding output efficiency enter $Z$ and $X$ through $p$; they are simply a restatement of the invertibility of $\xi$ and can also be derived through the cost efficiency restrictions. The mixed efficiency restrictions are less intuitive. Firstly, note that $\left(\xi_{1s}/c_1 - \xi_{2s}/c_2\right)$ is the difference in cost efficiency for the two technologies in period $s$; this is equivalent to the increase in $Z_s$ caused by shifting a marginal dollar towards technology 1. Similarly, the denominator term $\left( \xi_{2t}/c_2 - \xi_{1t}/c_1 \right)$ represents the decrease in $Z_t$ caused by shifting a marginal dollar towards technology 1. Both these terms are then multiplied by the share parameter of the utility function for their respective time periods. Furthermore, note that $\alpha_t$ $(\alpha_s)$ is the elasticity of utility with respect to $Z_t$ $(Z_s)$. Hence, in total, the mixed efficiency restrictions relate the relative cost efficiencies of each technology with their output efficiency and the demand for energy. So, for example, suppose that consumers prefer, \textit{ceteris paribus}, that nearly all their electricity arrives in period $t$. This would imply that $\alpha_t$ is arbitrarily large relative to $\alpha_s$, which results in the right-hand side of the first mixed efficiency restriction becoming arbitrarily small. 
In more practical terms, suppose that our first technology is coal power and the second is solar power. Although coal power is dispatchable, it does not easily ramp up or down within a day; hence, it is reasonable to apply our model, in which capacities are fixed over time, so long as our time frame is sufficiently short. Hence, we now assume periods $t$ and $s$ represent the peak and off-peak for a day. And, we expect that there is more available solar radiation during peak hours than off-peak hours, since peak hours are usually during the middle of the day. This implies that the output efficiency of solar power is higher in period $t$ due to more available solar radiation. Additionally, since the energy output of a unit of coal is independent of time, we know that the output efficiency of coal is constant. In total, this implies that we have met the output efficiency restrictions, since we have $\xi_{2t}/\xi_{2s} > \xi_{1t}/\xi_{1s}$. Next, we can reasonably assume that coal is more cost effective than solar in the off-peak period when there is less sun; hence, the second cost efficiency restriction is satisfied. Then, for there to be an incentive to use solar power, we must satisfy the first cost-efficiency condition; that is, solar needs to be cost effective during peak hours; otherwise, we hit an edge case where no solar is employed. And, finally, solar must also satisfy the mixed efficiency condition, which essentially implies that there must be sufficient demand for electricity during period $t$, when solar is more effective, for it to be a feasible technology. So, overall, for a technology to be economical, it must meet three conditions: it must be the most cost effective technology for a particular period, it must have a comparative advantage in output efficiency in the same period, and there must be a sufficient amount of demand in that period. %\footnote{This same analysis can be further extended to any $n$ technologies. However, the number restrictions and different cases to ensure $X , Z > 0$ expands very quickly ($O(n!)$). For instance, if we had 3 technologies and 3 periods, we must first assume each technology is more cost effective than the other two in a unique period; this adds 3*2 restrictions. Then, we must make the output efficiency restrictions comparing each pair of technologies for each pair of periods.} \subsubsection{Comparative Statics} The comparative statics are similarly intuitive. The equilibrium quantity of a technology is increasing with its output efficiency and decreasing with its cost per unit. Additionally, the equilibrium quantities for a particular technology move in the opposite direction with respect to the output efficiency and cost of the other technologies. For a practical example, consider again coal and solar power from before. An increase in the output efficiency of solar or a decrease in solar power's cost will reduce the optimal quantity of coal power. Likewise, as coal power's efficiency improves, its adoption rises. To find the effects of $\alpha$ on $X$, we must assume one of the cases of restrictions shown in \autoref{tab:paramrest}. So, again, let us assume Case 1 is true; this implies that $X_2$ is the most cost effective technology in period $t$ and likewise for $X_1$ in period $s$. Firstly, note that $\alpha$ determines the demand for electricity in a period.
Hence, when $\alpha_t$ rises, we see the optimal level of $X_2$ rise as well; likewise, $X_1$ rises with $\alpha_s$. In short, the optimal quantity of a technology rises linearly with the demand for electricity in the period it specializes in. Moreover, these relationships are reversed with respect to demand in each technology's suboptimal period. So, for example, we would expect the use of solar energy to rise when the demand for electricity during peak hours rises, and it would fall when demand for energy in the off-peak rises. On the other hand, use of coal power would rise with off-peak demand and fall with peak demand. This concept carries through for the comparative statics of $Z$. When the output efficiency of technology 1 rises or its cost falls, we see output $Z_s$ rise and output $Z_t$ fall. This is because technology 1 is optimal in period $s$ given the Case 1 restrictions. Likewise, we see symmetrical results for the output with respect to the cost and output efficiency of technology 2; improvements in the efficiency of $X_2$ result in greater output in $Z_t$ and smaller output in $Z_s$. A numerical check of these signs is provided after the proof. \hfill \\ \begin{proof} We begin by deriving the comparative statics of the cost and efficiency parameters with respect to $X$. Firstly, we take derivatives with respect to the cost vector: \begin{align*} \frac{\partial X_1}{\partial c} &= \begin{pmatrix} \dfrac{-\alpha _{t}\,{\xi _{\mathrm{2s}}}^2}{{\left(c_{1}\,\xi _{\mathrm{2s}}-c_{2}\,\xi _{\mathrm{1s}}\right)}^2}-\dfrac{\alpha _{s}\,{\xi _{\mathrm{2t}}}^2}{{\left(c_{1}\,\xi _{\mathrm{2t}}-c_{2}\,\xi _{\mathrm{1t}}\right)}^2}<0 \\ \dfrac{\alpha _{t}\,\xi _{\mathrm{1s}}\,\xi _{\mathrm{2s}}}{{\left(c_{1}\,\xi _{\mathrm{2s}}-c_{2}\,\xi _{\mathrm{1s}}\right)}^2}+\dfrac{\alpha _{s}\,\xi _{\mathrm{1t}}\,\xi _{\mathrm{2t}}}{{\left(c_{1}\,\xi _{\mathrm{2t}}-c_{2}\,\xi _{\mathrm{1t}}\right)}^2}>0 \end{pmatrix}\\ \frac{\partial X_2}{\partial c} &= \begin{pmatrix} \dfrac{\alpha _{t}\,\xi _{\mathrm{1s}}\,\xi _{\mathrm{2s}}}{{\left(c_{1}\,\xi _{\mathrm{2s}}-c_{2}\,\xi _{\mathrm{1s}}\right)}^2}+\dfrac{\alpha _{s}\,\xi _{\mathrm{1t}}\,\xi _{\mathrm{2t}}}{{\left(c_{1}\,\xi _{\mathrm{2t}}-c_{2}\,\xi _{\mathrm{1t}}\right)}^2}>0 \\ \dfrac{-\alpha _{t}\,{\xi _{\mathrm{1s}}}^2}{{\left(c_{1}\,\xi _{\mathrm{2s}}-c_{2}\,\xi _{\mathrm{1s}}\right)}^2}-\dfrac{\alpha _{s}\,{\xi _{\mathrm{1t}}}^2}{{\left(c_{1}\,\xi _{\mathrm{2t}}-c_{2}\,\xi _{\mathrm{1t}}\right)}^2}<0 \end{pmatrix} \end{align*} The first and second terms of $\partial X_1 / \partial c_1$ are clearly both negative, independent of the restrictions on the parameters. Similarly, all terms of $\partial X_1 / \partial c_2$ are positive, independent of any restrictions. Since the structure of this problem is symmetrical with respect to $X_1$ and $X_2$, the same comparative statics apply, but in reverse, for $X_2$. Next, we derive comparative statics for each element of $\xi$.
\begin{alignat*}{1} \frac{\partial X_1}{\partial \xi} &= \begin{pmatrix} \dfrac{\alpha _{s}\,c_{2}\,\xi _{\mathrm{2t}}}{{\left(c_{1}\,\xi _{\mathrm{2t}}-c_{2}\,\xi _{\mathrm{1t}}\right)}^2}>0 & \dfrac{\alpha _{t}\,c_{2}\,\xi _{\mathrm{2s}}}{{\left(c_{1}\,\xi _{\mathrm{2s}}-c_{2}\,\xi _{\mathrm{1s}}\right)}^2}>0 \\ \dfrac{-\alpha _{s}\,c_{2}\,\xi _{\mathrm{1t}}}{{\left(c_{1}\,\xi _{\mathrm{2t}}-c_{2}\,\xi _{\mathrm{1t}}\right)}^2}<0 & \dfrac{-\alpha _{t}\,c_{2}\,\xi _{\mathrm{1s}}}{{\left(c_{1}\,\xi _{\mathrm{2s}}-c_{2}\,\xi _{\mathrm{1s}}\right)}^2}<0 \\ \end{pmatrix}\\ \frac{\partial X_2}{\partial \xi} &= \begin{pmatrix} \dfrac{-\alpha _{s}\,c_{1}\,\xi _{\mathrm{2t}}}{{\left(c_{1}\,\xi _{\mathrm{2t}}-c_{2}\,\xi _{\mathrm{1t}}\right)}^2} <0& \dfrac{-\alpha _{t}\,c_{1}\,\xi _{\mathrm{2s}}}{{\left(c_{1}\,\xi _{\mathrm{2s}}-c_{2}\,\xi _{\mathrm{1s}}\right)}^2} <0\\ \dfrac{\alpha _{s}\,c_{1}\,\xi _{\mathrm{1t}}}{{\left(c_{1}\,\xi _{\mathrm{2t}}-c_{2}\,\xi _{\mathrm{1t}}\right)}^2}>0& \dfrac{\alpha _{t}\,c_{1}\,\xi _{\mathrm{1s}}}{{\left(c_{1}\,\xi _{\mathrm{2s}}-c_{2}\,\xi _{\mathrm{1s}}\right)}^2} >0\\ \end{pmatrix} \end{alignat*} Again, the signs are fairly straightforward. The optimal quantity of $X_1$ increases with its output efficiency in both periods; however, it decreases with the output efficiency of $X_2$ in both periods. Similarly, symmetrical results are shown for $X_2$. Next, we study the effects of $\alpha$ on $X$; this requires us to place some restrictions on the parameters, so we use those belonging to Case 1 in \autoref{tab:paramrest}. With $\alpha \equiv \left( \alpha_t \;\; \alpha_s \right)^T$, \begin{align*} \frac{\partial X_1}{\partial \alpha} &= \begin{pmatrix} \dfrac{\xi _{\mathrm{2s}}}{c_{1}\,\xi _{\mathrm{2s}}-c_{2}\,\xi _{\mathrm{1s}}}<0 \\ \dfrac{\xi _{\mathrm{2t}}}{c_{1}\,\xi _{\mathrm{2t}}-c_{2}\,\xi _{\mathrm{1t}}}>0 \end{pmatrix}\\ \frac{\partial X_2}{\partial \alpha} &= \begin{pmatrix} \dfrac{-\xi _{\mathrm{1s}}}{c_{1}\,\xi _{\mathrm{2s}}-c_{2}\,\xi _{\mathrm{1s}}}>0 \\ \dfrac{-\xi _{\mathrm{1t}}}{c_{1}\,\xi _{\mathrm{2t}}-c_{2}\,\xi _{\mathrm{1t}}}<0 \end{pmatrix} \end{align*} Note that our restrictions imply that $c_1 \xi_{2t} - c_2 \xi_{1t} > 0$ and $c_2 \xi_{1s} - c_1 \xi_{2s} > 0$. From here, the intuition is clear; we assume that $X_2$ is more cost efficient in period $t$, so increases in demand during period $t$ (caused by increases in $\alpha_t$) will increase the optimal quantity of $X_2$. And, the same applies to $X_1$ with respect to period $s$ and $\alpha_s$. Again, due to symmetry, the statics are reversed when the technologies are flipped. Similarly, the signs would also be flipped if we used the restrictions given by Case 2 instead. Next, we derive the comparative statics for $Z$. From our restrictions, we have $\xi_{1s}\xi_{2t} > \xi_{2s}\xi_{1t}$. All the results above follow from this inequality and the cost efficiency restrictions. 
\begin{align*} \frac{\partial Z_t}{\partial c} &= \begin{pmatrix} \dfrac{\alpha _{t}\,\xi _{\mathrm{2s}}\,\left(\xi _{\mathrm{1s}}\,\xi _{\mathrm{2t}}-\xi _{\mathrm{1t}}\,\xi _{\mathrm{2s}}\right)}{{\left(c_{1}\,\xi _{\mathrm{2s}}-c_{2}\,\xi _{\mathrm{1s}}\right)}^2}>0\\ \dfrac{-\alpha _{t}\,\xi _{\mathrm{1s}}\,\left(\xi _{\mathrm{1s}}\,\xi _{\mathrm{2t}}-\xi _{\mathrm{1t}}\,\xi _{\mathrm{2s}}\right)}{{\left(c_{1}\,\xi _{\mathrm{2s}}-c_{2}\,\xi _{\mathrm{1s}}\right)}^2}<0 \end{pmatrix}\\ \frac{\partial Z_s}{\partial c} &= \begin{pmatrix} \dfrac{-\alpha _{s}\,\xi _{\mathrm{2t}}\,\left(\xi _{\mathrm{1s}}\,\xi _{\mathrm{2t}}-\xi _{\mathrm{1t}}\,\xi _{\mathrm{2s}}\right)}{{\left(c_{1}\,\xi _{\mathrm{2t}}-c_{2}\,\xi _{\mathrm{1t}}\right)}^2} <0\\ \dfrac{\alpha _{s}\,\xi _{\mathrm{1t}}\,\left(\xi _{\mathrm{1s}}\,\xi _{\mathrm{2t}}-\xi _{\mathrm{1t}}\,\xi _{\mathrm{2s}}\right)}{{\left(c_{1}\,\xi _{\mathrm{2t}}-c_{2}\,\xi _{\mathrm{1t}}\right)}^2} >0 \end{pmatrix}\\ \frac{\partial Z_t}{\partial \xi} &= \begin{pmatrix} \dfrac{\alpha _{t}\,\xi _{\mathrm{2s}}}{c_{1}\,\xi _{\mathrm{2s}}-c_{2}\,\xi _{\mathrm{1s}}} < 0& \dfrac{-\alpha _{t}\,\xi _{\mathrm{2s}}\,\left(c_{1}\,\xi _{\mathrm{2t}}-c_{2}\,\xi _{\mathrm{1t}}\right)}{{\left(c_{1}\,\xi _{\mathrm{2s}}-c_{2}\,\xi _{\mathrm{1s}}\right)}^2} <0 \\ \dfrac{-\alpha _{t}\,\xi _{\mathrm{1s}}}{c_{1}\,\xi _{\mathrm{2s}}-c_{2}\,\xi _{\mathrm{1s}}} >0& \dfrac{\alpha _{t}\,\xi _{\mathrm{1s}}\,\left(c_{1}\,\xi _{\mathrm{2t}}-c_{2}\,\xi _{\mathrm{1t}}\right)}{{\left(c_{1}\,\xi _{\mathrm{2s}}-c_{2}\,\xi _{\mathrm{1s}}\right)}^2} >0\\ \end{pmatrix}\\ \frac{\partial Z_s}{\partial \xi} &= \begin{pmatrix} \dfrac{-\alpha _{s}\,\xi _{\mathrm{2t}}\,\left(c_{1}\,\xi _{\mathrm{2s}}-c_{2}\,\xi _{\mathrm{1s}}\right)}{{\left(c_{1}\,\xi _{\mathrm{2t}}-c_{2}\,\xi _{\mathrm{1t}}\right)}^2}> 0& \dfrac{\alpha _{s}\,\xi _{\mathrm{2t}}}{c_{1}\,\xi _{\mathrm{2t}}-c_{2}\,\xi _{\mathrm{1t}}} > 0\\ \dfrac{\alpha _{s}\,\xi _{\mathrm{1t}}\,\left(c_{1}\,\xi _{\mathrm{2s}}-c_{2}\,\xi _{\mathrm{1s}}\right)}{{\left(c_{1}\,\xi _{\mathrm{2t}}-c_{2}\,\xi _{\mathrm{1t}}\right)}^2} < 0& \dfrac{-\alpha _{s}\,\xi _{\mathrm{1t}}}{c_{1}\,\xi _{\mathrm{2t}}-c_{2}\,\xi _{\mathrm{1t}}} <0\\ \end{pmatrix} \end{align*} Again, recall that we have $c_1 \xi_{2t} - c_2 \xi_{1t} > 0$ and $c_2 \xi_{1s} - c_1 \xi_{2s} > 0$; the rest follows. And finally, we have: \begin{align*} \frac{\partial Z_t}{\partial \alpha} &= \begin{pmatrix} \dfrac{-\left(\xi _{\mathrm{1s}}\,\xi _{\mathrm{2t}}-\xi _{\mathrm{1t}}\,\xi _{\mathrm{2s}}\right)}{c_{1}\,\xi _{\mathrm{2s}}-c_{2}\,\xi _{\mathrm{1s}}} > 0 \\ 0 \end{pmatrix} \\ \frac{\partial Z_s}{\partial \alpha} &= \begin{pmatrix} 0 \\ \dfrac{\xi _{\mathrm{1s}}\,\xi _{\mathrm{2t}}-\xi _{\mathrm{1t}}\,\xi _{\mathrm{2s}}}{c_{1}\,\xi _{\mathrm{2t}}-c_{2}\,\xi _{\mathrm{1t}}} > 0 \end{pmatrix} \end{align*} These are fairly trivial, since $Z_t = \alpha_t / p_t$ (and $Z_s = \alpha_s/ p_s$) and prices are positive. \\ \hfill \end{proof}
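To complement the proof, the following Python sketch checks a few of the signs above by central finite differences, again at the illustrative calibration used for the figures. It is a sanity check rather than part of the formal argument, and the parameter values are illustrative only.
\begin{verbatim}
# Finite-difference check of selected comparative statics under Case 1.
# Parameters are the illustrative figure calibration, not estimates.
a_t, a_s = 0.6, 0.4
xi_1t, xi_1s, xi_2t, xi_2s = 1.0, 1.0, 1.0, 0.1
c1, c2 = 104.3, 60.0

def solution(c1, c2):
    """Closed-form X and Z for given costs (other parameters held fixed)."""
    X1  = a_t * xi_2s / (c1 * xi_2s - c2 * xi_1s) + a_s * xi_2t / (c1 * xi_2t - c2 * xi_1t)
    X2  = -a_t * xi_1s / (c1 * xi_2s - c2 * xi_1s) - a_s * xi_1t / (c1 * xi_2t - c2 * xi_1t)
    Z_t = a_t * (xi_1s * xi_2t - xi_1t * xi_2s) / (c2 * xi_1s - c1 * xi_2s)
    Z_s = a_s * (xi_1s * xi_2t - xi_1t * xi_2s) / (c1 * xi_2t - c2 * xi_1t)
    return X1, X2, Z_t, Z_s

def diff_c1(index, h=1e-6):
    """Central difference of the chosen outcome with respect to c_1."""
    up, down = solution(c1 + h, c2), solution(c1 - h, c2)
    return (up[index] - down[index]) / (2 * h)

assert diff_c1(0) < 0   # dX1/dc1 < 0
assert diff_c1(1) > 0   # dX2/dc1 > 0
assert diff_c1(2) > 0   # dZt/dc1 > 0, as stated above
assert diff_c1(3) < 0   # dZs/dc1 < 0, as stated above
\end{verbatim}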
%\subsection{Optimal Taxation} % %The following proof is a derivation of the optimal tax $\tau$ on $X_1$ in the case where it produces externalities. The marginal cost of pollution is given by $\gamma$. %\hfill \\ %\begin{proof} We aim to maximize welfare with respect to the tax $\tau$, hence our first order condition is: % \begin{align*} % 0 &= \frac{\partial CS}{\partial \tau} + \frac{\partial PS}{\partial \tau} - \frac{\partial \gamma X_1}{\partial \tau} + \frac{\partial \tau X_1}{\partial \tau} \\ % &= \left( \underbrace{\frac{\partial CS}{\partial p} + \frac{\partial PS}{\partial p}}_{= 0} - \frac{ \partial \gamma X_1}{\partial p} + \frac{\partial \tau X_1}{\partial p} \right) \frac{\partial p}{\partial \tau} \\ % &= \left( 0 - \frac{ \partial \gamma X_1}{\partial p} + \frac{\partial \tau X_1}{\partial p} \right) \frac{\partial p}{\partial \tau} \\ % &= - \gamma \frac{\partial X_1}{\partial \tau} + \tau \frac{\partial X_1}{\partial \tau} % \end{align*} % where the derivatives of producer and consumer surplus are eliminated by the envelope theorem. Therefore, we must have $\tau = \gamma$. \\ % \hfill %\end{proof} \subsection{CES Production as a Special Case} \label{sec:CESspecialcase} Our framework nests the case where there exists a CES production structure between each technology. This occurs when each technology can only produce in a single, unique period; note that this is not a realistic scenario. For instance, this would occur if we had one technology that can only output electricity during the day and another that only outputs electricity at night. In this case, the CES production function's elasticity parameter will be equivalent to that of the consumer's CES utility function -- the intertemporal elasticity of substitution for electricity consumption. \begin{proof} Firstly, note that we can reindex our technologies such that $\xi$ is diagonal, since each technology only produces in one period. Hence, without loss of generality, we have diagonal $\xi$. Next, we may say that the electricity output in period $i$ is given by $Z_i = \xi_{i,i} X_i$. Now, recall that the FOC for profit-maximization is given by $p = \xi^{-1} c$, hence we have $p_i = c_i / \xi_{i,i}$. Combining these equations with the FOC for utility maximization, we have: \begin{align*} \frac{Z_i}{Z_j} &= \left( \frac{\alpha_i p_j}{\alpha_j p_i} \right)^\sigma \\ \implies \frac{ X_i }{X_j} &= \left( \frac{ \alpha_i p_j \xi_{j,j}^{1/\sigma} }{ \alpha_j p_i \xi_{i,i}^{1/\sigma} } \right)^\sigma \\ \implies \frac{ X_i }{X_j} &= \left( \frac{ \alpha_i c_j \xi_{j,j}^{1/\sigma - 1} }{ \alpha_j c_i \xi_{i,i}^{1/\sigma - 1} } \right)^\sigma \end{align*} By definition, the elasticity of substitution between any two arbitrary technologies $i$ and $j$ is constant. Moreover, it can be shown that this FOC can be rearranged to give the following demand equation for each technology $i$ \begin{align} X_i &= \left(\frac{\beta_i}{c_i} \right)^\sigma \frac{I}{P} \\ P &= \sum_i \beta_i^\sigma p_i^{1-\sigma} \end{align} where $\beta_i = \alpha_i \xi_{i,i}^{-\phi}$, $\sigma = 1 / (1-\phi)$, and $I$ is the consumer's income. So, in total, accounting for both the producer and consumer's objectives, we are essentially solving for: \begin{align*} V &= \left( \sum_i \beta_i X_i^\phi \right)^{(1/\phi)} \\ \text{such that\quad} I &= \sum_i c_i X_i \end{align*} This is a standard CES maximization problem. \\ \hfill \end{proof}
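As a quick numerical check of the ratio derived in the proof, the Python snippet below verifies that, with a diagonal $\xi$, the ratio $X_i/X_j$ implied by the zero-profit prices and the utility FOC coincides with the CES expression above. The values of $\sigma$, $\alpha$, $\xi_{i,i}$, and $c_i$ are arbitrary illustrative numbers.
\begin{verbatim}
# CES special case (diagonal xi): the X_i / X_j ratio implied by
# p_i = c_i / xi_ii and the utility FOC equals the CES expression.
# All numbers below are arbitrary illustrative values.
sigma = 0.8847
alpha = (0.6, 0.4)
xi    = (1.0, 0.25)      # xi[i] = xi_{i,i}; each technology produces in one period
c     = (104.3, 60.0)

p = [c[i] / xi[i] for i in range(2)]                      # p_i = c_i / xi_{i,i}
Z_ratio = (alpha[0] * p[1] / (alpha[1] * p[0])) ** sigma  # Z_1 / Z_2 from the FOC
X_ratio = (xi[1] / xi[0]) * Z_ratio                       # X_i = Z_i / xi_{i,i}

ces_ratio = (alpha[0] * c[1] * xi[1] ** (1 / sigma - 1)
             / (alpha[1] * c[0] * xi[0] ** (1 / sigma - 1))) ** sigma
assert abs(X_ratio - ces_ratio) < 1e-12
\end{verbatim}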
\subsection{Asymptotic Elasticity of Substitution} \label{sec:asympeos} Suppose we are in a two-period, two-technology setting with $\sigma = 1$. Furthermore, suppose that the output of our first technology is constant in both periods, $\xi_{1t} = \xi_{1s}$, but the output of our second technology is zero in period $s$, $\xi_{2s} = 0$. And, assume we have the parameter restrictions mentioned earlier in \autoref{tab:paramrest} that ensure $X, Z > 0$. This is a simple case where we have (1) a constant output technology and (2) a highly intermittent technology. We now show that, in this case, the elasticity of substitution approaches $1$ as the relative cost of our second technology $c_2/c_1$ approaches $0$. Furthermore, we show that the elasticity of substitution between $X_1$ and $X_2$ is a linear function of $X_1/X_2$. \begin{proof} Firstly, note that from earlier we have: $$ X = \begin{pmatrix} \dfrac{\alpha _{t}\,\xi _{\mathrm{2s}}}{c_{1}\,\xi _{\mathrm{2s}}-c_{2}\,\xi _{\mathrm{1s}}}+\dfrac{\alpha _{s}\,\xi _{\mathrm{2t}}}{c_{1}\,\xi _{\mathrm{2t}}-c_{2}\,\xi _{\mathrm{1t}}} \\[2ex] -\dfrac{\alpha _{t}\,\xi _{\mathrm{1s}}}{c_{1}\,\xi _{\mathrm{2s}}-c_{2}\,\xi _{\mathrm{1s}}}-\dfrac{\alpha _{s}\,\xi _{\mathrm{1t}}}{c_{1}\,\xi _{\mathrm{2t}}-c_{2}\,\xi _{\mathrm{1t}}} \end{pmatrix} $$ Let $\xi_1 = \xi_{1t} = \xi_{1s}$ and note that $\xi_{2s} = 0$. Hence, we have: $$ X = \begin{pmatrix} \dfrac{\alpha _{s}\,\xi _{\mathrm{2t}}}{c_{1}\,\xi _{\mathrm{2t}}-c_{2}\,\xi _{\mathrm{1}}} \\ \dfrac{\alpha _{t}}{c_{2}}-\dfrac{\alpha _{s}\,\xi _{\mathrm{1}}}{c_{1}\,\xi _{\mathrm{2t}}-c_{2}\,\xi _{\mathrm{1}}} \end{pmatrix} $$ Now, note that $X_1/X_2$ is given by: \begin{align*} \frac{X_1}{X_2} &= \frac{\alpha_s \xi_{2t} \left( c_2 ( c_1 \xi_{2t} - c_2 \xi_{1} ) \right)}{ \left( \alpha_t (c_1 \xi_{2t} - c_2 \xi_{1}) - c_2 \alpha_s \xi_1 \right) (c_1 \xi_{2t} - c_2 \xi_{1})} \\ &= \frac{\alpha_s c_2 \xi_{2t}}{ \alpha_t (c_1 \xi_{2t} - c_2 \xi_{1}) - c_2 \alpha_s \xi_1 }\\ &= \frac{-\alpha _{s}\,c_{2}\,\xi _{\mathrm{2t}}}{\xi _{1}\,\left(\alpha _{s}\,c_{2}+\alpha _{t}\,c_{2}\right)-\alpha _{t}\,c_{1}\,\xi _{\mathrm{2t}}} \\ &= \frac{-\alpha _{s}\,c_{2}\,\xi _{\mathrm{2t}}}{c_2\,\xi_1-\alpha _{t}\,c_{1}\,\xi _{\mathrm{2t}}} \end{align*} where $\alpha_t + \alpha_s = 1$ by definition (see \hyperref[sec:consumers]{Section II.A}). Next, we can see that: $$ \frac{\partial \log(X_1/X_2)}{\partial c_1} = \frac{\alpha _{t}\,\xi _{\mathrm{2t}}}{c_2\,\xi_1-\alpha _{t}\,c_{1}\,\xi _{\mathrm{2t}}} $$ The elasticity of substitution $e_{1,2}$ is given by: \begin{align*} \frac{\partial \log(X_1/X_2)}{\partial \log(c_2/c_1)} &= \frac{\partial \log(X_1/X_2)}{\partial c_1}\frac{\partial c_1}{\partial \log(c_2/c_1)} \\ &= \left( \frac{\alpha _{t}\,\xi _{\mathrm{2t}}}{c_2\,\xi_1-\alpha _{t}\,c_{1}\,\xi _{\mathrm{2t}}} \right) \left( -c_1 \right) \\ &= \frac{-c_1 \alpha _{t}\,\xi _{\mathrm{2t}}}{c_2\,\xi_1-\alpha _{t}\,c_{1}\,\xi _{\mathrm{2t}}}\\ &= \left( \frac{c_2 \xi_1 }{-c_1 \alpha_t \xi_{2t}} + 1 \right)^{-1} \end{align*} Finally, it is simple to see that: $$ \lim_{c_2/c_1 \to 0} \frac{\partial \log(X_1/X_2)}{\partial \log(c_2/c_1)} = 1$$ Additionally, we can see that the elasticity of substitution between $X_1$ and $X_2$ is linear with respect to $X_1/X_2$.
That is, note that we may rewrite the elasticity above as: \begin{align*} \frac{\partial \log(X_1/X_2)}{\partial \log(c_2/c_1)} &= \frac{-c_1 \alpha _{t}\,\xi _{\mathrm{2t}}}{c_2\,\xi_1-\alpha _{t}\,c_{1}\,\xi _{\mathrm{2t}}} \\ &= \left( \frac{-\alpha _{s}\,c_{2}\,\xi _{\mathrm{2t}}}{c_2\,\xi_1-\alpha _{t}\,c_{1}\,\xi _{\mathrm{2t}}} \right) \left( \frac{- \alpha_t c_1 \xi_{2t} }{- \alpha_s c_2 \xi_{2t}} \right)\\ &= \left( \frac{X_1}{X_2} \right) \left( \frac{ \alpha_t c_1 }{\alpha_s c_2 } \right) \end{align*} Hence, we have shown that $e_{1,2}$ can be written as a linear function of $X_1/X_2$. \\ \hfill \end{proof} \pagebreak \section{Appendix B: Supplementary Figures} \label{sec:AppendixB} \autoref{fig:regstaterobust} is a robustness check for fit (3) of \autoref{table:pariv}. We re-estimate fit (3) on subsamples in which each state in our dataset is dropped in turn. In this figure, we plot the regression results; specifically, the estimated coefficients and the inverses of their standard errors. \vspace{0.15in} \begin{center} [INSERT Figure 5: Partially Linear IV Regression Estimates with State Drop Outs] \end{center} \vspace{0.15in} \autoref{fig:eosrange} below models the elasticity of substitution for two technologies that are close to being non-intermittent. That is, for technology 1, we have $\xi_1 = (0.95,\, 1)$, and, for technology 2, we have $\xi_2 = (1, \,0.95)$. We further set their costs, $c_1$ and $c_2$, to equal values and set $\alpha_t = \alpha_s$. This example illustrates how the elasticity of substitution between technologies $e_{1,2}$ would appear with minimal intermittency. We can see that $e_{1,2}$ takes on a U-shape. \vspace{0.15in} \begin{center} [INSERT Figure 6: The Elasticity of Substitution Between Two Minimally Intermittent Technologies] \end{center} \vspace{0.15in} Additionally, we repeat the exercise done to produce \autoref{fig:ves} but with $\xi_2 = (1, 0.01)$. That is, we assume here that solar is far more intermittent. We plot our results below in \autoref{fig:ves_int}. A sketch of the numerical differentiation step behind these elasticity figures is given at the end of this appendix. \vspace{0.15in} \begin{center} [INSERT Figure 7: The VES Approximation of the Elasticity of Substitution between Highly Intermittent Solar and Coal] \end{center} \vspace{0.15in}
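For reference, the numerical differentiation exercise described in the notes to \autoref{fig:eosnum}, \autoref{fig:ves}, and \autoref{fig:ves_int} can be outlined in Python as follows. This is only a sketch: it uses the closed-form ($\sigma = 1$) solution for $X$ derived in the main text rather than re-solving the consumer problem at $\hat{\sigma} = 0.8847$, the calibration is the illustrative one quoted in the figure notes, and the grid for $c_1^*$ is restricted to a region in which the Case 1 restrictions hold. It is not the original plotting code.
\begin{verbatim}
# Outline of the numerical step behind the elasticity figures: solve for X
# over a grid of c_1, then differentiate ln(X1/X2) with respect to -ln(c1/c2).
# Illustrative calibration and sigma = 1 closed form; not the plotting script.
import numpy as np

a_t, a_s = 0.6, 0.4
xi_1t, xi_1s, xi_2t, xi_2s = 1.0, 1.0, 1.0, 0.1
c2 = 60.0

def optimal_X(c1, c2):
    X1 = a_t * xi_2s / (c1 * xi_2s - c2 * xi_1s) + a_s * xi_2t / (c1 * xi_2t - c2 * xi_1t)
    X2 = -a_t * xi_1s / (c1 * xi_2s - c2 * xi_1s) - a_s * xi_1t / (c1 * xi_2t - c2 * xi_1t)
    return X1, X2

c1_grid = np.linspace(0.9 * 104.3, 2 * 104.3, 400)   # region where X1, X2 > 0
X1, X2 = optimal_X(c1_grid, c2)
log_ratio = np.log(X1 / X2)
log_relative_price = -np.log(c1_grid / c2)           # = ln(c2 / c1)

# Elasticity of substitution e_{1,2}: slope of ln(X1/X2) against ln(c2/c1)
e_12 = np.diff(log_ratio) / np.diff(log_relative_price)
print(e_12.min(), e_12.max())
\end{verbatim}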
\pagebreak \section{Appendix C: Econometric Methodology} \label{sec:AppendixC} We aim to estimate $\sigma$ in the following set of equations: \begin{align*} \ln (Z_{ t, i} / Z_{ s, i}) &= -\sigma \ln (P_{t,i} / P_{s,i}) + f \left( A_{t,i}, A_{s,i}, \Delta_{t,s} \right) + u_i \\ \ln (Z_{ t, i} / Z_{ s, i}) &= \beta \ln (P_{t,i} / P_{s,i}) + g \left( \ln (C_{t,i} / C_{s,i}) \right) + v_{i} \end{align*} We base our estimation procedure on \citet{Newey}. To start, for simplicity, let us rewrite the above set of equations as: \begin{align*} Q &= P \beta_d + f(T) + u \\ Q &= P \beta_s + g(W) + v \end{align*} where $\beta_d$ is the parameter of interest. Furthermore, we assume that \begin{align*} E(u \, | \, T, W) &= 0 \\ E(v \, | \, T, W) &= 0 \end{align*} Next, with $\alpha \equiv (\beta_d - \beta_s)^{-1}$ and assuming $\beta_d \neq \beta_s$, note that: \begin{align*} P &= (\beta_d - \beta_s)^{-1} \, \left( g(W) - f(T) + v - u \right) \\ E(P\,|\,T) &= \alpha \left( E(g(W)|T) - f(T) \right)\\ E(P\,|\,W) &= \alpha \left( g(W) - E(f(T)|W) \right) \\ E(P\,|\,T,W) &= \alpha \left( g(W) - f(T) \right) \end{align*} Now, differencing $Q$ with its conditional expectation, we have: \begin{align*} Q - E(Q \,|\,T) &= (P - E(P\,|\,T))\beta_d + (f(T) - E(f(T) \,|\,T)) + (u - E(u|T))\\ &= \big(\alpha(g(W) - E(g(W)|T)) + \alpha(v - u)\big)\beta_d + 0 + u \end{align*} Furthermore, it can be shown that: $$E(P\,|\,T,W) - E(P\,|\,T) = \alpha (g(W) - E(g(W)|T))$$ Hence, we can regress $$(Q - E(Q \,|\,T)) = (E(P\,|\,T,W) - E(P\,|\,T)) \beta_d + u_d $$ to estimate $\beta_d$, where the composite error $u_d = \alpha(v - u)\beta_d + u$ has mean zero conditional on $T$ and $W$. But, we cannot know the true values of these expectations. Hence, we estimate each conditional expectation in this regression by using Nadaraya–Watson kernel regressions. This requires us to trim the data, so we drop 1\% of outliers of $Q, P, T$, and $W$. Additionally, we use the \citet{Silverman} rule-of-thumb to select bandwidths. After estimating $E(Q \,|\,T)$, $E(P\,|\,T,W)$, and $E(P\,|\,T)$ through kernel regressions, we finally regress $$(Q - \hat{E}(Q \,|\,T)) = (\hat{E}(P\,|\,T,W) - \hat{E}(P\,|\,T)) \beta_d + u_d $$ to obtain $\hat{\beta}_d$. Recall the earlier substitution, $Q = \ln (Z_{ t, i} / Z_{ s, i})$ and $P = \ln (P_{t,i} / P_{s,i})$. Hence, $-\beta_d$ corresponds to $\sigma$, the intertemporal elasticity of substitution. \pagebreak \pagebreak \section{Tables and Figures} \begin{table}[!h] \centering \caption{Descriptive Statistics} \label{table:stats} \begin{tabular}{@{\extracolsep{0pt}}lrrrrrrr@{}} \\[-4ex]\hline \hline \\[-1.8ex] {} & Mean & StDev & Min & 25\% & 50\% & 75\% & Max \\ \midrule Electricity Price (\$/kWh) & 0.12 & 0.03 & 0.08 & 0.10 & 0.11 & 0.13 & 0.21 \\ Electricity Load (GWh) & 2454.97 & 2380.24 & 144.00 & 790.50 & 1869.50 & 3315.00 & 18621.00 \\ Coal Price (\$/ton) & 46.00 & 17.68 & 24.04 & 33.43 & 41.40 & 51.83 & 107.49 \\ Cooling Degree Days & 64.66 & 134.97 & 0.00 & 0.00 & 0.00 & 47.25 & 761.00 \\ Heating Degree Days & 443.20 & 412.27 & 0.00 & 48.00 & 367.00 & 754.50 & 1794.00 \\ [0.5ex] \hline \hline \\[-1.8ex] \multicolumn{8}{@{}p{41.5em}@{}}{\textit{Note: } The underlying data covers the 48 contiguous US states from 2011 to 2018; outliers are removed by trimming 1\% of each variable. Each observation corresponds to a particular year, month, and state.
} \\ \end{tabular} \end{table} \clearpage \begin{table}[!h] \centering \caption{OLS Regression Results} \label{table:ols} \small \begin{tabular}{@{\extracolsep{5pt}}lcccccc} \\[-4ex]\hline \hline \\[-1.8ex] & \multicolumn{6}{c}{\textit{Dependent variable:} $\ln (Z_{ t, i} / Z_{ s, i})$} \\ [0.5ex] \cline{2-7} \\[-1.8ex] & (1) & (2) & (3) & (4) & (5) & (6)\\ [0.5ex] \hline \\[-1.8ex] $-\ln (P_{t,i} / P_{s,i})$ & 0.937$^{***}$ & 1.075$^{***}$ & 1.098$^{***}$ & 1.074$^{***}$ & 1.263$^{***}$ & 1.305$^{***}$ \\ & (0.030) & (0.026) & (0.026) & (0.224) & (0.172) & (0.169) \\ & & & & & & \\ $\Delta_{t,s}$ & & & 0.0005$^{***}$ & & & 0.0065$^{*}$ \\ & & & (0.0001) & & & (0.0003) \\ & & & & & & \\ CDD$_t$ & & 1.156$^{***}$ & 1.163$^{***}$ & & 1.164$^{***}$ & 1.174$^{***}$ \\ $\quad(\times 1000^{-1})$ & & (0.017) & (0.017) & & (0.072) & (0.075) \\ & & & & & & \\ CDD$_s$ & & $-$1.143$^{***}$ & $-$1.158$^{***}$ & & $-$1.200$^{***}$ & $-$1.224$^{***}$ \\ $\quad(\times 1000^{-1})$& & (0.025) & (0.026) & & (0.096) & (0.098) \\ & & & & & & \\ HDD$_t$ & & 0.246$^{***}$ & 0.245$^{***}$ & & 0.237$^{***}$ & 0.236$^{***}$ \\ $\quad(\times 1000^{-1})$ & & (0.007) & (0.007) & & (0.031) & (0.031) \\ & & & & & & \\ HDD$_s$ & & $-$0.267$^{***}$ & $-$0.265$^{***}$ & & $-$0.268$^{***}$ & $-$0.263$^{***}$ \\ $\quad(\times 1000^{-1})$ & & (0.008) & (0.008) & & (0.038) & (0.038) \\ & & & & & & \\ Intercept & 0.028$^{***}$ & 0.012$^{*}$ & 0.026$^{***}$ & & & \\ & (0.004) & (0.006) & (0.007) & & & \\ [0.9ex] \hline \\[-1.8ex] State FEs & & & & Yes & Yes & Yes \\ Observations & 6,817 & 6,817 & 6,817 & 6,817 & 6,817 & 6,817 \\ R$^{2}$ & 0.079 & 0.507 & 0.508 & 0.092 & 0.521 & 0.524 \\ Adjusted R$^{2}$ & 0.079 & 0.506 & 0.508 & 0.085 & 0.518 & 0.520 \\ F Statistic & 582$^{***}$ & 1399$^{***}$ & 1172$^{***}$ & 685$^{***}$ & 1474$^{***}$ & 1241$^{***}$ \\ [0.5ex] \hline \hline \\[-1.8ex] \multicolumn{7}{@{}p{41.4em}@{}}{\textit{Note: } The sample covers the 48 contiguous US states from 2011 to 2018; outliers are removed by trimming 1\% of each variable except $\Delta_{t,s}$. The unit of observation is a set $(t,s,i)$ where $t \neq s$ are months and $i$ is a state. The coefficient on $-\ln (P_{t,i} / P_{s,i})$ is the estimate of $\sigma$. The variable $\Delta_{t,s}$ is the difference in months between periods $t$ and $s$. CDD$_t$ and HDD$_t$ refer to the total number of heating and cooling degree days in month $t$. We scale the coefficients on degree days for clarity. Robust standard errors are reported in parentheses. *p$\textless$0.05, **p$\textless$0.01, ***p$\textless$0.001} \\ \end{tabular} \end{table} \clearpage \begin{table}[!h] \centering \caption{IV (2SLS) Regression Results} \label{table:iv} \small \begin{tabular}{@{\extracolsep{4pt}}lcccccc} \\[-4ex]\hline \hline \\[-1.6ex] & \multicolumn{3}{c}{First-Stage} & \multicolumn{3}{c}{Second-Stage} \\ [0.5ex] & \multicolumn{3}{c}{\textit{Dep. Variable:} $\ln (P_{t,i} / P_{s,i})$ } & \multicolumn{3}{c}{\textit{Dep. 
Variable:} $\ln (Z_{ t, i} / Z_{ s, i})$}\\ [0.5ex] \cmidrule(lr){2-4} \cmidrule(lr){5-7}\\[-2.2ex] & (A.1) & (B.1) & (C.1) & (A.2) & (B.2) & (C.2)\\ [0.5ex] \hline \\[-1.8ex] $ \ln (C_{t,i} / C_{s,i})$ & $-$0.042$^{***}$ & $-$0.018$^{***}$ & $-$0.018$^{***}$ & & & \\ & (0.002) & (0.002) & (0.002) & & & \\ & & & & & & \\ $-\ln (P_{t,i} / P_{s,i})$ & & & & 2.978$^{***}$ & 5.896$^{***}$ & 5.818$^{***}$ \\ & & & & (0.180) & (0.548) & (0.524) \\ & & & & & & \\ $\Delta_{t,s}$ & & & 0.001$^{***}$ & & & 0.003$^{***}$ \\ & & & (0.00004) & & & (0.0004) \\ & & & & & & \\ CDD$_t$ & & 0.100$^{***}$ & 0.105$^{***}$ & & 1.637$^{***}$ & 1.657$^{***}$ \\ $\quad(\times 1000^{-1})$ & & (0.006) & (0.006) & & (0.068) & (0.067) \\ & & & & & & \\ CDD$_s$ & & $-$0.096$^{***}$ & $-$0.114$^{***}$ & & $-$1.688$^{***}$ & $-$1.783$^{***}$ \\ $\quad(\times 1000^{-1})$ & & (0.009) & (0.009) & & (0.079) & (0.084) \\ & & & & & & \\ HDD$_t$ & & $-$0.048$^{***}$ & $-$0.048$^{***}$ & & 0.001 & 0.007 \\ $\quad(\times 1000^{-1})$ & & (0.003) & (0.003) & & (0.031) & (0.030) \\ & & & & & & \\ HDD$_s$ & & 0.053$^{***}$ & 0.055$^{***}$ & & 0.0001 & 0.007 \\ $\quad(\times 1000^{-1})$ & & (0.003) & (0.003) & & (0.035) & (0.034) \\ [0.9ex] \hline \\[-1.8ex] State FEs & Yes & Yes & Yes & Yes & Yes & Yes \\ Observations & 6817 & 6817 & 6817 & 6817 & 6817 & 6817 \\ R$^{2}$ & 0.061 & 0.264 & 0.294 & & & \\ Adjusted R$^{2}$ & 0.061 & 0.264 & 0.293 & & & \\ F Statistic & 443$^{***}$ & 489$^{***}$ & 472$^{***}$ & & & \\ \hline \hline \\[-1.8ex] \multicolumn{7}{@{}p{41.5em}@{}}{\textit{Note: } The log difference in coal price between period $t$ and $s$, $ \ln (C_{t,i} / C_{s,i})$, is used as an instrument in these regressions. The sample covers the 48 contiguous US states from 2011 to 2018; outliers are removed by trimming 1\% of each variable except $\Delta_{t,s}$. The unit of observation is a set $(t,s,i)$ where $t \neq s$ are months and $i$ is a state. The coefficient on $\ln (P_{t,i} / P_{s,i})$ is an estimate of $-\sigma$. The variable $\Delta_{t,s}$ is the difference in months between periods $t$ and $s$. CDD$_t$ and HDD$_t$ refer to the total number of heating and cooling degree days in month $t$. We scale the coefficients on degree days for clarity. Robust standard errors are reported in parentheses. *p$\textless$0.05, **p$\textless$0.01, ***p$\textless$0.001} \\ \end{tabular} \end{table} \clearpage \begin{table}[!h] \centering \caption{Partially Linear IV Regression Results} \label{table:pariv} \small \begin{tabular}{@{\extracolsep{5.3em}}lccc} \\[-4ex]\hline \hline \\[-1.8ex] & \multicolumn{3}{c}{\textit{Instrument: $ \ln (C_{t,i} / C_{s,i})$}} \\ \cline{2-4} %\\[-1.8ex] & ln\_load\_rel \textasciitilde ln\_price\_rel & ln\_load\_rel \textasciitilde (CDD\_1) & ln\_load\_rel \textasciitilde time\_diff + (CDD\_1) \\ \\[-1.8ex] & (1) & (2) & (3)\\ [0.5ex] \hline \\[-1.8ex] $\hat{\sigma} $ & 2.9976$^{***}$ & 1.2123$^{***}$ & 0.8847$^{***}$ \\ & (0.169) & (0.052) & (0.044) \\ [0.9ex] \hline \\[-1.8ex] Time Control & & & Yes \\ Degree Day Controls & & Yes & Yes \\ Observations & 6817 & 6817 & 6817 \\ \hline \hline \\[-1.8ex] \multicolumn{4}{@{}p{41.5em}@{}}{\textit{Note: } The log difference in coal price between period $t$ and $s$, $ \ln (C_{t,i} / C_{s,i})$, is used as an instrument in these regressions. The sample covers the 48 contiguous US states from 2011 to 2018; outliers are removed by trimming 1\% of each variable except $\Delta_{t,s}$. The unit of observation is a set $(t,s,i)$ where $t \neq s$ are months and $i$ is a state. 
The estimation procedure is described \autoref{sec:AppendixC}. Conditional expectations are taken using a Gaussian Kernel with a bandwidth chosen according to \citet{Silverman}. Robust standard errors are reported in parentheses. *p$\textless$0.05, **p$\textless$0.01, ***p$\textless$0.001} \\ \end{tabular} \end{table} \clearpage \begin{figure}[!h] \caption{The Elasticity of Substitution between Solar and Coal} \label{fig:eosnum} \footnotesize \vspace{-1em} \begin{tabular}{@{\extracolsep{0em}}c} \includegraphics[width=1\linewidth]{../figures/fig_elasticity} \\ \multicolumn{1}{@{\hspace{0.2in}}p{5.9in}@{}}{ \textit{Note: } Technology 1 is coal and technology 2 is solar. The legend in the upper subplot also applies to the lower subplot. These results were obtained using the following parameters: $\alpha_t = 0.6$, $\alpha_s = 0.4$, $\xi_1 = (1, \, 1)$, $\xi_2 = (1, \, 0.1)$, $c_1 = 104.3$, $c_2 = 60$. Furthermore, we set the parameter for the intertemporal elasticity of substitution for electricity consumption equal to our estimate $\hat{\sigma} = 0.8847$. In order to generate these numerical results, we first found the optimal quantities of $X$ over a range of prices $c_1^* \in (0.5\, c_1, 2 \,c_1)$. Then, we obtained estimates of the elasticity of substitution by numerically differentiating $\ln(X_1/X_2)$ with respect to $-\ln(c_1/ c_2)$. That is, the elasticity of substitution between technology 1 and 2 is given by the slope of the upper subplot, and it is graphed in the lower subplot. Finally, we repeat this procedure with $\sigma$ equal to two standard deviations above and below its estimated value $\hat{\sigma}$; that is, the dashed lines represent $\sigma = 0.8847 \pm (1.96)(0.044)$. } \\ \end{tabular} \end{figure} \clearpage \begin{figure}[!h] \caption{The Price Elasticity of Demand for Coal Power} \label{fig:coalelas} \footnotesize \vspace{-1em} \begin{tabular}{@{\extracolsep{0em}}c} \includegraphics[width=1\linewidth]{../figures/fig_coal_elas} \\ \multicolumn{1}{@{\hspace{0.2in}}p{5.9in}@{}}{ \textit{Note: } These results were obtained using the following parameters: $\alpha_t = 0.6$, $\alpha_s = 0.4$, $\xi_1 = (1, \, 1)$, $\xi_2 = (1, \, 0.1)$, $c_1 = 104.3$, $c_2 = 60$, $\sigma = 0.5$. We generate these results by finding the optimal quantity of coal, $X_1$, over a range of percent changes in its price $c_1$. Then, on the y-axis, we plot the log difference in $X_1$ divided by the log difference in its price. This is equivalent to the price elasticity of demand for $X_1$. } \\ \end{tabular} \end{figure} \clearpage \begin{figure}[!h] \captionsetup{format=centerproper} \caption{The Effect of Battery Storage on the Elasticity\newline of Substitution between Solar and Coal} \label{fig:battery} \footnotesize \vspace{-1em} \begin{tabular}{@{\extracolsep{0em}}c} \includegraphics[width=1\linewidth]{../figures/fig_batteries} \\ \multicolumn{1}{@{\hspace{0.2in}}p{5.9in}@{}}{ \textit{Note: } Technology 1 is coal and technology 2 is solar. The legend in the upper subplot also applies to the lower subplot. The elasticity of substitution between technology 1 and 2 is given by the slope of the upper subplot, and it is graphed in the lower subplot. These results were obtained using the following parameters: $\alpha_t = 0.6$, $\alpha_s = 0.4$, $\xi_1 = (1, \, 1)$, $\xi_2 = (1, \, 0.1)$, $c_1 = 104.3$, $c_2 = 60$. Furthermore, we set the parameter for the intertemporal elasticity of substitution for electricity consumption equal to our estimate $\hat{\sigma} = 0.8847$. 
We generated these numerical results with the same procedure used for \autoref{fig:eosnum}. We repeated this procedure with $\xi_2 = (0.95, 0.15)$ and $\xi_2 = (0.90, 0.20)$ to simulate the effects of shifting solar power output using batteries. } \\ \end{tabular} \end{figure} \clearpage \begin{table}[!h] \centering \captionsetup{format=centerproper} \caption{The Effect of Battery Storage on the \newline Elasticity of Demand for Coal Power} \label{table:storelas} \small \begin{tabular}{@{\extracolsep{2em}}ccc} \\[-4ex]\hline \hline \\[-1.8ex] \\[-2.6ex] \specialcell{Solar Output during \\ Peak Hours ($\xi_{2t}$)} & \specialcell{Solar Output during \\ Off-Peak Hours ($\xi_{2s}$)} & \specialcell{The Elasticity of \\ Demand for Coal Power}\\ [0.5ex] \hline \\[-1.8ex] 100\% & \phantom{1}5\% & -3.25 \\ \phantom{1}95\% & 10\% & -3.71 \\ \phantom{1}90\% & 15\% & -4.40 \\ \phantom{1}85\% & 20\% & -5.48 \\ \phantom{1}80\% & 25\% & -7.29 \\ \hline \hline \\[-1.8ex] \multicolumn{3}{@{}p{34.2em}@{}}{\textit{Note: } These results were obtained using the following parameters: $\alpha_t = 0.6$, $\alpha_s = 0.4$, $\xi_1 = (1, \, 1)$, $c_1 = 104.3$, $c_2 = 60$, $\sigma = 0.8847$. We generated these results by numerically differentiating the optimal quantity of coal power, $X_1$, with respect to its own price, $c_1$, around its initial price $104.3$. We repeated this process for various values of the output parameter for solar power, $\xi_{2}$. Specifically, we considered 5\% to 20\% shifts in the output of solar power from peak to off-peak hours to simulate the effects of implementing battery storage. } \\ \end{tabular} \end{table} \clearpage \begin{figure}[!h] \captionsetup{format=centerproper} \caption{The VES Approximation of the Elasticity of Substitution \\ between Solar and Coal } \label{fig:ves} \footnotesize \vspace{-1em} \begin{tabular}{@{\extracolsep{0em}}c} \includegraphics[width=1\linewidth]{../figures/fig_ves_approx} \\ \multicolumn{1}{@{\hspace{0.2in}}p{5.9in}@{}}{ \textit{Note: } Technology 1 is coal and technology 2 is solar. The purple, dash-dots line represents a linear approximation of $e_{1,2}$ for $\sigma = 0.8847$ with a fixed intercept of 1. These results were obtained using the following parameters: $\alpha_t = 0.6$, $\alpha_s = 0.4$, $\xi_1 = (1, \, 1)$, $\xi_2 = (1, \, 0.1)$, $c_1 = 104.3$, $c_2 = 60$. Furthermore, we set the parameter for the intertemporal elasticity of substitution for electricity consumption equal to our estimate $\hat{\sigma} = 0.8847$. In order to generate these numerical results, we first found the optimal quantities of $X$ over a range of prices $c_1^* \in (0.9\, c_1, 2 \,c_1)$. Then, we obtained estimates of the elasticity of substitution by numerically differentiating $\ln(X_1/X_2)$ with respect to $-\ln(c_1, c_2)$. } \\ \end{tabular} \end{figure} \clearpage \begin{table}[h!] 
\caption{Parameter Restrictions for $Z, X > 0$} \label{tab:paramrest} \small \centering \begin{tabular}{@{\extracolsep{2em}}l@{\hspace{-0.5 em}}cc} \\[-4ex] \toprule \\[-2.5ex] & \textbf{Case 1} & \textbf{Case 2 } \\ \cmidrule(lr){2-2} \cmidrule(lr){3-3} \\[-1.5ex] \textbf{Cost Efficiency}& $\xi_{2t}/c_2 > \xi_{1t}/c_1$ & $\xi_{2t}/c_2 < \xi_{1t}/c_1 $\\ \textbf{Restrictions} & $\xi_{1s}/c_1 > \xi_{2s}/c_2$ & $\xi_{1s}/c_1 < \xi_{2s}/c_2 $ \\ [3ex] \textbf{Output Efficiency}& $\xi_{2t}/\xi_{2s} > \xi_{1t}/\xi_{1s}$ & $\xi_{2t}/\xi_{2s} < \xi_{1t}/\xi_{1s}$\\ \textbf{Restrictions} & $\xi_{1s}/\xi_{1t} > \xi_{2s}/\xi_{2t} $ & $\xi_{1s}/\xi_{1t} < \xi_{2s}/\xi_{2t} $ \\ [3ex] \multirow{2}{10em}{\textbf{Mixed Efficiency Restrictions}}& $\dfrac{\alpha_s \left(\xi_{1s}/c_1 - \xi_{2s}/c_2\right)}{\alpha_t \left( \xi_{2t}/c_2 - \xi_{1t}/c_1 \right)} > \xi_{2s}/\xi_{2t}$ & $\dfrac{\alpha_s \left(\xi_{1s}/c_1 - \xi_{2s}/c_2\right)}{\alpha_t \left( \xi_{2t}/c_2 - \xi_{1t}/c_1 \right)} < \xi_{2s}/\xi_{2t}$\\ & $\dfrac{\alpha_s \left(\xi_{1s}/c_1 - \xi_{2s}/c_2\right)}{\alpha_t \left( \xi_{2t}/c_2 - \xi_{1t}/c_1 \right)} < \xi_{1s}/\xi_{1t} $ & $\dfrac{\alpha_s \left(\xi_{1s}/c_1 - \xi_{2s}/c_2\right)}{\alpha_t \left( \xi_{2t}/c_2 - \xi_{1t}/c_1 \right)} > \xi_{1s}/\xi_{1t} $ \\[2ex] \midrule \multicolumn{3}{@{}p{40em}@{}}{\footnotesize \textit{Note: } The inequalities in this table assume that all elements of $\xi$ are greater than $0$. The full proof given below provides equivalent restrictions for the zero cases. } \end{tabular} \end{table} \clearpage \begin{figure}[h!] \captionsetup{format=centerproper} \caption{Partially Linear IV Regression Estimates with State Drop Outs} \label{fig:regstaterobust} \footnotesize \vspace{-1em} \begin{tabular}{@{\extracolsep{0em}}c} \includegraphics[width=0.75\linewidth]{../figures/regression_state_robustness_check.png} \\ \multicolumn{1}{@{\hspace{0.2in}}p{5.9in}@{}}{ \textit{Note: } This is a joint plot of the IES $\hat{\sigma}$ against the inverse of its estimated standard deviation from fit (3) of \autoref{table:pariv}. Each point represents an estimate obtained from regressing on a dataset that drops out one state from the full sample. Since the sample consists of the 48 contiguous US states, this regression is a decrease in sample size of 2.08\% relative to the full dataset. On the top and right side of the graph are histograms for the two variables.} \\ \end{tabular} \end{figure} \clearpage \begin{figure}[h!] \captionsetup{format=centerproper} \caption{The Elasticity of Substitution Between Two \newline Minimally Intermittent Technologies} \label{fig:eosrange} \footnotesize \vspace{-1em} \begin{tabular}{@{\extracolsep{0em}}c} \includegraphics[width=1\linewidth]{../figures/fig_elasticity_range} \\ \multicolumn{1}{@{\hspace{0.2in}}p{5.9in}@{}}{ \textit{Note: } The y-axis of the first plot is equivalent to $\log(X_1/X_2)$ and the x-axis of both plots is equivalent to $\log(c_2/c_1)$. Technology 1 and 2 represent two arbitrary technologies that are practically non-intermittent. The legend in the upper subplot also applies to the lower subplot. These results were obtained using the following parameters: $\alpha_t = 0.5$, $\alpha_s = 0.5$, $\xi_1 = (0.95, \, 1)$, $\xi_2 = (1, \, 0.95)$, $c_1 = 100$, $c_2 = 100$. In order to generate these numerical results, we first found the optimal quantities of $X$ over a range of prices $c_1^* \in (0.5\, c_1, 1.5 \,c_1)$. 
Then, we obtained estimates of the elasticity of substitution by numerically differentiating $\ln(X_1/X_2)$ with respect to $-\ln(c_1/c_2)$. That is, the elasticity of substitution between technology 1 and 2 is given by the slope of the upper subplot, and it is graphed in the lower subplot. Finally, we repeat this procedure for various values of $\sigma$. } \\ \end{tabular} \end{figure} \clearpage \begin{figure}[h!] \captionsetup{format=centerproper} \caption{The VES Approximation of the Elasticity of Substitution \\ between Highly Intermittent Solar and Coal} \label{fig:ves_int} \footnotesize \vspace{-1em} \begin{tabular}{@{\extracolsep{0em}}c} \includegraphics[width=1\linewidth]{../figures/fig_ves_approx_int} \\ \multicolumn{1}{@{\hspace{0.2in}}p{5.9in}@{}}{ \textit{Note: } Technology 1 is coal and technology 2 is solar. The purple, dash-dots line represents a linear approximation of $e_{1,2}$ for $\sigma = 0.8847$ with a fixed intercept of 1. These results were obtained using the following parameters: $\alpha_t = 0.6$, $\alpha_s = 0.4$, $\xi_1 = (1, \, 1)$, $\xi_2 = (1, \, 0.01)$, $c_1 = 104.3$, $c_2 = 60$. Furthermore, we set the parameter for the intertemporal elasticity of substitution for electricity consumption equal to our estimate $\hat{\sigma} = 0.8847$. In order to generate these numerical results, we first found the optimal quantities of $X$ over a range of prices $c_1^* \in (c_1, 2 \,c_1)$. Then, we obtained estimates of the elasticity of substitution by numerically differentiating $\ln(X_1/X_2)$ with respect to $-\ln(c_1/c_2)$. } \\ \end{tabular} \end{figure} \clearpage \end{document}
{ "alphanum_fraction": 0.7257486323, "avg_line_length": 108.1931464174, "ext": "tex", "hexsha": "340abf04291c4d7e77e3c87abb83700e57086637", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "c9d65c54f2bc005f4d884998352b6801a467426b", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "sa-914/Energy-Intermittency-Paper", "max_forks_repo_path": "documents/draft.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "c9d65c54f2bc005f4d884998352b6801a467426b", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "sa-914/Energy-Intermittency-Paper", "max_issues_repo_path": "documents/draft.tex", "max_line_length": 2342, "max_stars_count": null, "max_stars_repo_head_hexsha": "c9d65c54f2bc005f4d884998352b6801a467426b", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "sa-914/Energy-Intermittency-Paper", "max_stars_repo_path": "documents/draft.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 40559, "size": 138920 }
\documentclass[11pt]{article} \usepackage{setspace} \usepackage{graphicx} \usepackage{subfigure} \usepackage{lscape} \usepackage{flafter} % Don't place floats before their definition \usepackage{bm} % Define \bm{} to use bold math fonts \usepackage{amsmath} \usepackage{amsfonts} \usepackage{amssymb} \usepackage{MnSymbol} \usepackage{url} \usepackage{natbib} %\usepackage{fullpage} \bibliographystyle{cbe} \citestyle{aa} %\usepackage{algorithmic} %\usepackage[vlined,algochapter,ruled]{algorithm2e} \usepackage[vlined,ruled]{algorithm2e} \SetKwComment{Comment}{$\triangleright\ $}{} %ROOT PRIORS? %Add Giribet and Wheeler arth, ratchet cite, Arango sea spiders \title{Algorithmic Descriptions and Pseudo-Code for Rerooting Logic in a Phylogenetic Network.} \author{ Alex Washburn \\ Division of Invertebrate Zoology, \\ American Museum of Natural History, \\ Central Park West @ 79th Street, \\ New York, NY 10024-5192, \\ USA, \\ [email protected] } %\date{} \begin{document} \maketitle \begin{abstract} A description of a memoized implementation of rerooting logic on an unrooted binary tree, and of its extension to memoized rerooting on a phylogenetic network. The network extension requires resolving potential cycles. \end{abstract} %\newpage %\tableofcontents %\newpage %\doublespacing \section{Introduction} Rerooting is the method of considering every edge in a rooted tree as the potential root and determining the cost and character states for the tree if that edge were selected as the root. A precondition is that a postorder scoring of the tree has already occurred with an arbitrary edge in the unrooted tree selected as the root. \section{Memoized rerooting of an unrooted binary tree} \section{Unary set of directed memoization choices on unrooted binary tree} \section{Trinary set of directed memoization choices on unrooted phylogenetic network} \section{Pruning cyclic subtree resolutions on unrooted phylogenetic network} \end{document}
{ "alphanum_fraction": 0.7526416907, "avg_line_length": 35.8965517241, "ext": "tex", "hexsha": "8e3e58887d87d32bc3b6122c2ad3618ab56d0705", "lang": "TeX", "max_forks_count": 2, "max_forks_repo_forks_event_max_datetime": "2021-03-25T02:40:02.000Z", "max_forks_repo_forks_event_min_datetime": "2021-02-17T23:45:27.000Z", "max_forks_repo_head_hexsha": "9f8001c0e7ae72f39b8451fc8dce4c745855d5a3", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "recursion-ninja/PCG", "max_forks_repo_path": "doc/tex/Rerooting-Pseudocode.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "9f8001c0e7ae72f39b8451fc8dce4c745855d5a3", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "recursion-ninja/PCG", "max_issues_repo_path": "doc/tex/Rerooting-Pseudocode.tex", "max_line_length": 329, "max_stars_count": null, "max_stars_repo_head_hexsha": "9f8001c0e7ae72f39b8451fc8dce4c745855d5a3", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "recursion-ninja/PCG", "max_stars_repo_path": "doc/tex/Rerooting-Pseudocode.tex", "max_stars_repo_stars_event_max_datetime": null, "max_stars_repo_stars_event_min_datetime": null, "num_tokens": 540, "size": 2082 }
% Default to the notebook output style % Inherit from the specified cell style. \documentclass[11pt]{article} \usepackage[T1]{fontenc} % Nicer default font (+ math font) than Computer Modern for most use cases \usepackage{mathpazo} % Basic figure setup, for now with no caption control since it's done % automatically by Pandoc (which extracts ![](path) syntax from Markdown). \usepackage{graphicx} % We will generate all images so they have a width \maxwidth. This means % that they will get their normal width if they fit onto the page, but % are scaled down if they would overflow the margins. \makeatletter \def\maxwidth{\ifdim\Gin@nat@width>\linewidth\linewidth \else\Gin@nat@width\fi} \makeatother \let\Oldincludegraphics\includegraphics % Set max figure width to be 80% of text width, for now hardcoded. \renewcommand{\includegraphics}[1]{\Oldincludegraphics[width=.8\maxwidth]{#1}} % Ensure that by default, figures have no caption (until we provide a % proper Figure object with a Caption API and a way to capture that % in the conversion process - todo). \usepackage{caption} \DeclareCaptionLabelFormat{nolabel}{} \captionsetup{labelformat=nolabel} \usepackage{adjustbox} % Used to constrain images to a maximum size \usepackage{xcolor} % Allow colors to be defined \usepackage{enumerate} % Needed for markdown enumerations to work \usepackage{geometry} % Used to adjust the document margins \usepackage{amsmath} % Equations \usepackage{amssymb} % Equations \usepackage{textcomp} % defines textquotesingle % Hack from http://tex.stackexchange.com/a/47451/13684: \AtBeginDocument{% \def\PYZsq{\textquotesingle}% Upright quotes in Pygmentized code } \usepackage{upquote} % Upright quotes for verbatim code \usepackage{eurosym} % defines \euro \usepackage[mathletters]{ucs} % Extended unicode (utf-8) support \usepackage[utf8x]{inputenc} % Allow utf-8 characters in the tex document \usepackage{fancyvrb} % verbatim replacement that allows latex \usepackage{grffile} % extends the file name processing of package graphics % to support a larger range % The hyperref package gives us a pdf with properly built % internal navigation ('pdf bookmarks' for the table of contents, % internal cross-reference links, web links for URLs, etc.) 
\usepackage{hyperref} \usepackage{longtable} % longtable support required by pandoc >1.10 \usepackage{booktabs} % table support for pandoc > 1.12.2 \usepackage[inline]{enumitem} % IRkernel/repr support (it uses the enumerate* environment) \usepackage[normalem]{ulem} % ulem is needed to support strikethroughs (\sout) % normalem makes italics be italics, not underlines % Colors for the hyperref package \definecolor{urlcolor}{rgb}{0,.145,.698} \definecolor{linkcolor}{rgb}{.71,0.21,0.01} \definecolor{citecolor}{rgb}{.12,.54,.11} % ANSI colors \definecolor{ansi-black}{HTML}{3E424D} \definecolor{ansi-black-intense}{HTML}{282C36} \definecolor{ansi-red}{HTML}{E75C58} \definecolor{ansi-red-intense}{HTML}{B22B31} \definecolor{ansi-green}{HTML}{00A250} \definecolor{ansi-green-intense}{HTML}{007427} \definecolor{ansi-yellow}{HTML}{DDB62B} \definecolor{ansi-yellow-intense}{HTML}{B27D12} \definecolor{ansi-blue}{HTML}{208FFB} \definecolor{ansi-blue-intense}{HTML}{0065CA} \definecolor{ansi-magenta}{HTML}{D160C4} \definecolor{ansi-magenta-intense}{HTML}{A03196} \definecolor{ansi-cyan}{HTML}{60C6C8} \definecolor{ansi-cyan-intense}{HTML}{258F8F} \definecolor{ansi-white}{HTML}{C5C1B4} \definecolor{ansi-white-intense}{HTML}{A1A6B2} % commands and environments needed by pandoc snippets % extracted from the output of `pandoc -s` \providecommand{\tightlist}{% \setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}} \DefineVerbatimEnvironment{Highlighting}{Verbatim}{commandchars=\\\{\}} % Add ',fontsize=\small' for more characters per line \newenvironment{Shaded}{}{} \newcommand{\KeywordTok}[1]{\textcolor[rgb]{0.00,0.44,0.13}{\textbf{{#1}}}} \newcommand{\DataTypeTok}[1]{\textcolor[rgb]{0.56,0.13,0.00}{{#1}}} \newcommand{\DecValTok}[1]{\textcolor[rgb]{0.25,0.63,0.44}{{#1}}} \newcommand{\BaseNTok}[1]{\textcolor[rgb]{0.25,0.63,0.44}{{#1}}} \newcommand{\FloatTok}[1]{\textcolor[rgb]{0.25,0.63,0.44}{{#1}}} \newcommand{\CharTok}[1]{\textcolor[rgb]{0.25,0.44,0.63}{{#1}}} \newcommand{\StringTok}[1]{\textcolor[rgb]{0.25,0.44,0.63}{{#1}}} \newcommand{\CommentTok}[1]{\textcolor[rgb]{0.38,0.63,0.69}{\textit{{#1}}}} \newcommand{\OtherTok}[1]{\textcolor[rgb]{0.00,0.44,0.13}{{#1}}} \newcommand{\AlertTok}[1]{\textcolor[rgb]{1.00,0.00,0.00}{\textbf{{#1}}}} \newcommand{\FunctionTok}[1]{\textcolor[rgb]{0.02,0.16,0.49}{{#1}}} \newcommand{\RegionMarkerTok}[1]{{#1}} \newcommand{\ErrorTok}[1]{\textcolor[rgb]{1.00,0.00,0.00}{\textbf{{#1}}}} \newcommand{\NormalTok}[1]{{#1}} % Additional commands for more recent versions of Pandoc \newcommand{\ConstantTok}[1]{\textcolor[rgb]{0.53,0.00,0.00}{{#1}}} \newcommand{\SpecialCharTok}[1]{\textcolor[rgb]{0.25,0.44,0.63}{{#1}}} \newcommand{\VerbatimStringTok}[1]{\textcolor[rgb]{0.25,0.44,0.63}{{#1}}} \newcommand{\SpecialStringTok}[1]{\textcolor[rgb]{0.73,0.40,0.53}{{#1}}} \newcommand{\ImportTok}[1]{{#1}} \newcommand{\DocumentationTok}[1]{\textcolor[rgb]{0.73,0.13,0.13}{\textit{{#1}}}} \newcommand{\AnnotationTok}[1]{\textcolor[rgb]{0.38,0.63,0.69}{\textbf{\textit{{#1}}}}} \newcommand{\CommentVarTok}[1]{\textcolor[rgb]{0.38,0.63,0.69}{\textbf{\textit{{#1}}}}} \newcommand{\VariableTok}[1]{\textcolor[rgb]{0.10,0.09,0.49}{{#1}}} \newcommand{\ControlFlowTok}[1]{\textcolor[rgb]{0.00,0.44,0.13}{\textbf{{#1}}}} \newcommand{\OperatorTok}[1]{\textcolor[rgb]{0.40,0.40,0.40}{{#1}}} \newcommand{\BuiltInTok}[1]{{#1}} \newcommand{\ExtensionTok}[1]{{#1}} \newcommand{\PreprocessorTok}[1]{\textcolor[rgb]{0.74,0.48,0.00}{{#1}}} \newcommand{\AttributeTok}[1]{\textcolor[rgb]{0.49,0.56,0.16}{{#1}}} 
\newcommand{\InformationTok}[1]{\textcolor[rgb]{0.38,0.63,0.69}{\textbf{\textit{{#1}}}}} \newcommand{\WarningTok}[1]{\textcolor[rgb]{0.38,0.63,0.69}{\textbf{\textit{{#1}}}}} % Define a nice break command that doesn't care if a line doesn't already % exist. \def\br{\hspace*{\fill} \\* } % Math Jax compatability definitions \def\gt{>} \def\lt{<} % Document parameters \title{ELEN3007 - Probabilistic Systems Analysis Assignment} % Pygments definitions \makeatletter \def\PY@reset{\let\PY@it=\relax \let\PY@bf=\relax% \let\PY@ul=\relax \let\PY@tc=\relax% \let\PY@bc=\relax \let\PY@ff=\relax} \def\PY@tok#1{\csname PY@tok@#1\endcsname} \def\PY@toks#1+{\ifx\relax#1\empty\else% \PY@tok{#1}\expandafter\PY@toks\fi} \def\PY@do#1{\PY@bc{\PY@tc{\PY@ul{% \PY@it{\PY@bf{\PY@ff{#1}}}}}}} \def\PY#1#2{\PY@reset\PY@toks#1+\relax+\PY@do{#2}} \expandafter\def\csname PY@tok@w\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.73,0.73}{##1}}} \expandafter\def\csname PY@tok@c\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}} \expandafter\def\csname PY@tok@cp\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.74,0.48,0.00}{##1}}} \expandafter\def\csname PY@tok@k\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}} \expandafter\def\csname PY@tok@kp\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}} \expandafter\def\csname PY@tok@kt\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.69,0.00,0.25}{##1}}} \expandafter\def\csname PY@tok@o\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}} \expandafter\def\csname PY@tok@ow\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.67,0.13,1.00}{##1}}} \expandafter\def\csname PY@tok@nb\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}} \expandafter\def\csname PY@tok@nf\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,1.00}{##1}}} \expandafter\def\csname PY@tok@nc\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,1.00}{##1}}} \expandafter\def\csname PY@tok@nn\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,1.00}{##1}}} \expandafter\def\csname PY@tok@ne\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.82,0.25,0.23}{##1}}} \expandafter\def\csname PY@tok@nv\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}} \expandafter\def\csname PY@tok@no\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.53,0.00,0.00}{##1}}} \expandafter\def\csname PY@tok@nl\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.63,0.63,0.00}{##1}}} \expandafter\def\csname PY@tok@ni\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.60,0.60,0.60}{##1}}} \expandafter\def\csname PY@tok@na\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.49,0.56,0.16}{##1}}} \expandafter\def\csname PY@tok@nt\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}} \expandafter\def\csname PY@tok@nd\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.67,0.13,1.00}{##1}}} \expandafter\def\csname PY@tok@s\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}} \expandafter\def\csname PY@tok@sd\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}} \expandafter\def\csname PY@tok@si\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.73,0.40,0.53}{##1}}} \expandafter\def\csname PY@tok@se\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.73,0.40,0.13}{##1}}} \expandafter\def\csname PY@tok@sr\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.40,0.53}{##1}}} \expandafter\def\csname PY@tok@ss\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}} 
\expandafter\def\csname PY@tok@sx\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}} \expandafter\def\csname PY@tok@m\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}} \expandafter\def\csname PY@tok@gh\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,0.50}{##1}}} \expandafter\def\csname PY@tok@gu\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.50,0.00,0.50}{##1}}} \expandafter\def\csname PY@tok@gd\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.63,0.00,0.00}{##1}}} \expandafter\def\csname PY@tok@gi\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.63,0.00}{##1}}} \expandafter\def\csname PY@tok@gr\endcsname{\def\PY@tc##1{\textcolor[rgb]{1.00,0.00,0.00}{##1}}} \expandafter\def\csname PY@tok@ge\endcsname{\let\PY@it=\textit} \expandafter\def\csname PY@tok@gs\endcsname{\let\PY@bf=\textbf} \expandafter\def\csname PY@tok@gp\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,0.50}{##1}}} \expandafter\def\csname PY@tok@go\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.53,0.53,0.53}{##1}}} \expandafter\def\csname PY@tok@gt\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.27,0.87}{##1}}} \expandafter\def\csname PY@tok@err\endcsname{\def\PY@bc##1{\setlength{\fboxsep}{0pt}\fcolorbox[rgb]{1.00,0.00,0.00}{1,1,1}{\strut ##1}}} \expandafter\def\csname PY@tok@kc\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}} \expandafter\def\csname PY@tok@kd\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}} \expandafter\def\csname PY@tok@kn\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}} \expandafter\def\csname PY@tok@kr\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}} \expandafter\def\csname PY@tok@bp\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}} \expandafter\def\csname PY@tok@fm\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,1.00}{##1}}} \expandafter\def\csname PY@tok@vc\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}} \expandafter\def\csname PY@tok@vg\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}} \expandafter\def\csname PY@tok@vi\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}} \expandafter\def\csname PY@tok@vm\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}} \expandafter\def\csname PY@tok@sa\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}} \expandafter\def\csname PY@tok@sb\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}} \expandafter\def\csname PY@tok@sc\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}} \expandafter\def\csname PY@tok@dl\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}} \expandafter\def\csname PY@tok@s2\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}} \expandafter\def\csname PY@tok@sh\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}} \expandafter\def\csname PY@tok@s1\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}} \expandafter\def\csname PY@tok@mb\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}} \expandafter\def\csname PY@tok@mf\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}} \expandafter\def\csname PY@tok@mh\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}} \expandafter\def\csname PY@tok@mi\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}} \expandafter\def\csname PY@tok@il\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}} \expandafter\def\csname 
PY@tok@mo\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}} \expandafter\def\csname PY@tok@ch\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}} \expandafter\def\csname PY@tok@cm\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}} \expandafter\def\csname PY@tok@cpf\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}} \expandafter\def\csname PY@tok@c1\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}} \expandafter\def\csname PY@tok@cs\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}} \def\PYZbs{\char`\\} \def\PYZus{\char`\_} \def\PYZob{\char`\{} \def\PYZcb{\char`\}} \def\PYZca{\char`\^} \def\PYZam{\char`\&} \def\PYZlt{\char`\<} \def\PYZgt{\char`\>} \def\PYZsh{\char`\#} \def\PYZpc{\char`\%} \def\PYZdl{\char`\$} \def\PYZhy{\char`\-} \def\PYZsq{\char`\'} \def\PYZdq{\char`\"} \def\PYZti{\char`\~}
% for compatibility with earlier versions
\def\PYZat{@} \def\PYZlb{[} \def\PYZrb{]} \makeatother
% Exact colors from NB
\definecolor{incolor}{rgb}{0.0, 0.0, 0.5} \definecolor{outcolor}{rgb}{0.545, 0.0, 0.0}
% Prevent overflowing lines due to hard-to-break entities
\sloppy
% Setup hyperref package
\hypersetup{ breaklinks=true, % so long urls are correctly broken across lines
colorlinks=true, urlcolor=urlcolor, linkcolor=linkcolor, citecolor=citecolor, }
% Slightly bigger margins than the latex defaults
\geometry{verbose,tmargin=1in,bmargin=1in,lmargin=1in,rmargin=1in} \begin{document} \maketitle \section{ELEN3007 ICAO-Codes-Weather-analysis}\label{elen3007-icao-codes-weather-analysis} \subparagraph{This script imports the weather data for the Chulman Airport (UELL) weather station in Russia and then answers the questions outlined.}\label{this-script-imports-the-weather-data-for-the-chulman-airport-uell-weather-station-in-russia-and-then-answers-question-outlined.} Each question that has a discussion of results is analysed in the respective question markdown block before the code block. \\ The Git repo storing this code can be seen here: \url{https://github.com/SoIidarity/ICAO-Weather-Analysis-Python}. The main file, showing all code execution, can be seen here: \url{https://github.com/SoIidarity/ICAO-Weather-Analysis-Python/blob/master/MainFile.ipynb} \\ Both a PDF version and an HTML version of this report are included in this submission. For an optimal viewing experience, please view the HTML version, as the formatting is preserved from the Jupyter notebook. Additionally, the PDF version has HTML elements, such as some images, stripped out. Alternatively, view the second Git link above to read the notebook online through GitHub. \\ First, import the libraries needed for program execution.
\begin{Verbatim}[commandchars=\\\{\}] {\color{incolor}In [{\color{incolor}10}]:} \PY{k+kn}{import} \PY{n+nn}{numpy} \PY{k}{as} \PY{n+nn}{np} \PY{k+kn}{import} \PY{n+nn}{seaborn} \PY{k}{as} \PY{n+nn}{sns} \PY{k+kn}{import} \PY{n+nn}{pandas} \PY{k}{as} \PY{n+nn}{pd} \PY{k+kn}{from} \PY{n+nn}{statsmodels}\PY{n+nn}{.}\PY{n+nn}{graphics}\PY{n+nn}{.}\PY{n+nn}{tsaplots} \PY{k}{import} \PY{n}{plot\PYZus{}acf} \PY{k+kn}{import} \PY{n+nn}{scipy}\PY{n+nn}{.}\PY{n+nn}{stats} \PY{k+kn}{import} \PY{n+nn}{matplotlib}\PY{n+nn}{.}\PY{n+nn}{pyplot} \PY{k}{as} \PY{n+nn}{plt} \PY{k+kn}{from} \PY{n+nn}{IPython}\PY{n+nn}{.}\PY{n+nn}{display} \PY{k}{import} \PY{n}{HTML}\PY{p}{,} \PY{n}{display} \PY{k+kn}{from} \PY{n+nn}{io} \PY{k}{import} \PY{n}{BytesIO} \PY{k+kn}{from} \PY{n+nn}{base64} \PY{k}{import} \PY{n}{b64encode} \PY{k+kn}{import} \PY{n+nn}{scipy}\PY{n+nn}{.}\PY{n+nn}{misc} \PY{k}{as} \PY{n+nn}{smp} \PY{k+kn}{from} \PY{n+nn}{mpl\PYZus{}toolkits}\PY{n+nn}{.}\PY{n+nn}{mplot3d} \PY{k}{import} \PY{n}{Axes3D} \PY{k+kn}{from} \PY{n+nn}{matplotlib} \PY{k}{import} \PY{n}{cm} \PY{k+kn}{from} \PY{n+nn}{matplotlib}\PY{n+nn}{.}\PY{n+nn}{ticker} \PY{k}{import} \PY{n}{LinearLocator} \PY{k+kn}{from} \PY{n+nn}{scipy}\PY{n+nn}{.}\PY{n+nn}{stats} \PY{k}{import} \PY{n}{norm} \PY{n}{plt}\PY{o}{.}\PY{n}{rcParams}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{figure.figsize}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{o}{=} \PY{p}{(}\PY{l+m+mi}{26}\PY{p}{,} \PY{l+m+mi}{13}\PY{p}{)} \end{Verbatim} First, define some useful functions. These are used in the printing of data later on. Also used for rendering images to the HTML page. \begin{Verbatim}[commandchars=\\\{\}] {\color{incolor}In [{\color{incolor}2}]:} \PY{k}{def} \PY{n+nf}{printMatrix}\PY{p}{(}\PY{n}{data}\PY{p}{)}\PY{p}{:} \PY{c+c1}{\PYZsh{}used to print matricies to HTML} \PY{n}{display}\PY{p}{(}\PY{n}{HTML}\PY{p}{(} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{\PYZlt{}table\PYZgt{}\PYZlt{}tr\PYZgt{}}\PY{l+s+si}{\PYZob{}\PYZcb{}}\PY{l+s+s1}{\PYZlt{}/tr\PYZgt{}\PYZlt{}/table\PYZgt{}}\PY{l+s+s1}{\PYZsq{}}\PY{o}{.}\PY{n}{format}\PY{p}{(} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{\PYZlt{}/tr\PYZgt{}\PYZlt{}tr\PYZgt{}}\PY{l+s+s1}{\PYZsq{}}\PY{o}{.}\PY{n}{join}\PY{p}{(} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{\PYZlt{}td\PYZgt{}}\PY{l+s+si}{\PYZob{}\PYZcb{}}\PY{l+s+s1}{\PYZlt{}/td\PYZgt{}}\PY{l+s+s1}{\PYZsq{}} \PY{o}{.}\PY{n}{format}\PY{p}{(}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{\PYZlt{}/td\PYZgt{}\PYZlt{}td\PYZgt{}}\PY{l+s+s1}{\PYZsq{}} \PY{o}{.}\PY{n}{join}\PY{p}{(}\PY{n+nb}{str}\PY{p}{(}\PY{n}{\PYZus{}}\PY{p}{)} \PY{k}{for} \PY{n}{\PYZus{}} \PY{o+ow}{in} \PY{n}{row}\PY{p}{)}\PY{p}{)} \PY{k}{for} \PY{n}{row} \PY{o+ow}{in} \PY{n}{data}\PY{p}{)} \PY{p}{)} \PY{p}{)}\PY{p}{)} \PY{k}{def} \PY{n+nf}{printText}\PY{p}{(}\PY{n}{text}\PY{p}{)}\PY{p}{:} \PY{n}{display}\PY{p}{(}\PY{n}{HTML}\PY{p}{(}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{\PYZlt{}p\PYZgt{}}\PY{l+s+s1}{\PYZsq{}} \PY{o}{+} \PY{n}{text} \PY{o}{+} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{\PYZlt{}p\PYZgt{}}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)}\PY{p}{)} \PY{k}{def} \PY{n+nf}{displayHTML}\PY{p}{(}\PY{n}{html}\PY{p}{)}\PY{p}{:} \PY{n}{display}\PY{p}{(}\PY{n}{HTML}\PY{p}{(}\PY{n}{html}\PY{p}{)}\PY{p}{)} \PY{k}{def} \PY{n+nf}{drawImg}\PY{p}{(}\PY{n}{img}\PY{p}{)}\PY{p}{:} \PY{n}{b} \PY{o}{=} \PY{n}{BytesIO}\PY{p}{(}\PY{p}{)} \PY{n}{img}\PY{o}{.}\PY{n}{save}\PY{p}{(}\PY{n}{b}\PY{p}{,} \PY{n+nb}{format}\PY{o}{=}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{png}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)} \PY{n}{displayHTML}\PY{p}{(}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{\PYZlt{}img 
src=}\PY{l+s+s2}{\PYZsq{}}\PY{l+s+s2}{data:image/png;base64,}\PY{l+s+si}{\PYZob{}0\PYZcb{}}\PY{l+s+s2}{\PYZsq{}}\PY{l+s+s2}{/\PYZgt{}}\PY{l+s+s2}{\PYZdq{}} \PY{o}{.}\PY{n}{format}\PY{p}{(}\PY{n}{b64encode}\PY{p}{(}\PY{n}{b}\PY{o}{.}\PY{n}{getvalue}\PY{p}{(}\PY{p}{)}\PY{p}{)}\PY{o}{.}\PY{n}{decode}\PY{p}{(}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{utf\PYZhy{}8}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)}\PY{p}{)}\PY{p}{)}
\end{Verbatim}
\subsection{Question 1}\label{question-1}
Import the data from the text files. These are stored as CSVs in the ``Data'' directory in the repo. Each CSV is formatted as: \{Unix Timestamp, max Temp, avg Temp, min Temp\}. Each CSV is read into a matrix. These matrices are then added to a vector so they can be iterated through later on.
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}3}]:} \PY{n}{w1995} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{genfromtxt}\PY{p}{(}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{Data/1995.csv}\PY{l+s+s1}{\PYZsq{}}\PY{p}{,} \PY{n}{delimiter}\PY{o}{=}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{,}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)}
         \PY{n}{w2000} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{genfromtxt}\PY{p}{(}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{Data/2000.csv}\PY{l+s+s1}{\PYZsq{}}\PY{p}{,} \PY{n}{delimiter}\PY{o}{=}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{,}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)}
         \PY{n}{w2005} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{genfromtxt}\PY{p}{(}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{Data/2005.csv}\PY{l+s+s1}{\PYZsq{}}\PY{p}{,} \PY{n}{delimiter}\PY{o}{=}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{,}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)}
         \PY{n}{w2010} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{genfromtxt}\PY{p}{(}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{Data/2010.csv}\PY{l+s+s1}{\PYZsq{}}\PY{p}{,} \PY{n}{delimiter}\PY{o}{=}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{,}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)}
         \PY{n}{w2015} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{genfromtxt}\PY{p}{(}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{Data/2015.csv}\PY{l+s+s1}{\PYZsq{}}\PY{p}{,} \PY{n}{delimiter}\PY{o}{=}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{,}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)}
         \PY{n}{weatherData} \PY{o}{=} \PY{p}{[}\PY{n}{w1995}\PY{p}{,} \PY{n}{w2000}\PY{p}{,} \PY{n}{w2005}\PY{p}{,} \PY{n}{w2010}\PY{p}{,} \PY{n}{w2015}\PY{p}{]}
\end{Verbatim}
\subsection{Question 2}\label{question-2}
Next, identify the minimum, maximum, mean and standard deviation for each year. As the matrices are held in a vector, we can do this sequentially in a loop. The outputs are collected in a matrix, where the columns are the years (1995, 2000, 2005, 2010, 2015) and the rows are the minimum, maximum, mean and standard deviation for each year. This can be seen in the table below. The mean is calculated with numpy's standard mean function, which computes the average of the data set: \[\bar{x} = \frac{1}{n}\left (\sum_{i=1}^n{x_i}\right ) = \frac{x_1+x_2+\cdots +x_n}{n}\] The standard deviation is used to quantify the spread of the data, as defined by: \[s = \sqrt{\frac{\sum_{i=1}^N (x_i - \overline{x})^2}{N-1} }\] (Note that numpy's \texttt{std} divides by $N$ rather than $N-1$ by default; passing \texttt{ddof=1} would match this definition exactly.)
\subsubsection{Comment on findings}\label{comment-on-findings}
The minimum, maximum, mean and standard deviation are very similar for each year. This is to be expected, as they were taken from the same location. However, over the past 20 years it seems that the averages became warmer, with both the maximums and minimums increasing over time. There are outliers in this trend, as seen in 2010's minimum temperature. This trend is also verified by looking at the mean, where each year is shown to be getting warmer.
The high standard deviation seen is accounted for by the large tempreture changes during the course of the year. A delta between the maximum of 37 and minimum of -45 of 82 degrees is rather high. \begin{Verbatim}[commandchars=\\\{\}] {\color{incolor}In [{\color{incolor}4}]:} \PY{n}{dataValues} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{zeros}\PY{p}{(}\PY{p}{(}\PY{l+m+mi}{4}\PY{p}{,} \PY{l+m+mi}{5}\PY{p}{)}\PY{p}{)} \PY{n}{counter} \PY{o}{=} \PY{l+m+mi}{0}\PY{p}{;} \PY{k}{for} \PY{n}{year} \PY{o+ow}{in} \PY{n}{weatherData}\PY{p}{:} \PY{n}{dataValues}\PY{p}{[}\PY{l+m+mi}{0}\PY{p}{,} \PY{n}{counter}\PY{p}{]} \PY{o}{=} \PY{n}{year}\PY{p}{[}\PY{p}{:}\PY{p}{,} \PY{l+m+mi}{3}\PY{p}{]}\PY{o}{.}\PY{n}{min}\PY{p}{(}\PY{p}{)} \PY{c+c1}{\PYZsh{}max of max values} \PY{n}{dataValues}\PY{p}{[}\PY{l+m+mi}{1}\PY{p}{,} \PY{n}{counter}\PY{p}{]} \PY{o}{=} \PY{n}{year}\PY{p}{[}\PY{p}{:}\PY{p}{,} \PY{l+m+mi}{1}\PY{p}{]}\PY{o}{.}\PY{n}{max}\PY{p}{(}\PY{p}{)} \PY{c+c1}{\PYZsh{}min of min values} \PY{n}{dataValues}\PY{p}{[}\PY{l+m+mi}{2}\PY{p}{,} \PY{n}{counter}\PY{p}{]} \PY{o}{=} \PY{n+nb}{round}\PY{p}{(}\PY{n}{year}\PY{p}{[}\PY{p}{:}\PY{p}{,} \PY{l+m+mi}{2}\PY{p}{]}\PY{o}{.}\PY{n}{mean}\PY{p}{(}\PY{p}{)}\PY{p}{,} \PY{l+m+mi}{2}\PY{p}{)} \PY{c+c1}{\PYZsh{}average of average values} \PY{n}{dataValues}\PY{p}{[}\PY{l+m+mi}{3}\PY{p}{,} \PY{n}{counter}\PY{p}{]} \PY{o}{=} \PY{n+nb}{round}\PY{p}{(}\PY{n}{year}\PY{p}{[}\PY{p}{:}\PY{p}{,} \PY{l+m+mi}{2}\PY{p}{]}\PY{o}{.}\PY{n}{std}\PY{p}{(}\PY{p}{)}\PY{p}{,} \PY{l+m+mi}{2}\PY{p}{)} \PY{c+c1}{\PYZsh{}Standard deviation of average values} \PY{n}{counter} \PY{o}{=} \PY{n}{counter} \PY{o}{+} \PY{l+m+mi}{1}\PY{p}{;} \PY{n}{displayHTML}\PY{p}{(}\PY{n}{pd}\PY{o}{.}\PY{n}{DataFrame}\PY{p}{(}\PY{n}{dataValues}\PY{p}{,} \PY{n}{columns} \PY{o}{=} \PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{1995}\PY{l+s+s1}{\PYZsq{}}\PY{p}{,} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{2000}\PY{l+s+s1}{\PYZsq{}}\PY{p}{,} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{2005}\PY{l+s+s1}{\PYZsq{}}\PY{p}{,} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{2010}\PY{l+s+s1}{\PYZsq{}}\PY{p}{,} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{2015}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}\PY{p}{,} \PY{n}{index} \PY{o}{=} \PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{Minimum}\PY{l+s+s1}{\PYZsq{}}\PY{p}{,} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{Maximum}\PY{l+s+s1}{\PYZsq{}}\PY{p}{,} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{Mean}\PY{l+s+s1}{\PYZsq{}}\PY{p}{,} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{Standard Deviation}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]}\PY{p}{)}\PY{o}{.}\PY{n}{to\PYZus{}html}\PY{p}{(}\PY{p}{)}\PY{p}{)} \end{Verbatim} \begin{verbatim} <IPython.core.display.HTML object> \end{verbatim} \subsection{Question 3}\label{question-3} Each Years probability distribution functions are now plotted, for each year, on the same set of axes. To achive this, a fixed plot of univariate distributions is generated. This is done with the Seaborn displot function. This value is calculated by using the seaborn distplot function that recreates the standard PDF equation as: \[\operatorname{E}[X] = \int_{-\infty}^\infty x\,f(x)\,dx\] \subsubsection{Comment on findings}\label{comment-on-findings} Each year has a similar general probobility distribution function. They apear to be close to normal in shape. general average trends can also be seen through this graph such as 2005 had higher distribution of both highs and lows, with a lower distribution in the middel. This indicates more extreme weather during this year. 
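As an aside (this check is not part of the original notebook), the smooth curves that \texttt{distplot} draws with \texttt{hist=False} are Gaussian kernel density estimates. The same kind of curve can be reproduced directly with scipy, which also makes it easy to confirm that the estimate behaves like a proper probability density function; the sketch below assumes the \texttt{w1995} matrix loaded in Question 1, with column 2 holding the average temperatures.

\begin{verbatim}
# Illustrative sketch only: approximate the curve seaborn draws for 1995
# with scipy's Gaussian kernel density estimator, and check that the
# estimated density integrates to (approximately) 1.
import numpy as np
from scipy.stats import gaussian_kde

avg1995 = w1995[:, 2]                      # average daily temperatures
kde = gaussian_kde(avg1995)                # fit the density estimate

grid = np.linspace(avg1995.min() - 5, avg1995.max() + 5, 500)
density = kde(grid)                        # f(x) evaluated on the grid

print(np.trapz(density, grid))             # should be close to 1.0
\end{verbatim}

The fact that the area under each curve is (approximately) 1 is what allows the five years to be compared directly as probability density functions in the plot below.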
\begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}5}]:} \PY{n}{sns}\PY{o}{.}\PY{n}{set\PYZus{}style}\PY{p}{(}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{whitegrid}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)}
         \PY{n}{counter} \PY{o}{=} \PY{l+m+mi}{0}\PY{p}{;}
         \PY{k}{for} \PY{n}{year} \PY{o+ow}{in} \PY{n}{weatherData}\PY{p}{:}
             \PY{n}{sns}\PY{o}{.}\PY{n}{distplot}\PY{p}{(}\PY{n}{year}\PY{p}{[}\PY{p}{:}\PY{p}{,} \PY{l+m+mi}{2}\PY{p}{]}\PY{p}{,} \PY{n}{hist}\PY{o}{=}\PY{k+kc}{False}\PY{p}{,} \PY{n}{label}\PY{o}{=}\PY{l+m+mi}{1995} \PY{o}{+} \PY{n}{counter} \PY{o}{*} \PY{l+m+mi}{5}\PY{p}{,} \PY{n}{axlabel}\PY{o}{=}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{Temperature (C)}\PY{l+s+s2}{\PYZdq{}}\PY{p}{)}
             \PY{n}{counter} \PY{o}{=} \PY{n}{counter} \PY{o}{+} \PY{l+m+mi}{1}\PY{p}{;}
         \PY{n}{plt}\PY{o}{.}\PY{n}{title}\PY{p}{(}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{Probability Distribution Function}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)}
         \PY{n}{plt}\PY{o}{.}\PY{n}{show}\PY{p}{(}\PY{p}{)}
\end{Verbatim}
\begin{center} \adjustimage{max size={0.9\linewidth}{0.9\paperheight}}{output_10_0.png} \end{center} { \hspace*{\fill} \\}
\subsection{Question 4}\label{question-4}
The cross-correlation between each year's annual temperatures is now calculated and shown as a matrix output. Note here that we slice each year's data to ignore the extra leap-year day in 2000, as the correlation needs matrices of the same dimension. Additionally, note that we need to normalise the data; with this normalisation the result is the same as using the numpy corrcoef function. To calculate this, the numpy correlate function was used. This function makes use of this equation: \[(f \star g)(\tau)\ \stackrel{\mathrm{def}}{=} \int_{-\infty}^{\infty} f^*(t)\ g(t+\tau)\,dt\]
\subsubsection{Comment on findings}\label{comment-on-findings}
Through the cross-correlation of the presented data, one can see the relationship between each year's weather. The closer these values are to 1, the more closely correlated the data is. Interestingly, the highest correlation is seen between 2010 and 1995, with a 94\% correlation. The main diagonal of 1's indicates full correlation, with each year correlated against itself.
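The normalisation applied in the code below (subtracting the mean, then dividing by the standard deviation and the series length) is exactly what turns a single-point \texttt{np.correlate} into the Pearson correlation coefficient, so the matrix should match what \texttt{np.corrcoef} reports. A minimal sanity check of that equivalence (not part of the original notebook; it reuses the \texttt{w1995} and \texttt{w2010} matrices loaded in Question 1) is:

\begin{verbatim}
# Illustrative check only: the manual normalisation + np.correlate
# used for the cross-correlation matrix agrees with np.corrcoef.
import numpy as np

a = w1995[0:365, 2]                  # average temperatures, 1995
b = w2010[0:365, 2]                  # average temperatures, 2010

a_n = (a - a.mean()) / (a.std() * len(a))
b_n = (b - b.mean()) / b.std()

manual = np.correlate(a_n, b_n)[0]   # single overlap -> one coefficient
builtin = np.corrcoef(a, b)[0, 1]
print(manual, builtin)               # the two numbers should agree
\end{verbatim}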
\begin{Verbatim}[commandchars=\\\{\}] {\color{incolor}In [{\color{incolor}6}]:} \PY{n}{autoCorrelation} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{zeros}\PY{p}{(}\PY{p}{(}\PY{l+m+mi}{5}\PY{p}{,} \PY{l+m+mi}{5}\PY{p}{)}\PY{p}{)} \PY{k}{for} \PY{n}{x} \PY{o+ow}{in} \PY{n+nb}{range}\PY{p}{(}\PY{l+m+mi}{0}\PY{p}{,} \PY{l+m+mi}{5}\PY{p}{)}\PY{p}{:} \PY{k}{for} \PY{n}{y} \PY{o+ow}{in} \PY{n+nb}{range}\PY{p}{(}\PY{l+m+mi}{0}\PY{p}{,} \PY{l+m+mi}{5}\PY{p}{)}\PY{p}{:} \PY{n}{a} \PY{o}{=} \PY{n}{weatherData}\PY{p}{[}\PY{n}{x}\PY{p}{]}\PY{p}{[}\PY{l+m+mi}{0}\PY{p}{:}\PY{l+m+mi}{365}\PY{p}{,} \PY{l+m+mi}{2}\PY{p}{]} \PY{n}{b} \PY{o}{=} \PY{n}{weatherData}\PY{p}{[}\PY{n}{y}\PY{p}{]}\PY{p}{[}\PY{l+m+mi}{0}\PY{p}{:}\PY{l+m+mi}{365}\PY{p}{,} \PY{l+m+mi}{2}\PY{p}{]} \PY{n}{a} \PY{o}{=} \PY{p}{(}\PY{n}{a} \PY{o}{\PYZhy{}} \PY{n}{np}\PY{o}{.}\PY{n}{mean}\PY{p}{(}\PY{n}{a}\PY{p}{)}\PY{p}{)} \PY{o}{/} \PY{p}{(}\PY{n}{np}\PY{o}{.}\PY{n}{std}\PY{p}{(}\PY{n}{a}\PY{p}{)} \PY{o}{*} \PY{n+nb}{len}\PY{p}{(}\PY{n}{a}\PY{p}{)}\PY{p}{)} \PY{n}{b} \PY{o}{=} \PY{p}{(}\PY{n}{b} \PY{o}{\PYZhy{}} \PY{n}{np}\PY{o}{.}\PY{n}{mean}\PY{p}{(}\PY{n}{b}\PY{p}{)}\PY{p}{)} \PY{o}{/} \PY{p}{(}\PY{n}{np}\PY{o}{.}\PY{n}{std}\PY{p}{(}\PY{n}{b}\PY{p}{)}\PY{p}{)} \PY{n}{autoCorrelation}\PY{p}{[}\PY{n}{x}\PY{p}{,} \PY{n}{y}\PY{p}{]} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{correlate}\PY{p}{(}\PY{n}{a}\PY{p}{,} \PY{n}{b}\PY{p}{)} \PY{n}{columns} \PY{o}{=} \PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{1995}\PY{l+s+s1}{\PYZsq{}}\PY{p}{,} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{2000}\PY{l+s+s1}{\PYZsq{}}\PY{p}{,} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{2005}\PY{l+s+s1}{\PYZsq{}}\PY{p}{,} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{2010}\PY{l+s+s1}{\PYZsq{}}\PY{p}{,} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{2015}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{n}{index} \PY{o}{=} \PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{1995}\PY{l+s+s1}{\PYZsq{}}\PY{p}{,} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{2000}\PY{l+s+s1}{\PYZsq{}}\PY{p}{,} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{2005}\PY{l+s+s1}{\PYZsq{}}\PY{p}{,} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{2010}\PY{l+s+s1}{\PYZsq{}}\PY{p}{,} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{2015}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{n}{displayHTML}\PY{p}{(}\PY{n}{pd}\PY{o}{.}\PY{n}{DataFrame}\PY{p}{(}\PY{n}{autoCorrelation}\PY{p}{,} \PY{n}{columns}\PY{p}{,} \PY{n}{index}\PY{p}{)}\PY{o}{.}\PY{n}{to\PYZus{}html}\PY{p}{(}\PY{p}{)}\PY{p}{)} \end{Verbatim} \begin{verbatim} <IPython.core.display.HTML object> \end{verbatim} \subsection{Question 5}\label{question-5} The autocorrelation function for each year's data, where τ ranges from 0 to 364 is now generated. Confidence intervals are drawn as a cone. By default, this is set to a 95\% confidence interval, suggesting that correlation values outside of this code are very likely a correlation and not a statistical fluke. First, each Autocorrelation is drawn on its own graph. After, they are overlayed. Autocorrelation is found through the use of the plot\_acf function from the tsaplots statistics library. From a signal processing point of view, this can be seen as: \[R_{ff}(\tau) = (f * g_{-1}(\overline{f}))(\tau) = \int_{-\infty}^\infty f(u+\tau)\overline{f}(u)\, {\rm d}u = \int_{-\infty}^\infty f(u)\overline{f}(u-\tau)\, {\rm d}u\] \subsubsection{Comment on findings}\label{comment-on-findings} This autocorrelation shows how each year is related to a time-shifted version of itself. This can be seen as an effective convolution process. From this, one can see how each part of the year is related to the rest of the year for a given shift amount τ. 
For example, at τ=0, the magnitude is 1. At this time, no shifting has occurred. At approximately τ=150, there is the highest negative correlation seen. This is due to the correlation between the middle of summer and winter of the two data sets. \begin{Verbatim}[commandchars=\\\{\}] {\color{incolor}In [{\color{incolor}7}]:} \PY{n}{counter} \PY{o}{=} \PY{l+m+mi}{0}\PY{p}{;} \PY{k}{for} \PY{n}{year} \PY{o+ow}{in} \PY{n}{weatherData}\PY{p}{:} \PY{k}{if} \PY{p}{(}\PY{n}{counter} \PY{o}{==} \PY{l+m+mi}{0}\PY{p}{)}\PY{p}{:} \PY{c+c1}{\PYZsh{}on the first loop, generate the subplots} \PY{n}{fig}\PY{p}{,} \PY{n}{axs} \PY{o}{=} \PY{n}{plt}\PY{o}{.}\PY{n}{subplots}\PY{p}{(}\PY{l+m+mi}{1}\PY{p}{,} \PY{l+m+mi}{2}\PY{p}{)} \PY{k}{if} \PY{n}{counter} \PY{o}{==} \PY{l+m+mi}{4}\PY{p}{:} \PY{c+c1}{\PYZsh{} if we are at the last figure, we want it to be on its own line} \PY{n}{plot\PYZus{}acf}\PY{p}{(}\PY{n}{year}\PY{p}{[}\PY{p}{:}\PY{p}{,} \PY{l+m+mi}{2}\PY{p}{]}\PY{p}{,} \PY{n}{title} \PY{o}{=} \PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{Autocorrelation for }\PY{l+s+si}{\PYZob{}\PYZcb{}}\PY{l+s+s2}{\PYZdq{}} \PY{o}{.}\PY{n}{format}\PY{p}{(}\PY{l+m+mi}{1995} \PY{o}{+} \PY{l+m+mi}{5} \PY{o}{*} \PY{n}{counter}\PY{p}{)}\PY{p}{)} \PY{n}{plt}\PY{o}{.}\PY{n}{show}\PY{p}{(}\PY{p}{)} \PY{k}{else}\PY{p}{:} \PY{n}{plot\PYZus{}acf}\PY{p}{(}\PY{n}{year}\PY{p}{[}\PY{p}{:}\PY{p}{,} \PY{l+m+mi}{2}\PY{p}{]}\PY{p}{,} \PY{n}{title} \PY{o}{=} \PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{Autocorrelation for }\PY{l+s+si}{\PYZob{}\PYZcb{}}\PY{l+s+s2}{\PYZdq{}} \PY{o}{.}\PY{n}{format}\PY{p}{(}\PY{l+m+mi}{1995} \PY{o}{+} \PY{l+m+mi}{5} \PY{o}{*} \PY{n}{counter}\PY{p}{)}\PY{p}{,} \PY{n}{ax}\PY{o}{=}\PY{n}{axs}\PY{p}{[}\PY{n}{counter} \PY{o}{\PYZpc{}} \PY{l+m+mi}{2}\PY{p}{]}\PY{p}{)} \PY{k}{if} \PY{n}{counter} \PY{o}{\PYZpc{}} \PY{l+m+mi}{2} \PY{o}{==} \PY{l+m+mi}{1}\PY{p}{:} \PY{c+c1}{\PYZsh{} every two figures, we need to generate a new row} \PY{n}{plt}\PY{o}{.}\PY{n}{show}\PY{p}{(}\PY{p}{)} \PY{k}{if} \PY{p}{(}\PY{n}{counter} \PY{o}{\PYZlt{}} \PY{l+m+mi}{2}\PY{p}{)}\PY{p}{:} \PY{c+c1}{\PYZsh{}a new sub plot is needed on second row} \PY{n}{fig}\PY{p}{,} \PY{n}{axs} \PY{o}{=} \PY{n}{plt}\PY{o}{.}\PY{n}{subplots}\PY{p}{(}\PY{l+m+mi}{1}\PY{p}{,} \PY{l+m+mi}{2}\PY{p}{)} \PY{n}{counter} \PY{o}{=} \PY{n}{counter} \PY{o}{+} \PY{l+m+mi}{1} \PY{n}{counter} \PY{o}{=} \PY{l+m+mi}{0}\PY{p}{;} \PY{n}{fig}\PY{p}{,} \PY{n}{axs} \PY{o}{=} \PY{n}{plt}\PY{o}{.}\PY{n}{subplots}\PY{p}{(}\PY{l+m+mi}{1}\PY{p}{,} \PY{l+m+mi}{1}\PY{p}{)} \PY{k}{for} \PY{n}{year} \PY{o+ow}{in} \PY{n}{weatherData}\PY{p}{:} \PY{n}{plot\PYZus{}acf}\PY{p}{(}\PY{n}{year}\PY{p}{[}\PY{p}{:}\PY{p}{,} \PY{l+m+mi}{2}\PY{p}{]}\PY{p}{,} \PY{n}{title} \PY{o}{=} \PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{Autocorrelation All 5 years together}\PY{l+s+s2}{\PYZdq{}} \PY{o}{.}\PY{n}{format}\PY{p}{(}\PY{l+m+mi}{1995} \PY{o}{+} \PY{l+m+mi}{5} \PY{o}{*} \PY{n}{counter}\PY{p}{)}\PY{p}{,} \PY{n}{ax}\PY{o}{=}\PY{n}{axs}\PY{p}{)} \PY{n}{plt}\PY{o}{.}\PY{n}{show}\PY{p}{(}\PY{p}{)} \end{Verbatim} \begin{center} \adjustimage{max size={0.9\linewidth}{0.9\paperheight}}{output_14_0.png} \end{center} { \hspace*{\fill} \\} \begin{center} \adjustimage{max size={0.9\linewidth}{0.9\paperheight}}{output_14_1.png} \end{center} { \hspace*{\fill} \\} \begin{center} \adjustimage{max size={0.9\linewidth}{0.9\paperheight}}{output_14_2.png} \end{center} { \hspace*{\fill} \\} \begin{center} \adjustimage{max size={0.9\linewidth}{0.9\paperheight}}{output_14_3.png} \end{center} { \hspace*{\fill} \\} \subsection{Question 6.a 
\&6.b}\label{question-6.a-6.b} Next, each year, temp is broken down into subdivisions in the range: \[[minimum-0.1=t_0,maximum+0.1=t_10]\] into ten equal intervals. This is then used to generate 10 intervals, as: \[[[t_0,t_1],...,[t_9,t_10]]\] \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \tightlist \item binCorours are predefined RGB values, given in the question \item The linspace function below is used to generate a set of numbers with equal distributions between the min and max data for each year. \item Digitisation is used to put the data into the respective buckets. This can then be used later on when the graphs are drawn \item generate a matrix to store the image colour values \item iterate over each day of the year*2. double as we need to have two rows for each day(high and low) \item set the current iteration-1 row of pixels to the low digitised index value from colour matrix provided. \item same as above, but for the current iterator for the high values provided \item convert matrix to image \item draw image to screen \end{enumerate} The output below this code shows first the bins for each year, for question 6.a. Then, the blankets are drawn for question 6.b \begin{Verbatim}[commandchars=\\\{\}] {\color{incolor}In [{\color{incolor}8}]:} \PY{n}{binColours} \PY{o}{=} \PY{p}{[}\PY{p}{[}\PY{l+m+mf}{0.139681}\PY{p}{,} \PY{l+m+mf}{0.311666}\PY{p}{,} \PY{l+m+mf}{0.550652}\PY{p}{]}\PY{p}{,} \PY{p}{[}\PY{l+m+mf}{0.276518}\PY{p}{,} \PY{l+m+mf}{0.539432}\PY{p}{,} \PY{l+m+mf}{0.720771}\PY{p}{]}\PY{p}{,} \PY{p}{[}\PY{l+m+mf}{0.475102}\PY{p}{,} \PY{l+m+mf}{0.695344}\PY{p}{,} \PY{l+m+mf}{0.802081}\PY{p}{]}\PY{p}{,} \PY{p}{[}\PY{l+m+mf}{0.670448}\PY{p}{,} \PY{l+m+mf}{0.803486}\PY{p}{,} \PY{l+m+mf}{0.824645}\PY{p}{]}\PY{p}{,} \PY{p}{[}\PY{l+m+mf}{0.809791}\PY{p}{,} \PY{l+m+mf}{0.848259}\PY{p}{,} \PY{l+m+mf}{0.777550}\PY{p}{]}\PY{p}{,} \PY{p}{[}\PY{l+m+mf}{0.861927}\PY{p}{,} \PY{l+m+mf}{0.803423}\PY{p}{,} \PY{l+m+mf}{0.673050}\PY{p}{]}\PY{p}{,} \PY{p}{[}\PY{l+m+mf}{0.830690}\PY{p}{,} \PY{l+m+mf}{0.667645}\PY{p}{,} \PY{l+m+mf}{0.546349}\PY{p}{]}\PY{p}{,} \PY{p}{[}\PY{l+m+mf}{0.742023}\PY{p}{,} \PY{l+m+mf}{0.475176}\PY{p}{,} \PY{l+m+mf}{0.424114}\PY{p}{]}\PY{p}{,} \PY{p}{[}\PY{l+m+mf}{0.613033}\PY{p}{,} \PY{l+m+mf}{0.281826}\PY{p}{,} \PY{l+m+mf}{0.306352}\PY{p}{]}\PY{p}{,} \PY{p}{[}\PY{l+m+mf}{0.450385}\PY{p}{,} \PY{l+m+mf}{0.157961}\PY{p}{,} \PY{l+m+mf}{0.217975}\PY{p}{]}\PY{p}{]} \PY{n}{counter} \PY{o}{=} \PY{l+m+mi}{0}\PY{p}{;} \PY{k}{for} \PY{n}{year} \PY{o+ow}{in} \PY{n}{weatherData}\PY{p}{:} \PY{n}{num} \PY{o}{=} \PY{l+m+mi}{10} \PY{n}{binsLow} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{linspace}\PY{p}{(}\PY{n}{year}\PY{p}{[}\PY{p}{:}\PY{p}{,} \PY{l+m+mi}{3}\PY{p}{]}\PY{o}{.}\PY{n}{min}\PY{p}{(}\PY{p}{)} \PY{o}{\PYZhy{}} \PY{l+m+mf}{0.1}\PY{p}{,} \PY{n}{year}\PY{p}{[}\PY{p}{:}\PY{p}{,} \PY{l+m+mi}{3}\PY{p}{]}\PY{o}{.}\PY{n}{max}\PY{p}{(}\PY{p}{)} \PY{o}{+} \PY{l+m+mf}{0.1}\PY{p}{,} \PY{n}{num}\PY{p}{)} \PY{n}{binsHigh} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{linspace}\PY{p}{(}\PY{n}{year}\PY{p}{[}\PY{p}{:}\PY{p}{,} \PY{l+m+mi}{1}\PY{p}{]}\PY{o}{.}\PY{n}{min}\PY{p}{(}\PY{p}{)} \PY{o}{\PYZhy{}} \PY{l+m+mf}{0.1}\PY{p}{,} \PY{n}{year}\PY{p}{[}\PY{p}{:}\PY{p}{,} \PY{l+m+mi}{1}\PY{p}{]}\PY{o}{.}\PY{n}{max}\PY{p}{(}\PY{p}{)} \PY{o}{+} \PY{l+m+mf}{0.1}\PY{p}{,} \PY{n}{num}\PY{p}{)} \PY{n}{digitizedLow} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{digitize}\PY{p}{(}\PY{n}{year}\PY{p}{[}\PY{p}{:}\PY{p}{,} \PY{l+m+mi}{3}\PY{p}{]}\PY{p}{,} \PY{n}{binsLow}\PY{p}{)} \PY{c+c1}{\PYZsh{}put the data into the bins} 
\PY{n}{digitizedHigh} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{digitize}\PY{p}{(}\PY{n}{year}\PY{p}{[}\PY{p}{:}\PY{p}{,} \PY{l+m+mi}{3}\PY{p}{]}\PY{p}{,} \PY{n}{binsHigh}\PY{p}{)} \PY{n}{rows} \PY{o}{=} \PY{l+m+mi}{2} \PY{o}{*} \PY{n+nb}{len}\PY{p}{(}\PY{n}{year}\PY{p}{[}\PY{p}{:}\PY{p}{,} \PY{l+m+mi}{3}\PY{p}{]}\PY{p}{)} \PY{n}{image} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{zeros}\PY{p}{(}\PY{p}{(}\PY{n}{rows}\PY{p}{,} \PY{l+m+mi}{451}\PY{p}{,} \PY{l+m+mi}{3}\PY{p}{)}\PY{p}{)} \PY{c+c1}{\PYZsh{}make a matrix to store the image} \PY{c+c1}{\PYZsh{}itterate over each year\PYZsq{}s values from the above values and set pixels colours} \PY{k}{for} \PY{n}{x} \PY{o+ow}{in} \PY{n+nb}{range}\PY{p}{(}\PY{l+m+mi}{0}\PY{p}{,} \PY{n}{rows}\PY{p}{,} \PY{l+m+mi}{2}\PY{p}{)}\PY{p}{:} \PY{n}{image}\PY{p}{[}\PY{n}{x} \PY{o}{\PYZhy{}} \PY{l+m+mi}{1}\PY{p}{,} \PY{l+m+mi}{0}\PY{p}{:}\PY{l+m+mi}{451}\PY{p}{]} \PY{o}{=} \PY{n}{binColours}\PY{p}{[}\PY{n+nb}{int}\PY{p}{(}\PY{n}{digitizedLow}\PY{p}{[}\PY{n+nb}{int}\PY{p}{(}\PY{n}{x} \PY{o}{/} \PY{l+m+mi}{2}\PY{p}{)}\PY{p}{]}\PY{p}{)}\PY{p}{]} \PY{n}{image}\PY{p}{[}\PY{n}{x}\PY{p}{,} \PY{l+m+mi}{0}\PY{p}{:}\PY{l+m+mi}{451}\PY{p}{]} \PY{o}{=} \PY{n}{binColours}\PY{p}{[}\PY{n+nb}{int}\PY{p}{(}\PY{n}{digitizedHigh}\PY{p}{[}\PY{n+nb}{int}\PY{p}{(}\PY{n}{x} \PY{o}{/} \PY{l+m+mi}{2}\PY{p}{)}\PY{p}{]}\PY{p}{)}\PY{p}{]} \PY{n}{printText}\PY{p}{(}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{Tempreture blanket for year: }\PY{l+s+si}{\PYZob{}\PYZcb{}}\PY{l+s+s2}{\PYZdq{}}\PY{o}{.}\PY{n}{format}\PY{p}{(}\PY{l+m+mi}{1995} \PY{o}{+} \PY{l+m+mi}{5} \PY{o}{*} \PY{n}{counter}\PY{p}{)}\PY{p}{)} \PY{n}{displayHTML}\PY{p}{(}\PY{n}{pd}\PY{o}{.}\PY{n}{DataFrame}\PY{p}{(}\PY{n}{np}\PY{o}{.}\PY{n}{column\PYZus{}stack}\PY{p}{(}\PY{p}{(}\PY{n}{binsLow}\PY{o}{.}\PY{n}{reshape}\PY{p}{(}\PY{l+m+mi}{10}\PY{p}{,} \PY{l+m+mi}{1}\PY{p}{)}\PY{p}{,} \PY{p}{(}\PY{n}{binsHigh}\PY{o}{.}\PY{n}{reshape}\PY{p}{(}\PY{l+m+mi}{10}\PY{p}{,} \PY{l+m+mi}{1}\PY{p}{)}\PY{p}{)}\PY{p}{)}\PY{p}{)}\PY{p}{,} \PY{n}{columns}\PY{o}{=}\PY{p}{[}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{Low bin}\PY{l+s+s2}{\PYZdq{}}\PY{p}{,} \PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{High bin}\PY{l+s+s2}{\PYZdq{}}\PY{p}{]}\PY{p}{)}\PY{o}{.}\PY{n}{to\PYZus{}html}\PY{p}{(}\PY{p}{)}\PY{p}{)} \PY{n}{outputImage} \PY{o}{=} \PY{n}{smp}\PY{o}{.}\PY{n}{toimage}\PY{p}{(}\PY{n}{image}\PY{p}{)} \PY{n}{outputImage}\PY{o}{.}\PY{n}{save}\PY{p}{(}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{WeatherBlancketsOutput/}\PY{l+s+si}{\PYZob{}\PYZcb{}}\PY{l+s+s1}{.png}\PY{l+s+s1}{\PYZsq{}}\PY{o}{.}\PY{n}{format}\PY{p}{(}\PY{l+m+mi}{1995} \PY{o}{+} \PY{l+m+mi}{5} \PY{o}{*} \PY{n}{counter}\PY{p}{)}\PY{p}{)} \PY{n}{drawImg}\PY{p}{(}\PY{n}{outputImage}\PY{p}{)} \PY{c+c1}{\PYZsh{}Draw image to screen, using custom draw function to put output in window} \PY{n}{counter} \PY{o}{=} \PY{n}{counter} \PY{o}{+} \PY{l+m+mi}{1}\PY{p}{;} \end{Verbatim} \begin{verbatim} <IPython.core.display.HTML object> \end{verbatim} \begin{verbatim} <IPython.core.display.HTML object> \end{verbatim} \begin{verbatim} <IPython.core.display.HTML object> \end{verbatim} \begin{verbatim} <IPython.core.display.HTML object> \end{verbatim} \begin{verbatim} <IPython.core.display.HTML object> \end{verbatim} \begin{verbatim} <IPython.core.display.HTML object> \end{verbatim} \begin{verbatim} <IPython.core.display.HTML object> \end{verbatim} \begin{verbatim} <IPython.core.display.HTML object> \end{verbatim} \begin{verbatim} <IPython.core.display.HTML object> \end{verbatim} \begin{verbatim} <IPython.core.display.HTML object> \end{verbatim} \begin{verbatim} 
<IPython.core.display.HTML object> \end{verbatim} \begin{verbatim} <IPython.core.display.HTML object> \end{verbatim} \begin{verbatim} <IPython.core.display.HTML object> \end{verbatim} \begin{verbatim} <IPython.core.display.HTML object> \end{verbatim} \begin{verbatim} <IPython.core.display.HTML object> \end{verbatim} \subsection{Question 7}\label{question-7} Next, a surface plot of a stochastic process is generated. For this, avarages are generated over all 5 sets of data for each year, as well as a standard diviation for each day. Next, a linear space is generated representing the posible tempreture ranges. finally, the PDF is found then plotted in 3D. \begin{Verbatim}[commandchars=\\\{\}] {\color{incolor}In [{\color{incolor}11}]:} \PY{n}{plt}\PY{o}{.}\PY{n}{rcParams}\PY{p}{[}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{figure.figsize}\PY{l+s+s1}{\PYZsq{}}\PY{p}{]} \PY{o}{=} \PY{p}{(}\PY{l+m+mi}{30}\PY{p}{,} \PY{l+m+mi}{20}\PY{p}{)} \PY{c+c1}{\PYZsh{}Make the figure for this question bigger} \PY{c+c1}{\PYZsh{}define the variables to store the mean, std deviation, } \PY{c+c1}{\PYZsh{} range for each day(temp), pdfs and a vector for number of days} \PY{n}{meanDay} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{zeros}\PY{p}{(}\PY{p}{(}\PY{l+m+mi}{365}\PY{p}{,} \PY{l+m+mi}{1}\PY{p}{)}\PY{p}{)} \PY{n}{stdDay} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{zeros}\PY{p}{(}\PY{p}{(}\PY{l+m+mi}{365}\PY{p}{,} \PY{l+m+mi}{1}\PY{p}{)}\PY{p}{)} \PY{n}{dayArray} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{zeros}\PY{p}{(}\PY{p}{(}\PY{l+m+mi}{5}\PY{p}{,} \PY{l+m+mi}{1}\PY{p}{)}\PY{p}{)} \PY{n}{pdf} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{zeros}\PY{p}{(}\PY{p}{(}\PY{l+m+mi}{365}\PY{p}{,} \PY{l+m+mi}{365}\PY{p}{)}\PY{p}{)} \PY{n}{dayRange} \PY{o}{=} \PY{n+nb}{range}\PY{p}{(}\PY{l+m+mi}{0}\PY{p}{,} \PY{l+m+mi}{365}\PY{p}{,} \PY{l+m+mi}{1}\PY{p}{)} \PY{k}{for} \PY{n}{day} \PY{o+ow}{in} \PY{n}{dayRange}\PY{p}{:} \PY{k}{for} \PY{n}{inx}\PY{p}{,} \PY{n}{year} \PY{o+ow}{in} \PY{n+nb}{enumerate}\PY{p}{(}\PY{n}{weatherData}\PY{p}{)}\PY{p}{:} \PY{n}{dayArray}\PY{p}{[}\PY{n}{inx}\PY{p}{,} \PY{l+m+mi}{0}\PY{p}{]} \PY{o}{=} \PY{n}{year}\PY{p}{[}\PY{p}{:}\PY{p}{,} \PY{l+m+mi}{2}\PY{p}{]}\PY{p}{[}\PY{n}{day}\PY{p}{]} \PY{n}{meanDay}\PY{p}{[}\PY{n}{day}\PY{p}{]} \PY{o}{=} \PY{n}{dayArray}\PY{o}{.}\PY{n}{mean}\PY{p}{(}\PY{p}{)} \PY{c+c1}{\PYZsh{}calculate the mean} \PY{n}{stdDay}\PY{p}{[}\PY{n}{day}\PY{p}{]} \PY{o}{=} \PY{n}{dayArray}\PY{o}{.}\PY{n}{std}\PY{p}{(}\PY{p}{)} \PY{c+c1}{\PYZsh{} calculate the standard deviation} \PY{c+c1}{\PYZsh{}generate a linear space of all days in the region, for the min to max temp} \PY{n}{tempretureSpace} \PY{o}{=} \PY{n}{np}\PY{o}{.}\PY{n}{linspace}\PY{p}{(}\PY{n}{meanDay}\PY{o}{.}\PY{n}{min}\PY{p}{(}\PY{p}{)}\PY{p}{,} \PY{n}{meanDay}\PY{o}{.}\PY{n}{max}\PY{p}{(}\PY{p}{)}\PY{p}{,} \PY{n+nb}{len}\PY{p}{(}\PY{n}{weatherData}\PY{p}{[}\PY{l+m+mi}{0}\PY{p}{]}\PY{p}{[}\PY{p}{:}\PY{p}{,} \PY{l+m+mi}{2}\PY{p}{]}\PY{p}{)}\PY{p}{)} \PY{c+c1}{\PYZsh{}itterate over the days again, now generating the pdf} \PY{k}{for} \PY{n}{day} \PY{o+ow}{in} \PY{n}{dayRange}\PY{p}{:} \PY{n}{pdf}\PY{p}{[}\PY{n}{day}\PY{p}{,} \PY{p}{:}\PY{p}{]} \PY{o}{=} \PY{n}{norm}\PY{o}{.}\PY{n}{pdf}\PY{p}{(}\PY{n}{tempretureSpace}\PY{p}{,} \PY{n}{meanDay}\PY{p}{[}\PY{n}{day}\PY{p}{]}\PY{p}{,} \PY{n}{stdDay}\PY{p}{[}\PY{n}{day}\PY{p}{]}\PY{p}{)} \PY{c+c1}{\PYZsh{}convert the values to a meshgrid } \PY{c+c1}{\PYZsh{} (return coordinate matrices from coordinate vectors)} \PY{n}{tempretureSpace}\PY{p}{,} \PY{n}{dayRange} \PY{o}{=} 
\PY{n}{np}\PY{o}{.}\PY{n}{meshgrid}\PY{p}{(}\PY{n}{tempretureSpace}\PY{p}{,} \PY{n}{dayRange}\PY{p}{)} \PY{c+c1}{\PYZsh{}finally, plot it as a 3d surf} \PY{n}{fig} \PY{o}{=} \PY{n}{plt}\PY{o}{.}\PY{n}{figure}\PY{p}{(}\PY{p}{)} \PY{n}{ax} \PY{o}{=} \PY{n}{fig}\PY{o}{.}\PY{n}{gca}\PY{p}{(}\PY{n}{projection}\PY{o}{=}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{3d}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)} \PY{n}{surf} \PY{o}{=} \PY{n}{ax}\PY{o}{.}\PY{n}{plot\PYZus{}surface}\PY{p}{(}\PY{n}{tempretureSpace}\PY{p}{,} \PY{n}{dayRange}\PY{p}{,} \PY{n}{pdf}\PY{p}{,} \PY{n}{cmap}\PY{o}{=}\PY{n}{cm}\PY{o}{.}\PY{n}{coolwarm}\PY{p}{,} \PY{n}{linewidth}\PY{o}{=}\PY{l+m+mi}{0}\PY{p}{,} \PY{n}{antialiased}\PY{o}{=}\PY{k+kc}{False}\PY{p}{)} \PY{n}{ax}\PY{o}{.}\PY{n}{set\PYZus{}xlabel}\PY{p}{(}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{Temperature (C)}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)} \PY{n}{ax}\PY{o}{.}\PY{n}{set\PYZus{}ylabel}\PY{p}{(}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{Days}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)} \PY{n}{ax}\PY{o}{.}\PY{n}{set\PYZus{}zlabel}\PY{p}{(}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{f(x)}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)} \PY{c+c1}{\PYZsh{}add a colour bar on the side to see magnitudes} \PY{n}{fig}\PY{o}{.}\PY{n}{colorbar}\PY{p}{(}\PY{n}{surf}\PY{p}{,} \PY{n}{shrink}\PY{o}{=}\PY{l+m+mf}{0.5}\PY{p}{,} \PY{n}{aspect}\PY{o}{=}\PY{l+m+mi}{5}\PY{p}{)} \PY{n}{plt}\PY{o}{.}\PY{n}{title}\PY{p}{(}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{surface plot of stochastic process}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)} \PY{n}{plt}\PY{o}{.}\PY{n}{show}\PY{p}{(}\PY{p}{)} \end{Verbatim} \begin{center} \adjustimage{max size={0.9\linewidth}{0.9\paperheight}}{output_18_0.png} \end{center} { \hspace*{\fill} \\} \begin{Verbatim}[commandchars=\\\{\}] {\color{incolor}In [{\color{incolor} }]:} \end{Verbatim} % Add a bibliography block to the postdoc \end{document}
{ "alphanum_fraction": 0.576152654, "avg_line_length": 61.8944844125, "ext": "tex", "hexsha": "11a770fa0fbdceb40c43daf6af7aca7e1877b26f", "lang": "TeX", "max_forks_count": 1, "max_forks_repo_forks_event_max_datetime": "2019-07-01T16:09:29.000Z", "max_forks_repo_forks_event_min_datetime": "2019-07-01T16:09:29.000Z", "max_forks_repo_head_hexsha": "ba47bf2fe5bf4ab9703cbc6cb2a9c229741e583d", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "SoIidarity/ICAO-Codes-Weather-analysis-", "max_forks_repo_path": "Latex/MainFile.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "ba47bf2fe5bf4ab9703cbc6cb2a9c229741e583d", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "SoIidarity/ICAO-Codes-Weather-analysis-", "max_issues_repo_path": "Latex/MainFile.tex", "max_line_length": 428, "max_stars_count": 1, "max_stars_repo_head_hexsha": "ba47bf2fe5bf4ab9703cbc6cb2a9c229741e583d", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "SoIidarity/ICAO-Codes-Weather-analysis-", "max_stars_repo_path": "Latex/MainFile.tex", "max_stars_repo_stars_event_max_datetime": "2019-11-05T12:41:36.000Z", "max_stars_repo_stars_event_min_datetime": "2019-11-05T12:41:36.000Z", "num_tokens": 21478, "size": 51620 }
Sometimes, functions can have the same names. Consider this code: \begin{rustc} trait Foo { fn f(&self); } trait Bar { fn f(&self); } struct Baz; impl Foo for Baz { fn f(&self) { println!("Baz's impl of Foo"); } } impl Bar for Baz { fn f(&self) { println!("Baz's impl of Bar"); } } let b = Baz; \end{rustc} If we were to try to call \code{b.f()}, we'd get an error: \begin{verbatim} error: multiple applicable methods in scope [E0034] b.f(); ^~~ note: candidate #1 is defined in an impl of the trait `main::Foo` for the type `main::Baz` fn f(&self) { println!("Baz's impl of Foo"); } ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ note: candidate #2 is defined in an impl of the trait `main::Bar` for the type `main::Baz` fn f(&self) { println!("Baz's impl of Bar"); } ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \end{verbatim} We need a way to disambiguate which method we need. This feature is called ‘universal function call syntax', and it looks like this: \begin{rustc} Foo::f(&b); Bar::f(&b); \end{rustc} Let's break it down. \begin{rustc} Foo:: Bar:: \end{rustc} These halves of the invocation are the types of the two traits: \code{Foo} and \code{Bar}. This is what ends up actually doing the disambiguation between the two: Rust calls the one from the trait name you use. \begin{rustc} f(&b) \end{rustc} When we call a method like \code{b.f()} using method syntax (see \nameref{sec:syntax_methodSyntax}), Rust will automatically borrow \code{b} if \code{f()} takes \code{\&self}. In this case, Rust will not, and so we need to pass an explicit \code{\&b}. \subsection*{Angle-bracket Form} The form of UFCS we just talked about: \begin{rustc} Trait::method(args); \end{rustc} Is a short-hand. There's an expanded form of this that's needed in some situations: \begin{rustc} <Type as Trait>::method(args); \end{rustc} The \code{<>::} syntax is a means of providing a type hint. The type goes inside the \code{<>}s. In this case, the type is \code{Type as Trait}, indicating that we want \code{Trait}'s version of \code{method} to be called here. The \code{as Trait} part is optional if it's not ambiguous. Same with the angle brackets, hence the shorter form. \blank Here's an example of using the longer form. \begin{rustc} trait Foo { fn foo() -> i32; } struct Bar; impl Bar { fn foo() -> i32 { 20 } } impl Foo for Bar { fn foo() -> i32 { 10 } } fn main() { assert_eq!(10, <Bar as Foo>::foo()); assert_eq!(20, Bar::foo()); } \end{rustc} Using the angle bracket syntax lets you call the trait method instead of the inherent one.
{ "alphanum_fraction": 0.6446623916, "avg_line_length": 23.4601769912, "ext": "tex", "hexsha": "65381fc0224bad0b42052bff7a36078b2e9ecdfa", "lang": "TeX", "max_forks_count": null, "max_forks_repo_forks_event_max_datetime": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_head_hexsha": "2d86097002e09eb6338a8b2c143da86dec81092e", "max_forks_repo_licenses": [ "MIT" ], "max_forks_repo_name": "Darth-Revan/rust-lang_Doc-LaTeX", "max_forks_repo_path": "src/syntax/universal_function_call_syntax.tex", "max_issues_count": null, "max_issues_repo_head_hexsha": "2d86097002e09eb6338a8b2c143da86dec81092e", "max_issues_repo_issues_event_max_datetime": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_licenses": [ "MIT" ], "max_issues_repo_name": "Darth-Revan/rust-lang_Doc-LaTeX", "max_issues_repo_path": "src/syntax/universal_function_call_syntax.tex", "max_line_length": 132, "max_stars_count": 1, "max_stars_repo_head_hexsha": "2d86097002e09eb6338a8b2c143da86dec81092e", "max_stars_repo_licenses": [ "MIT" ], "max_stars_repo_name": "Darth-Revan/rust-lang_Doc-LaTeX", "max_stars_repo_path": "src/syntax/universal_function_call_syntax.tex", "max_stars_repo_stars_event_max_datetime": "2017-07-21T14:09:39.000Z", "max_stars_repo_stars_event_min_datetime": "2017-07-21T14:09:39.000Z", "num_tokens": 757, "size": 2651 }
%% Copyright (C) 2009-2012, Gostai S.A.S. %% %% This software is provided "as is" without warranty of any kind, %% either expressed or implied, including but not limited to the %% implied warranties of fitness for a particular purpose. %% %% See the LICENSE file for more information. \chapter{First Steps} \label{sec:tut:first} This section expects that you already know how to run \command{urbi}. If not, please first see \autoref{sec:tut:started}. This section introduces the most basic notions to write \us code. Some aspects are presented only minimally. The goal of this section is to bootstrap yourself with the \us language, to be able to study more in-depth examples afterward. \section{Comments} Commenting your code is crucial, so let's start by learning how to do this in \us. \dfn{Comments} are ignored by the interpreter, and can be left as documentation, reminder, \ldots \us supports \langC and \Cxx style comments: \begin{itemize} \item \langC style comments start with \textcmt{/*} and end with \textcmt{*/}. Contrary to \langC/\Cxx, this type of comments does nest. \item \Cxx style comments start with \textcmt{//} and last until the end of the line. \end{itemize} \begin{urbiscript}[firstnumber=1] 1; // This is a C++ style comment. [00000000] 1 2 + /* This is a C-style comment. */ 2; [00000000] 4 "foo" /* You /* can /* nest */ */ comments. */ "bar"; [00000000] "foobar" \end{urbiscript} \autoref{sec:tut:started} introduced some of the conventions used in this document: frames such as the previous one denote ``\us sessions'', i.e., dialogs between \urbi and you. The output is prefixed by a number between square brackets: this is the date (in milliseconds since the server was launched) at which that line was sent by the server. This is useful at occasions, since \urbi is meant to run many parallel commands. Since these timestamps are irrelevant in documentation, they will often be filled with zeroes. More details about the typesetting of this document (and the other kinds of frames) can be found in \autoref{sec:notations}. \section{Literal values} Several special kinds of ``values'' can be entered directly with a specific syntax. They are called \dfn{literals}, or sometimes \dfn{manifest values}. We just met a first kind of literals: integers. There are several others, such as: \begin{itemize} \item \refObject[Float]{floats}: floating point numbers. \begin{urbiscript} 42; // Integer literal. [00000000] 42 3.14; // Floating point number literal. [00000000] 3.14 \end{urbiscript} \item \refObject[String]{strings}: character strings. \begin{urbiscript} "string"; [00000000] "string" \end{urbiscript} \item \refObject[List]{lists}: ordered collection of values. \begin{urbiscript} [1, 2, "a", "b"]; [00000000] [1, 2, "a", "b"] \end{urbiscript} \item \refObject[Dictionary]{dictionaries}: unordered collection of associations. \begin{urbiscript} ["a" => 1, "b" => 2, 3 => "three"]; [00000000] [3 => "three", "a" => 1, "b" => 2] \end{urbiscript} \item \refObject{nil}: neutral value, or value placeholder. Think of it as the value that fits anywhere. \begin{urbiscript} nil; \end{urbiscript} \item \refObject{void}: absence of value. Think of it as the value that fits nowhere. \begin{urbiscript} void; \end{urbiscript} \end{itemize} These examples highlight some points: \begin{itemize} \item \refObject[List]{Lists} and \refObject[Dictionary]{dictionaries} in \us are heterogeneous. That is, they can hold values of different types. \item The printing of \refObject{nil} and \refObject{void} is empty. 
\item There are many hyperlinks in this document: clicking on names such as \refObject{Dictionary} will drive you immediately to its specifications. This is also true for slots, such as \refSlot[String]{size}. \end{itemize} \section{Function calls} You can call functions with the classical, mathematical notation. \begin{urbiscript} Math.cos(0); // Compute cosine [00000000] 1 Math.max(1, 3); // Get the maximum of the arguments. [00000000] 3 Math.max(1, 3, 4, 2); [00000000] 4 \end{urbiscript} Again, the result of the evaluation are printed out. You can see here that function in \us can be variadic, that is, take different number of arguments, such as the \lstinline{max} function. Let's now try the \lstindex{echo} function, that prints out its argument. \begin{urbiscript} echo("Hello world!"); [00000000] *** Hello world! \end{urbiscript} The server prints out \lstinline{Hello world!}, as expected. Note that this output is still prepended with the time stamp. Since \lstinline{echo} returns \lstinline{void}, no evaluation result is printed. \section{Variables}\index{variable} Variables can be introduced with the \lstinline{var} keyword, given a name and an initial value. They can be assigned new values with the \lstinline{=} operator. \begin{urbiscript} var x = 42; [00000000] 42 echo(x); [00000000] *** 42 x = 51; [00000000] 51 x; [00000000] 51 \end{urbiscript} Note that, just as in \Cxx, assignments return the (right-hand side) value, so you can write code like ``\lstinline|x = y = 0|''. The rule for valid identifiers is also the same as in \Cxx: they may contain alphanumeric characters and underscores, but they may not start with a digit. You may omit the initialization value, in which case it defaults to \lstinline|void|. \begin{urbiscript} var y; y; // Remember, the interpreter remains silent because void is printed out // as nothing. You can convince yourself that y is actually void with // the following methods. y.asString; [00000000] "void" y.isVoid; [00000000] true \end{urbiscript} \section{Scopes} \dfn{Scopes} are introduced with curly brackets (\lstinline|{}|). They can contain any number of statements. Variables declared in a scope only exist within this scope. \begin{urbicomment} removeSlots("x"); \end{urbicomment} \begin{urbiscript} { var x = "test"; echo(x); }; [00000000] *** test // x is no longer defined here x; [00000073:error] !!! lookup failed: x \end{urbiscript} Note that the interpreter waits for the whole scope to be input to evaluate it. Also note the mandatory terminating semicolon after the closing curly bracket. \section{Method calls} Methods are called on objects with the dot (\lstinline{.}) notation as in \Cxx. Method calls can be chained. Methods with no arguments don't require the parentheses. \begin{urbiscript} 0.cos(); [00000000] 1 "a-b-c".split("-"); [00000000] ["a", "b", "c"] "foo".length(); [00000000] 3 // Method call can be chained "".length().cos(); [00000000] 1 \end{urbiscript} In \lstinline|obj.method|, we say that \lstinline{obj} is the \dfn{target}, and that we are sending him the \lstinline{method} \dfn{message}. \section{Function definition} You know how to call routines, let's learn how to write some. Functions can be declared thanks to the \lstinline{function} keyword, followed by the comma separated, parentheses surrounded list of formal arguments, and the body between curly brackets. 
\begin{urbiscript} // Define myFunction function myFunction() { echo("Hello world"); echo("from my function!"); }; [00000000] function () { [:] echo("Hello world"); [:] echo("from my function!"); [:]} // Invoke it myFunction(); [00000000] *** Hello world [00000000] *** from my function! \end{urbiscript} Note the strange output after you defined the function. \us seems to be printing the function you just typed in again. This is because a function definition evaluates to the freshly created function. Functions are first class citizen: they are values, just as \lstinline{0} or \lstinline{"foobar"}. The evaluation of a function definition yields the new function, and as always, the interpreter prints out the evaluation result, thus showing you the function again: \begin{urbiscript} // Work in a scope. { // Define f function f() { echo("f") }; // This does not invoke f, it returns its value. f; }; [00000000] function () { echo("f") } { // Define f function f() { echo("Hello World"); }; // This actually calls f f(); }; [00000000] *** Hello World \end{urbiscript} Here you can see that \lstinline{f} is actually a simple value. You can just evaluate it to see its value, that is, its body. By adding the parentheses, you can actually call the function. This is a difference with methods calling, where empty parentheses are optional: method are always evaluated, you cannot retrieve their functional value --- of course, you can with a different construct, but that's not the point here. Since this output is often irrelevant, most of the time it is hidden in this documentation using the \lstinline'|;' trick. When a statement is ``missing'', an empty statement (\lstinline|{}|) is inserted. So \lstinline'\var{code}|;' is actually equivalent to \lstinline'\var{code} | {};', which means ``run \var{code}, then run \lstinline'{}' and return its value''. Since the value of \lstinline'{}' is \lstinline'void', which is not displayed, this is a means to discard the result of a computation, and avoid that something is printed. Contrast the two following function definitions. \begin{urbiscript} function sum(a, b, c) { return a + b + c; }; [00003553] function (var a, var b, var c) { return a.'+'(b).'+'(c) } function sum2(a, b, c) { return a + b + c; }|; sum(20, 2, 20); [00003556] 42 \end{urbiscript} The \lstinline{return} keyword breaks the control flow of a function (similarly to the way \lstinline{break} interrupts a loop) and returns the control flow to the caller. It accepts an optional argument, the value to return to the caller. In \us, if no \lstinline{return} statement is executed, the value of the last expression is returned. Actually, refrained from using \lstinline{return} when you don't need it, it is both less readable (once you get used to this programming style), and less efficient (\autoref{sec:guideline:return}). \begin{urbiscript} function succ(i) { i + 1 }|; succ(50); [00000000] 51 \end{urbiscript} \section{Conclusion} You're now up and running with basic \us code, and we can dive in details into advanced \us code. %%% Local Variables: %%% coding: utf-8 %%% mode: latex %%% TeX-master: "../urbi-sdk" %%% ispell-dictionary: "american" %%% ispell-personal-dictionary: "../urbi.dict" %%% fill-column: 76 %%% End:
{ "alphanum_fraction": 0.7306909584, "avg_line_length": 30.1725146199, "ext": "tex", "hexsha": "d07e167cf4db68e1faa8d9630bf1b17ddf23601f", "lang": "TeX", "max_forks_count": 15, "max_forks_repo_forks_event_max_datetime": "2021-09-28T19:26:08.000Z", "max_forks_repo_forks_event_min_datetime": "2015-01-28T20:27:02.000Z", "max_forks_repo_head_hexsha": "fb17359b2838cdf8d3c0858abb141e167a9d4bdb", "max_forks_repo_licenses": [ "BSD-3-Clause" ], "max_forks_repo_name": "jcbaillie/urbi", "max_forks_repo_path": "doc/tutorial/first-steps.tex", "max_issues_count": 7, "max_issues_repo_head_hexsha": "fb17359b2838cdf8d3c0858abb141e167a9d4bdb", "max_issues_repo_issues_event_max_datetime": "2019-02-13T10:51:07.000Z", "max_issues_repo_issues_event_min_datetime": "2016-09-05T10:08:33.000Z", "max_issues_repo_licenses": [ "BSD-3-Clause" ], "max_issues_repo_name": "jcbaillie/urbi", "max_issues_repo_path": "doc/tutorial/first-steps.tex", "max_line_length": 78, "max_stars_count": 16, "max_stars_repo_head_hexsha": "fb17359b2838cdf8d3c0858abb141e167a9d4bdb", "max_stars_repo_licenses": [ "BSD-3-Clause" ], "max_stars_repo_name": "jcbaillie/urbi", "max_stars_repo_path": "doc/tutorial/first-steps.tex", "max_stars_repo_stars_event_max_datetime": "2021-10-05T22:16:13.000Z", "max_stars_repo_stars_event_min_datetime": "2016-05-10T05:50:58.000Z", "num_tokens": 2880, "size": 10319 }
% % This is a basic LaTeX Template % for the Informatics Research Review
\documentclass[a4paper,11pt]{article}
% Add local fullpage and head macros
\usepackage{head,fullpage}
% Add graphicx package with pdf flag (must use pdflatex)
\usepackage[pdftex]{graphicx}
% Better support for URLs
\usepackage{url}
% Date formatting
\usepackage{datetime}
% For Gantt chart
\usepackage{pgfgantt} \usepackage{xcolor} \usepackage[utf8]{inputenc}
\newdateformat{monthyeardate}{%
\monthname[\THEMONTH] \THEYEAR}
\parindent=0pt % Switch off indent of paragraphs
\parskip=5pt % Put 5pt between each paragraph
\Urlmuskip=0mu plus 1mu % Better line breaks for URLs
% This section generates a title page % Edit only the following three lines % providing your exam number, % the general field of study you are considering % for your review, and name of IRR tutor
\newcommand{\examnumber}{B131025} \newcommand{\field}{Emergence of Numerals in \\ Multi-agent Autonomous Communication System} \newcommand{\tutor}{Prof. Stuart Anderson} \newcommand{\supervisor}{Dr. Ivan Titov}
% added by Shawn
\usepackage{breakcites}
\begin{document} \begin{minipage}[b]{110mm} {\Huge\bf School of Informatics \vspace*{17mm}} \end{minipage} \hfill \begin{minipage}[t]{40mm} \makebox[40mm]{ \includegraphics[width=40mm]{crest.png}} \end{minipage} \par\noindent
% Centre Title, and name
\vspace*{2cm} \begin{center} \Large\bf Informatics Project Proposal \\ \Large\bf \field \end{center} \vspace*{1.5cm} \begin{center} \bf \examnumber\\ \monthyeardate\today \end{center} \vspace*{5mm}
% % Insert your abstract HERE %
\begin{abstract} This project aims to develop a new computer simulation method for the emergence of numeral systems, based on a multi-agent autonomous communication system and following a deep reinforcement learning methodology. With this new method, we no longer need to pre-assign any linguistic knowledge to the computer models. Instead, we can let computational agents invent their numeral system entirely from scratch. Thus, this project can help us to evaluate not only the assumptions about the emergence of numeral systems in natural language but also the importance of certain factors in this process. From the perspective of artificial intelligence, this project may also provide a new technique for addressing the long-standing natural language understanding problem. \end{abstract} \vspace*{1cm} \vspace*{3cm} Date: \today \vfill {\bf Tutor:} \tutor\\ {\bf Supervisor:} \supervisor \newpage
% Through page and setup % fancy headings
\setcounter{page}{1} % Set page number to 1
\footruleheight{1pt} \headruleheight{1pt} \lfoot{\small School of Informatics} \lhead{Informatics Project Proposal} \rhead{- \thepage} \cfoot{} \rfoot{Date: \date{\today}}
% \tableofcontents
\section{Motivation} \label{sec:1intro} How do natural languages emerge and evolve? This is a critical question for the field of evolutionary linguistics \cite{macwhinney2013emergence}. As linguistic behaviours are typically not preserved from pre-history \cite{lieberman2006toward}, it is necessary to develop quantitative methods to overcome this time limit \cite{evans2009myth}. Among these quantitative methods, computer simulation\footnote{Unless indicated otherwise, computer simulation in this paper refers to the computer simulation methods employed in evolutionary linguistics.} has become increasingly important since it was first introduced by \cite{hurford1989biological}.
During the last two decades, computer simulation methods have attracted increasing attention (e.g. \cite{hurford1998approaches, knight2000evolutionary, briscoe2002book, cangelosi2012simulating, christiansen2003language, bickerton2009biological}). However, in all these previous works, the basic linguistic elements (meanings to communicate about, a signalling channel to employ) are provided from the outset, and the learning products of the models are therefore insufficient to test hypotheses about the emergence of these basic elements themselves, such as the emergence of numerals or pronouns. With the recent rapid development of deep reinforcement learning (DRL), e.g. \cite{mnih2015human, silver2017mastering}, computers are now able to master a variety of complicated cognitive activities. Thus, several papers in grounded language learning (GLL), e.g. \cite{hermann2017grounded, mordatch2018emergence}, apply DRL to language games \cite{wittgenstein2009philosophical} and signalling games \cite{lewis2008convention} in order to help agents understand natural language or invent a communication protocol that shares some characteristics with natural language. Under the assumption that agents should learn and understand language by interacting with their environment and with others (as humans do), GLL aims to address the natural language understanding problem with agents that can ground symbols/utterances in their experiences \cite{hill2017understanding}. Building on this promising progress in GLL, this project aims to propose a new simulation method for the emergence of specific linguistic phenomena in a multi-agent autonomous communication system trained with DRL techniques to solve specifically designed language games that encode linguistic hypotheses or theories. However, as language is too huge a topic, we limit our project to numeral systems for the following reasons: i) numeral systems are relatively simple and self-contained \cite{james1999numeral}; ii) concepts related to numeral systems are more straightforward to model with numeric representations; iii) the functions of emergent numeral systems can be formalised and verified more reliably in simulation. Moreover, with our new simulation method, we can not only further evaluate these hypotheses/theories about the emergence of numeral systems but also shape the numeral systems\footnote{Formally, it should be a kind of communication protocol.} invented by computational agents to be similar to those in natural language, and thus help agents understand natural language better (under assumptions from contact linguistics). In the following sections, we first introduce background knowledge from evolutionary linguistics and GLL respectively. Then, in Section \ref{sec:3questions}, we introduce the high-level architecture and the proposed language game. Finally, we analyse the potential risks of the project and give a preliminary timetable.
\subsection{Computer Simulation Methods in Evolutionary Linguistics}
\label{ssec:2.1simulation_in_EL}

The fact that linguistic behaviours can hardly be recorded throughout history \cite{lieberman2006toward} makes computer simulation an ``operational" empirical method for verifying hypotheses or theories about the emergence and evolution of language \cite{parisi2007emergence}. In the evolutionary linguistics community, transforming linguistic theories into computational models usually consists of 3 steps \cite{gong2013computer}: i) set up environments in which agents can act and communicate; ii) pre-define basic linguistic knowledge for the models; iii) analyse the pre-defined quantities. We further discuss the first and second steps, as they share similar characteristics with our project.

Depending on the simulation objective, the environments can be broadly classified into the following 3 types \cite{gong2013computer}:

\begin{itemize}
    \item \textit{Iterated learning}: first introduced by \cite{kirby1999function}, in which all individual agents form a chain, learn the underlying linguistic mechanism by observing their precursor, and generate behaviours for the next agent in the chain. Essentially, this setting simulates cultural transmission from generation to generation.
    \item \textit{Language game}: first introduced by \cite{wittgenstein2009philosophical}, in which individual agents play specific games and develop shared communication protocols by completing specific tasks. There exist many derivations, such as the naming game \cite{baronchelli2006sharp} and the category game \cite{puglisi2008cultural}.
    \item \textit{Genetic evolution}: first introduced by \cite{briscoe1998language}, in which every agent has its own genome encoding idiolects and specific linguistic elements. Under the assumption of natural selection \cite{darwin1859origin}, agents attain higher and higher fitness values through random mutation during reproduction.
\end{itemize}

In our project, we also follow the framework of the \textbf{language game}, like other works in GLL \cite{hermann2017grounded}. However, unlike existing works, we do not need to pre-define any linguistic elements, which are introduced as follows.

Most of the computational models from the evolutionary linguistics community learn bi-directional meaning–utterance mappings (M–U mappings) \cite{gong2013computer}. Thus, different forms of pre-defined linguistic knowledge result in different kinds of model. Generally speaking, these models can be classified into two categories: i) lexical models; ii) syntactic and grammatical models. In lexical models, such as \cite{steels2005emergence, baronchelli2006sharp, puglisi2008cultural}, the main question is whether a common lexicon can form during autonomous communication between agents. In syntactic and grammatical models, on the other hand, agents aim to map semantic items to either structured or unstructured utterances, as in \cite{kirby1999function, vogt2005acquisition}. No matter how this mapping function is learnt, e.g. by neural network models \cite{munroe2002learning} or by mathematical equations \cite{minett2008modelling, ke2008language}, the most basic elements of language, such as symbols and rules for generating phrases, are all pre-defined, and agents operate on these basic elements.
\subsection{Grounded Language Learning}
\label{ssec:2.2ma_gll}

Natural language understanding is a long-standing and challenging problem in natural language processing and artificial intelligence. With the development of deep learning, most current state-of-the-art methods are based on deep learning models trained on massive static textual corpora. We humans, however, learn and understand languages by interacting with others and with the real world, which is a different paradigm from statistical learning. GLL therefore argues that agents also need to learn language in this way. Basically, in grounded language learning, agents learn to communicate with symbols from a fixed vocabulary by completing tasks in a specific environment; these procedures are called games. The games mentioned above derive from the language games first introduced by \cite{wittgenstein2009philosophical} and the signalling games proposed by \cite{lewis2008convention}.

Based on the number of participating agents, we briefly classify such games into 2 categories: i) single-agent games such as \cite{hermann2017grounded, yu2018interactive, das2018embodied}, in which research mainly explores how an agent can learn natural language from humans; ii) multi-agent games like \cite{mordatch2018emergence, havrylov2017emergence}, in which research mainly focuses on how language emerges and develops during autonomous communication within an agent population. The multi-agent games themselves are mainly designed to explore the conditions for the emergence and development of natural-language-like communication protocols, in which agents resemble primitive humans who created natural languages during production or, so to speak, survival games. Specifically, \cite{mordatch2018emergence} observes that demonstrative pronouns emerge when there are more than two agents in their game. Meanwhile, it is shown in \cite{jaques2018intrinsic} that collaboration between different agents is a crucial motivation for the development of cooperative communication, especially when each agent holds partial information about the environment. However, instead of aiming to make DRL models generalise better, this project aims to propose a systematic methodology for facilitating the emergence of numeral systems in a multi-agent autonomous communication system.

\section{Architecture and Proposed Language Games}
\label{sec:3questions}

Although previous work on computer simulation in evolutionary linguistics has produced promising results, one congenital deficiency of these methods is that they lack the capability to simulate the emergence of the basic elements of numeral systems, such as cardinal numerals and ordinal numerals. Take the work of \cite{james1999numeral} for example: the semantic primitives, as well as the basic cognitive operations, are set up at the beginning of the simulation. Building on the success of DRL in GLL, we propose a new framework that can simulate these basic elements of numeral systems, in order to further relax the restrictions on the learning procedure of computational models and make it more consistent with the learning procedure of humans. In this section, we first give an overall illustration of the architecture and components of our simulation system. Then, we present a specific language game that can simulate the emergence of cardinal numerals.
Further, we propose two more games to simulate the emergence of ordinal numerals and numeral grammars, and analyse the main challenges in such simulations.

\subsection{Architecture of Proposed Simulation Method}
\label{ssec:3.1architecture}

We keep to the traditional framework of transforming linguistic theories into a simulation mechanism, except that we do not need to provide any artificial language or prior linguistic knowledge to the computational agents. Thus, the main steps in our framework are: i) design a language game that encodes the linguistic hypotheses or theories to be verified; ii) implement this language game in a virtual environment; iii) let agents interact with and adapt to the environment\footnote{This step is achieved with DRL techniques.}, during which communication is necessary and autonomous; iv) analyse the experimental results to obtain the verification results. Corresponding to these steps, a system designed by our method has 3 main components:

\begin{enumerate}
    \item \textbf{Environment}: The virtual environments in our systems are all multi-agent systems, in which agents need to cooperate/compete with each other to complete tasks that are in fact specifically designed language games. By defining observations\footnote{Terminology from the reinforcement learning community. More information can be found in \cite{sutton1998introduction}.}, action spaces and reward functions for every agent, we can build specific language games and encode linguistic hypotheses or theories by carefully designing the mechanisms of these games. As our goal is to study linguistic phenomena, every environment should establish communication channels between agents and make communication necessary for agents to complete their tasks.
    \item \textbf{Agents}: As we do not want to expose any prior linguistic knowledge to the agents, our multi-agent systems are all trained by multi-agent reinforcement learning algorithms such as MADDPG \cite{lowe2017multi}. Also, as communication is just one kind of action in the virtual environment, the communication behaviours of the agents are entirely spontaneous and autonomous. Thus, we can verify hypotheses/theories about the emergence of basic linguistic elements by analysing the symbol system produced within the multi-agent system.
    \item \textbf{Vocabulary}: As natural language words are discrete symbols, we force agents to communicate with a fixed-size discrete vocabulary in order to simulate natural language. Considering that the range of real numbers is unlimited, agents would otherwise be able to communicate as much information as they want with a single real-valued number, so it is necessary to limit their communication to a fixed-size discrete vocabulary. Moreover, inspired by the symbol grounding problem proposed by \cite{harnad1990symbol}, we further argue that agents can ground symbols in their observations of the virtual environment after learning.
\end{enumerate}

\subsection{Game for Cardinal Numerals: Food-Gathering Bandit Game}
\label{ssec:3.2cardinal_game}

As cardinal numerals are the most basic ingredients of numeral systems, we first propose the so-called ``Food-Gathering Bandit Game" (FGBG) to simulate the emergence of cardinal numerals.
Following the architecture described in subsection \ref{ssec:3.1architecture}, we design this game by specifying: i) the components of the game; ii) the objective of the agents; iii) the game procedure and restrictions. A brief diagram of the proposed game is given in Figure \ref{fig:game1} and more details are described in the following paragraphs.

\begin{figure}[!h]
    \centering
    \includegraphics[width=0.6\textwidth]{Diagram.pdf}
    \caption{Diagram of proposed Food-Gathering Bandit Game.}\label{fig:game1}
\end{figure}

In order to better describe our game configuration, we first introduce the components of FGBG:
\begin{itemize}
    \item Food Warehouse: The food warehouse always contains 3 specific kinds of food, whose quantities are initialised randomly from episode to episode. Each quantity is represented as a single integer $n\in \{1, 2, \dots, 5\}$.
    \item Speaking Agent: The agent that can check the status of the warehouse and send a message to the other agent. Its action space is limited such that it cannot push the bandit.
    \item Listening Agent: The agent that receives the message from the speaking agent and can then push the bandit according to the message. Its observation is limited such that it cannot observe the status of the food warehouse.
    \item 4-armed Bandit: Only 4 actions are supported by the 4-armed bandit, i.e. get an apple, get a banana, get a kiwi, or end the episode immediately.
    \item Message Channel: Messages consist of discrete symbols that are initially meaningless. We assume that different reward configurations would lead to different preferences of the agents, and we illustrate this further in the following paragraph.
\end{itemize}

The \textbf{objective} of FGBG is to fill the food warehouse with exactly 5 apples, 5 bananas and 5 kiwis. At the beginning of every episode, the speaking agent first checks the status of the food warehouse and then sends a message to the listening agent. After receiving the message, the listening agent pushes the bandit arms; once it pushes the end bandit, the game ends. If the listening agent has pushed each bandit exactly the number of times needed, i.e. 5 minus the current number of each fruit, both agents get reward $+100$; otherwise, they get $-100$. Take the situation in Figure \ref{fig:game1} for example. As there are 3 apples, 5 bananas and 4 kiwis in the food warehouse after initialisation, the desired behaviour of the listening agent, after receiving the message from the speaking agent, is to \textit{push the 1st bandit twice, push the 3rd bandit once and then push the end bandit}. If it does so, both agents get reward $+100$, otherwise $-100$. Under the assumption that the only information that needs to be communicated between the two agents is the number of each fruit in the food warehouse, we argue that the symbol combinations appearing as messages should all be combinations of numerals. From a functional perspective, the numerals that emerge in this game describe how many times an action should be executed. Take the number $3$ for example: its function is to compress a string consisting of 3 identical symbols into 1 token, e.g. $$3(apple) = apple \ \ apple\ \ apple$$ Ideally, agents could assign every symbol to a specific number. However, it is also possible that the meanings of symbols in their communication are not atomic, i.e. they may use several successive symbols to represent a single number, like the 8-digit integers in digital computers. A minimal sketch of the game dynamics is given below.
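To make the game mechanics concrete, the following minimal Python sketch implements the warehouse initialisation and reward logic described above. It is an illustration only: the class and method names (\texttt{FGBGEnv}, \texttt{reset}, \texttt{step}) and all implementation details are placeholder choices rather than part of the proposed system, and message generation by the speaking agent is deliberately left out.

\begin{verbatim}
import random

class FGBGEnv:
    """Illustrative sketch of the Food-Gathering Bandit Game."""
    TARGET = 5   # desired amount of each fruit
    FRUITS = 3   # apple, banana, kiwi

    def reset(self):
        # Warehouse starts with 1..5 items of each fruit.
        self.warehouse = [random.randint(1, 5) for _ in range(self.FRUITS)]
        self.pushes = [0] * self.FRUITS
        return tuple(self.warehouse)   # observed by the speaking agent only

    def step(self, arm):
        """arm in {0, 1, 2}: gather that fruit; arm == 3: end the episode."""
        if arm < self.FRUITS:
            self.pushes[arm] += 1
            return 0, False            # no reward until the episode ends
        needed = [self.TARGET - n for n in self.warehouse]
        reward = 100 if self.pushes == needed else -100
        return reward, True            # shared by both agents
\end{verbatim}

The discrete message channel between the two agents would sit on top of this loop: the speaking agent maps the observation returned by \texttt{reset} to a sequence of vocabulary symbols, and the listening agent maps that sequence to a sequence of \texttt{step} calls.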
\noindent\textbf{Future Work} Based on FGBG, we could also alter the components so that the simulation can express/reflect more linguistic phenomena related to cardinal numerals. For instance, with a penalisation mechanism on the length of messages, we could further verify hypotheses about the balance between efficiency and ambiguity in language.

\subsection{Game for Numeral Grammars}
\label{ssec:3.4garmmar_game}

Unlike cardinal numerals and ordinal numerals, the emergence of numeral grammars is not self-supported but relies on the other basic elements of numeral systems. Thus, we propose the following hypotheses that can facilitate the emergence of numeral grammars and, more importantly, can be encoded in the proposed simulation framework.
\begin{enumerate}
    \item Dynamically changing objective and dialogue: concepts related to arithmetic operations in numeral grammars become necessary when agents need dialogues to track a dynamically changing objective while completing tasks. For example, in FGBG, once the speaking agent realises that the status of the food warehouse has changed, it can directly tell the listener how many more apples are needed, which is a more efficient communication protocol.
    \item Updating the existing cardinal numerals: once agents realise the boundedness of their existing cardinal numeral system, they could adopt an $n$-based positional numeral system as an update to the existing communication protocol (i.e. the numeral system). Assuming that the vocabulary cannot be enlarged during the simulation, this is the only way for agents to enhance the representational capability of the existing numeral system.
\end{enumerate}

%\begin{itemize}
%    \item Detail the methodology to be used in pursuit of the research and justify this choice.
%    \item Describe your contributions and novelty and where you
%    will go beyond the state-of-the-art (new methods, new tools,
%    new data, new insights, new proofs,...)
%    \item Describe the programme of work, indicating the research to be undertaken and the milestones that can be used to measure its progress.
%    \item Where suitable define work packages and define the dependences
%    between these work packages. WPs and their dependences should be
%    shown in the Gantt chart in the research plan.
%    \item Explain how the project will be managed.
%    \item State the limitations of your research.
%\end{itemize}

\section{Evaluation}
\label{sec:4evaluaion}

The key problems here are:
\begin{itemize}
    \item How to evaluate the effectiveness of the proposed method? (Rewards gained during the game.)
    \item How to evaluate the emergent numeral system? How to decipher the communication protocol?
\end{itemize}

\section{Expected Outcomes}
\label{sec:5outcomes}

As an interdisciplinary project, the expected outcomes of our project are varied. First and foremost, the main contribution of this project is a new computer simulation framework for the emergence and evolution of numeral systems, as well as for other topics in evolutionary linguistics. Our work also fills the gap between existing works in evolutionary linguistics and hypotheses/theories about the basic elements of language. Moreover, we provide a more powerful tool to verify hypotheses and theories in evolutionary linguistics. Meanwhile, as we follow the reinforcement learning framework, we also expect to contribute techniques for estimating gradients through the discrete symbols of the vocabulary during end-to-end multi-agent training.

%Conclude your research proposal by addressing your predicted outcomes. What are you hoping to prove/disprove?
%Indicate how you envisage your research will contribute to debates and discussions in your particular subject area:
%
%\begin{itemize}
%    \item How will your research make an original contribution to knowledge?
%    \item How might it fill gaps in existing work?
%    \item How might it extend understanding of particular topics?
%\end{itemize}

\section{Research Plan, Milestones and Deliverables}
\label{sec:6plan}

\definecolor{barblue}{RGB}{153,204,254}
\definecolor{groupblue}{RGB}{51,102,254}
\definecolor{linkred}{RGB}{165,0,33}
\begin{figure}[htbp]
\begin{ganttchart}[
    y unit title=0.4cm,
    y unit chart=0.5cm,
    vgrid,hgrid,
    x unit=1.55mm,
    time slot format=isodate,
    title/.append style={draw=none, fill=barblue},
    title label font=\sffamily\bfseries\color{white},
    title label node/.append style={below=-1.6ex},
    title left shift=.05,
    title right shift=-.05,
    title height=1,
    bar/.append style={draw=none, fill=groupblue},
    bar height=.6,
    bar label font=\normalsize\color{black!50},
    group right shift=0,
    group top shift=.6,
    group height=.3,
    group peaks height=.2,
    bar incomplete/.append style={fill=green}
   ]{2018-06-01}{2018-08-16}
   \gantttitlecalendar{month=name}\\
   \ganttbar[
    bar progress label font=\small\color{barblue},
    bar progress label node/.append style={right=4pt},
    bar label font=\bfseries\normalsize\color{black},
    name=pp
   ]{Background Reading}{2018-06-01}{2018-06-30} \\
   \ganttset{progress label text={}, link/.style={black, -to}}
   \ganttbar[bar label font=\bfseries\normalsize\color{black}]{Game Design}{2018-06-01}{2018-06-07} \\
   \ganttgroup{Implementation}{2018-06-07}{2018-06-28} \\
   \ganttbar[progress=0, name=T2A]{Environment}{2018-06-07}{2018-06-12} \\
   \ganttbar[progress=0]{Agent}{2018-06-10}{2018-06-28} \\
   \ganttgroup{Experiment}{2018-06-21}{2018-07-21} \\
   \ganttbar[progress=0]{Baselines}{2018-06-21}{2018-06-28} \\
   \ganttbar[progress=0]{Proposed Method}{2018-06-28}{2018-07-21} \\
   \ganttgroup{Analysis}{2018-06-28}{2018-07-31} \\
   \ganttbar[progress=0]{Performance}{2018-06-28}{2018-07-14} \\
   \ganttbar[progress=0]{Decipher}{2018-07-07}{2018-07-31} \\
   \ganttgroup{Dissertation}{2018-06-07}{2018-08-16} \\
   \ganttbar[progress=0]{Intro \& Background}{2018-06-07}{2018-06-21} \\
   \ganttbar[progress=0]{Game \& Method}{2018-07-01}{2018-07-14} \\
   \ganttbar[progress=0]{Results \& Analysis}{2018-07-21}{2018-08-07} \\
   \ganttbar[progress=0]{Conclusion}{2018-08-07}{2018-08-10} \\
   \ganttbar[progress=0]{Revise}{2018-08-06}{2018-08-16}
   \ganttset{link/.style={green}}
\end{ganttchart}
\caption{Gantt Chart of the activities defined for this project.}
\label{fig:gantt}
\end{figure}

\begin{table}[htbp]
    \begin{center}
        \begin{tabular}{|c|c|l|}
        \hline
        \textbf{Milestone} & \textbf{Week} & \textbf{Description} \\
        \hline
        $M_1$ & 2 & Feasibility study completed \\
        $M_2$ & 5 & First prototype implementation completed \\
        $M_3$ & 7 & Evaluation completed \\
        $M_4$ & 10 & Submission of dissertation \\
        \hline
        \end{tabular}
    \end{center}
    \caption{Milestones defined in this project.}
    \label{fig:milestones}
\end{table}

\begin{table}[htbp]
    \begin{center}
        \begin{tabular}{|c|c|l|}
        \hline
        \textbf{Deliverable} & \textbf{Week} & \textbf{Description} \\
        \hline
        $D_1$ & 6 & Software tool for \dots\\
        $D_2$ & 8 & Evaluation report on \dots\\
        $D_3$ & 10 & Dissertation \\
        \hline
        \end{tabular}
    \end{center}
    \caption{List of deliverables defined in this project.}
    \label{fig:deliverables}
\end{table}

\clearpage
% Now build the reference list
\bibliographystyle{unsrt} % The reference style
% This is plain and unsorted, so in the order
% they appear in the document.
{\small \bibliography{main} % bib file(s). } \end{document}
\documentclass[12pt]{article}
\usepackage{graphics}
\begin{document}

\section*{NYU Physics 1---In-class Exam 4}

\vfill
\paragraph{Name:} ~

\paragraph{email:} ~

\vfill
This exam consists of two problems. Write only in this booklet. Be sure to show your work.

\vfill
~

\clearpage
\section*{Problem 1}

When an Olympic diver walks out to the tip of the diving board, the tip sags to a new equilibrium position a distance $y_\mathrm{eq}$ below its ``unloaded'' equilibrium position. Estimate, roughly, this distance $y_\mathrm{eq}$ using the following fact: When the diver is standing at the very end of a diving board, he or she oscillates vertically with a period $T$ of roughly 1.5~s. State, clearly, \emph{all} of your assumptions. For instance, you might have to estimate the typical mass $M$ of an Olympic diver.

\clearpage
\section*{Problem 2}

\noindent~\hfill\includegraphics{../mp/wheel_on_spring.eps}\hfill~\\
A wheel of mass $M$, radius $R$ and moment of inertia $I$ rolls without slipping on a horizontal table. It is attached to a spring of spring constant $k$.

(a) Do you expect the frequency of oscillations $\omega$ of this system to be greater than, smaller than, or equal to $\sqrt{k/M}$? Give a concise argument.

\vfill
(b) Compute the frequency of oscillations $\omega$.

\vfill
~

\clearpage
[This page intentionally left blank for calculations or other work.]

\end{document}
\section{\texorpdfstring{Relative computations by Oracle TM}{Relative computations by Oracle TM}}
\vspace{5mm}
\large

%2nd lecture
\begin{definition}[Oracle TM]
An \textbf{oracle} TM with an oracle $A$ (where $A$ is a language) is a DTM that differs from an ordinary DTM in the following ways:
\begin{itemize}
    \item an oracle tape (with the same alphabet as the TM);
    \item 3 special states: QUERY, YES, NO;
    \item in the QUERY state the TM moves to the YES state if the word on the oracle tape is $\in A$ (and to NO otherwise); after the answer the oracle tape is erased (so that the space can be reused in space-complexity considerations);
    \item the language accepted by an oracle TM $M$ with oracle $A$ is denoted $L(M, A)$.
\end{itemize}
\end{definition}

\begin{note}
For an NTM the definition works the same way.
\end{note}

\begin{note}
An ordinary DTM is the same as an oracle DTM with $A = \emptyset$.
\end{note}

Consider now the computation of an oracle DTM when the oracle language $A$ is \emph{not fixed in advance}. The computation forms a tree that branches at every QUERY.

\begin{observation}
Consider an NTM vs.\ an oracle DTM. ``$\Rightarrow$'': if an NTM $M$ has language $L(M)$, set the oracle language $A = L(M)$. ``$\Leftarrow$'': if the oracle language is not recognizable (e.g. HALT), no NTM can simulate such an oracle DTM.
\end{observation}

\begin{definition}[Turing reducibility]
\emph{Turing reducibility} -- let $A, B$ be languages. We say that $A$ is (deterministically) Turing reducible to $B$ in polynomial time, written $A \leq^T B$, if there exists an oracle DTM $M$ working in polynomial time such that
\[ A = L(M, B). \]
\end{definition}

\begin{example}
$A \in P \Rightarrow A \leq^T \emptyset$, since we have a polynomial-time algorithm that needs no oracle.
\end{example}

\begin{definition}[$\TP(A)$]
Let $A$ be a language, then
\[ \TP(A) = \{ B \mid B \leq^T A \} \]
\end{definition}

\begin{definition}[$\TP(\C)$]
Let $\C$ be a set of languages, then
\[ \TP(\C) = \{ B \mid\, \exists A \in \C: B \leq^T A \} \]
\end{definition}

\begin{theorem}[$\TP(\TP) = \TP$]\label{p_eq_pp}
\[ \TP(\TP) = \TP \]
\end{theorem}

\begin{proof}
$\TP \subseteq \TP(\TP)$: let $A \in \TP$; use $A$ itself as an oracle with a single QUERY, or use the empty oracle.

$\TP(\TP) \subseteq \TP$: let $B \in \TP(\TP)$, i.e. there exist $A \in \TP$ and a polynomial-time oracle DTM $M$ such that $B = L(M, A)$. To prove the inclusion, we construct an ordinary DTM $M_B$ that recognizes $B$. $M_B$ simulates $M$ and, whenever $M$ enters the QUERY state, it simulates $M_A$ (a polynomial-time DTM for $A$) to check whether the word $w$ on the oracle tape satisfies
\[ w \in L(M_A) = A. \]
Now we check the time complexity. $M$ makes at most $p(|x|)$ queries (for a polynomial $p$), since its total number of steps is polynomial. Each query word has length at most $t = p_w(|x|)$, and simulating $M_A$ on one query takes at most $p_q(t)$ steps. Hence each query costs $p_q(p_w(|x|))$ steps, and the total running time of $M_B$ is at most $p(|x|) \cdot p_q(p_w(|x|))$, which is also polynomial.
\end{proof}

\begin{definition}[Turing reducibility (N)]
\emph{Turing reducibility (non-deterministic)} -- let $A, B$ be languages. We say that $A$ is non-deterministically Turing reducible to $B$ in polynomial time, written $A \leq^{NP} B$, if there exists an oracle NTM $M$ working in polynomial time such that
\[ A = L(M, B). \]
\end{definition}

\begin{definition}[$\TNP(A)$]
Let $A$ be a language, then
\[ \TNP(A) = \{ B \mid B \leq^{NP} A \} \]
\end{definition}

\begin{definition}[$\TNP(\C)$]
Let $\C$ be a set of languages, then
\[ \TNP(\C) = \{ B \mid\, \exists A \in \C: B \leq^{NP} A \} \]
\end{definition}

\begin{note}
The relativized definition also works for other classes, e.g. EXPTIME.
\end{note}

\begin{definition}[PS]
\[ PS = \bigcup_{i = 0}^{\infty} DS(n^i) = NPS = \bigcup_{i = 0}^{\infty} NS(n^i) \]
where the second equality holds because of Savitch's theorem, \cref{savic}.
\end{definition}

\begin{definition}[PS(A)]
$PS(A) = \{ B \mid B$ is accepted by an oracle DTM $M$ working in polynomial space such that $B = L(M, A)\}$. The definition extends to a class of languages $\C$ as before.
\end{definition}

\begin{note}
\[ \TP \subseteq \TNP \subseteq PS \]
where the last inclusion holds because of $NT(f(n)) \subseteq DS(f(n))$. The proof is the same as for ordinary TMs, but with oracle TMs that share the same oracle language $A$.
\end{note}

\begin{observation}[Open question]
What about $\TNP(\TNP)$? This is still an open question, and it depends on whether $\TP = \TNP$. We cannot simply plug the oracle NTM back into the original oracle TM, as an NTM serving as an oracle can have many accepting or rejecting leaves.
\end{observation}
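\begin{example}
For every language $A$ we have $\bar{A} \leq^T A$: the oracle DTM copies its input to the oracle tape, asks a single query and accepts iff the answer is NO. Hence $\TP(A)$ is closed under complement, and in particular co-$\TNP \subseteq \TP(\TNP) \subseteq \TNP(\TNP)$. So $\TNP(\TNP) = \TNP$ would already imply that $\TNP$ equals co-$\TNP$.
\end{example}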
%!TEX root = ../../main.tex
%%%%%%% Begin table for video surveillance anomaly detection
\begin{table*}
\begin{center}
\caption{Examples of DAD techniques used in video surveillance.
\\CNN: Convolutional Neural Networks, LSTM: Long Short-Term Memory Networks
\\RBM: Restricted Boltzmann Machine, DNN: Deep Neural Networks
\\AE: Autoencoders, DAE: Denoising Autoencoders
\\OCSVM: One-Class Support Vector Machines, CAE: Convolutional Autoencoders
\\SDAE: Stacked Denoising Autoencoders, STN: Spatial Transformer Networks }
\label{tab:videoSurvellianceAnomalyDetect}
\captionsetup{justification=centering}
\scalebox{0.80}{
\begin{tabular}{ | p{3cm} | p{4cm} | p{12cm} |}
\hline
\textbf{Technique Used} & \textbf{Section} & \textbf{References}\\ \hline
CNN & Section~\ref{sec:cnn} & \cite{dong2016camera},\cite{andrewsaanomaly},\cite{sabokrou2016fully},\cite{sabokrou2017deep},\cite{munawar2017spatio},\cite{li2017transferred},\cite{qiao2017abnormal},\cite{tripathi2018convolutional},\cite{nogas2018deepfall},\cite{christiansen2016deepanomaly}\\\hline
SAE (AE-CNN-LSTM) & Section~\ref{sec:ae},~\ref{sec:cnn},~\ref{sec:rnn_lstm_gru} & \cite{chong2017abnormal},~\cite{qiao2017abnormal},~\cite{khaleghi2018improved}\\\hline
AE & Section~\ref{sec:ae} & \cite{qiao2017abnormal},\cite{yang2015unsupervised},\cite{chen2015detecting},\cite{gutoskidetection},\cite{d2017autoencoder},\cite{dotti2017unsupervised},\cite{sabokrou2016video},\cite{tran2017anomaly},\cite{hasan2016learning},\cite{cinelli2017anomaly}\\\hline
Hybrid Model (CAE-OCSVM) & Section~\ref{sec:hybridModels} & \cite{gutoskidetection}, \cite{dotti2017unsupervised}\\\hline
LSTM-AE & Section~\ref{sec:rnn_lstm_gru}, ~\ref{sec:ae} & \cite{d2017autoencoder}\\\hline
STN & Section~\ref{sec:stn} & \cite{chianucci2016unsupervised}\\\hline
RBM & Section~\ref{sec:dnn} & \cite{munawar2017spatio}\\\hline
LSTM & Section~\ref{sec:rnn_lstm_gru} & \cite{medel2016anomaly},~\cite{luo2017remembering},~\cite{ben2018attentioned},~\cite{singh2017anomaly}\\\hline
RNN & Section~\ref{sec:rnn_lstm_gru} & \cite{luo2017revisit},\cite{zhou2015abnormal},\cite{hu2016video},~\cite{chong2015modeling}\\\hline
AAE & Section~\ref{sec:gan_adversarial} & \cite{ravanbakhsh2017training}\\\hline
\end{tabular}}
\end{center}
\end{table*}
%%%%%%%%% End of video surveillance anomaly detection

\subsection{Video Surveillance}
Video surveillance, also popularly known as closed-circuit television (CCTV), involves monitoring designated areas of interest in order to ensure security. In video surveillance applications, unlabelled data is available in large amounts, which is a significant challenge for supervised machine learning and deep learning methods. Hence, video surveillance applications have been modelled as anomaly detection problems owing to this lack of labelled data. Several works have studied the state-of-the-art deep models for video anomaly detection and have classified them based on the type of model and criteria of detection~\cite{kiran2018overview,chong2015modeling}. The challenges of building robust 24/7 video surveillance systems are discussed in detail by Boghossian et al.~\cite{boghossian2005challenges}. The lack of an explicit definition of anomaly in real-life video surveillance is a significant issue that also hampers the performance of DAD methods.
DAD techniques used in video surveillance are illustrated in Table ~\ref{tab:videoSurvellianceAnomalyDetect}. % % Datasets Used Table % \begin{table*} % \begin{center} % \caption{Datasets Used For Video surveillance} % \label{tab:videoSurvelliance} % \begin{tabular}{|p{3cm}|p{4cm}|p{6cm}|} % \hline % \textbf{DataSet} & \textbf{Type} & \textbf{References}\\ % \hline % UCSD Ped2~\cite{ucsdAnomalyDetect2017},Subway~\cite{adam2008robust} & Video & Sabokrou et al~\cite{sabokrou2016fully,sabokrou2017deep}, Gutoski et al~\cite{gutoskidetection} \\ \hline % LOST ~\cite{Abrams et al. 2012} & Video & Dotti et al~\cite{dotti2017unsupervised}\\ \hline % YouTube & Video & Yang et al~\cite{yang2015unsupervised}\\ \hline % UMN~\footnote{$http://mha.cs.umn.edu$} & Video & Sabokrou et al~\cite{yang2015unsupervised}\\ \hline % CIFAR-10 &Images& Munawar et al~\cite{munawar2017spatio} \\ \hline % \end{tabular} % \end{center} % \end{table*} % %%%%%%%%% End of Datasets used in video survellienace anomaly detection
\documentclass[11pt]{article} \usepackage{amsmath,amsfonts,amssymb} % basic math \usepackage{graphicx} % images \usepackage{float} % floating image support \usepackage{listings} % code input \usepackage{color} % colors \usepackage[super,comma]{natbib} % citing \usepackage[T1]{fontenc} % fonts \usepackage[utf8]{inputenc} % fonts \usepackage{titling} % title control \usepackage{microtype} \usepackage{bm} \usepackage[utf8]{inputenc} \usepackage[margin=.9in]{geometry} \usepackage[labelfont=bf]{caption} \usepackage{subcaption} \usepackage{indentfirst} \usepackage[affil-it]{authblk} \usepackage{enumitem} \usepackage{scrextend} \usepackage{varioref} \usepackage{hyperref} \usepackage{cleveref} \hypersetup{ colorlinks, citecolor=blue, filecolor=black, linkcolor=blue, urlcolor=blue } \bibliographystyle{unsrtnat} \renewcommand{\bibname}{References} % standard code colors \definecolor{dkgreen}{rgb}{0,0.6,0} \definecolor{gray}{rgb}{0.5,0.5,0.5} \definecolor{mauve}{rgb}{0.58,0,0.82} % setup for python language \lstset{frame=tb, language=Python, aboveskip=3mm, belowskip=3mm, showstringspaces=false, columns=flexible, basicstyle={\small\ttfamily}, numbers=none, numberstyle=\tiny\color{gray}, keywordstyle=\color{blue}, commentstyle=\color{gray}, stringstyle=\color{dkgreen}, breaklines=true, breakatwhitespace=true, tabsize=3 } \title{Generalized multiparticle Mie theory (GMMT)} \date{} \author{} \begin{document} \maketitle \tableofcontents \section{Particle interactions in the GMMT} \subsection{Vector spherical harmonic functions} The generalized multiparticle Mie theory (GMMT) is outlined, following Xu's work. \cite{xu1995electromagnetic} The vector spherical harmonic (VSH) functions are a complete basis set of the vector wave equations \begin{align} \begin{split} \nabla \times \nabla \times \boldsymbol{E} &= k^2 \boldsymbol{E} \\ \nabla \times \nabla \times \boldsymbol{H} &= k^2 \boldsymbol{H} \end{split} \end{align} They are defined as \begin{align} \begin{split} \boldsymbol{N}_{mn1}^{(J)} &= \boldsymbol{\hat r} n(n+1) P_n^m(\theta) \frac{z_n^{(J)}(kr)}{kr}e^{im\phi} \\ &\quad + \frac{1}{kr} \left[ \boldsymbol{\hat \theta} \tau_{mn}(\theta) + \boldsymbol{\hat \phi} i\pi_{mn}(\theta) \right] \frac{d}{dr} \left[ rz_n^{(J)}(kr)e^{im\phi} \right] \\ \boldsymbol{N}_{mn2}^{(J)} &= \big[ \boldsymbol{\hat \theta} i\pi_{mn}(\theta) -\boldsymbol{\hat \phi} \tau_{mn}(\theta) \big] z_n^{(J)}(kr) e^{im\phi} \end{split} \label{eqn:vsh_definition} \end{align} where $J= 1,2,3,4$. The radial functions $z_n^{(J)}$ are \begin{align} \begin{split} \begin{aligned} z_n^{(1)}(x) &= j_n(x) \qquad& z_n^{(3)}(x) &= h_n^{(1)}(x) = j_n(x) + iy_n(x) \end{aligned}\\ \begin{aligned} z_n^{(2)}(x) &= y_n(x) \qquad& z_n^{(4)}(x) &= h_n^{(2)}(x) = j_n(x) - iy_n(x) \end{aligned} \end{split} \end{align} where $j_n$, $y_n$ are the spherical Bessel functions of the first and second kind, and $h_n^{(1)}$, $h_n^{(2)}$ are the spherical Hankel functions of the first and second kind. 
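For numerical work, the four radial functions can be evaluated directly with SciPy's spherical Bessel routines. The short helper below is only a convenience sketch (the function name \texttt{radial\_zn} is arbitrary and not part of the formulation above):

\begin{lstlisting}
from scipy.special import spherical_jn, spherical_yn

def radial_zn(J, n, x):
    """Radial function z_n^(J)(x) for J = 1, 2, 3, 4."""
    jn, yn = spherical_jn(n, x), spherical_yn(n, x)
    if J == 1:
        return jn              # spherical Bessel, first kind
    if J == 2:
        return yn              # spherical Bessel, second kind
    if J == 3:
        return jn + 1j * yn    # spherical Hankel, first kind
    if J == 4:
        return jn - 1j * yn    # spherical Hankel, second kind
    raise ValueError("J must be 1, 2, 3, or 4")
\end{lstlisting}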
The angular functions $\pi_{mn}$ and $\tau_{mn}$ are \begin{align} \pi_{mn}(\theta) &= \frac{m}{\sin\theta} P_n^m(\cos\theta) \\ \tau_{mn}(\theta) &= \frac{d}{d\theta} P_n^m(\cos\theta) \end{align} The associated Legendre polynomials $P_n^m$ are defined without the Condon-Shortley phase, i.e.\ \begin{equation} P_n^m(x) = (1 - x^2)^{m/2} \frac{d^m}{dx^m} P_n(x) \end{equation} The VSHs are an orthogonal (but not orthonormal) set when integrated over a closed surface $\Omega$ \begin{align} \begin{split} \langle \boldsymbol{N}_{mn1}^{(J)}, \boldsymbol{N}_{m^\prime n^\prime 1}^{(J)} \rangle &= \int_\Omega \boldsymbol{N}_{mn1}^{(J)} \cdot \boldsymbol{N}_{m^\prime n^\prime 1}^{(J)*} \;d\Omega \\ &= \delta_{mm^\prime}\delta_{nn^\prime}4\pi \frac{n(n+1)(n+m)!}{(2n+1)(n-m)!} \left[ \frac{\left|z_n^{(J)}(kr) + krz_n^{(J)\prime}(kr)\right|^2 + n(n+1) \left|z_n^{(J)}(kr)\right|^2 }{(kr)^2} \right] \\ \langle \boldsymbol{N}_{mn2}^{(J)}, \boldsymbol{N}_{m^\prime n^\prime 2}^{(J)} \rangle &= \int_\Omega \boldsymbol{N}_{mn2}^{(J)} \cdot \boldsymbol{N}_{m^\prime n^\prime 2}^{(J)*} \;d\Omega = \delta_{mm^\prime}\delta_{nn^\prime}4\pi \frac{n(n+1)(n+m)!}{(2n+1)(n-m)!} |z_n^{(J)}(kr)|^2 \\ \langle \boldsymbol{N}_{mn1}^{(J)}, \boldsymbol{N}_{m^\prime n^\prime 2}^{(J)} \rangle &= \int_\Omega \boldsymbol{N}_{mn1}^{(J)} \cdot \boldsymbol{N}_{m^\prime n^\prime 2}^{(J)*} \;d\Omega = 0 \end{split} \end{align} The table below outlines the physical interpretation of the VSHs \begin{center} \begin{tabular}{|l l|} \hline \textbf{Entity} & \textbf{Physical interpretation} \\ [0.5ex] \hline\hline $\bm{N}_{mn1}$ & electric (TM) modes \\ $\bm{N}_{mn2}$ & magnetic (TE) modes \\ \hline $n$ & multipolar order (1: dipole, 2: quadrupole, etc.) \\ $m$ & azimuthal order (from $-n$ to $n$) \\ \hline $J=1$ & propagating incident mode \\ $J=2$ & counter-propagating incident mode \\ $J=3$ & spherically outgoing mode \\ $J=4$ & spherically ingoing mode \\ \hline \end{tabular} \end{center} \subsection{Field expansions} The source, incident, scattered, and interior electric and magnetic fields of particle $j$ can be expanded in terms of the VSHs. The incident field includes the source field plus the incident field from all other particles in the system. 
\hfill \textit{Electric field}
\begin{subequations}
\begin{align}
    \boldsymbol{E}_\text{src}^j &= - \sum_{n=1}^{N_\text{max}} \sum_{m=-n}^{n} \sum_{r=1}^2 iE_{mn} p_{mnr}^{j,\text{src}} \boldsymbol{N}_{mnr}^{(1)} \\
    \boldsymbol{E}_\text{inc}^j &= - \sum_{n=1}^{N_\text{max}} \sum_{m=-n}^{n} \sum_{r=1}^2 iE_{mn} p_{mnr}^{j,\text{inc}} \boldsymbol{N}_{mnr}^{(1)} \\
    \boldsymbol{E}_\text{scat}^j &= \sum_{n=1}^{N_\text{max}} \sum_{m=-n}^{n} \sum_{r=1}^2 iE_{mn} p_{mnr}^{j,\text{scat}} \boldsymbol{N}_{mnr}^{(3)} \\
    \boldsymbol{E}_\text{int}^j &= - \sum_{n=1}^{N_\text{max}} \sum_{m=-n}^{n} \sum_{r=1}^2 iE_{mn} p_{mnr}^{j,\text{int}} \boldsymbol{N}_{mnr}^{(1)}
\end{align}
\label{eqn:electric_field_expansion}
\end{subequations}

\textit{Magnetic field}
\begin{subequations}
\begin{align}
    \boldsymbol{H}_\text{src}^j &= - \sqrt{\frac{\varepsilon_b}{\mu_b}} \sum_{n=1}^{N_\text{max}} \sum_{m=-n}^{n} \sum_{r=1}^2 E_{mn} p_{mn\bar r}^{j,\text{src}} \boldsymbol{N}_{mnr}^{(1)} \\
    \boldsymbol{H}_\text{inc}^j &= - \sqrt{\frac{\varepsilon_b}{\mu_b}} \sum_{n=1}^{N_\text{max}} \sum_{m=-n}^{n} \sum_{r=1}^2 E_{mn} p_{mn\bar r}^{j,\text{inc}} \boldsymbol{N}_{mnr}^{(1)} \\
    \boldsymbol{H}_\text{scat}^j &= \sqrt{\frac{\varepsilon_b}{\mu_b}} \sum_{n=1}^{N_\text{max}} \sum_{m=-n}^{n} \sum_{r=1}^2 E_{mn} p_{mn\bar r}^{j,\text{scat}} \boldsymbol{N}_{mnr}^{(3)} \\
    \boldsymbol{H}_\text{int}^j &= - \sqrt{\frac{\varepsilon^j}{\mu^j}} \sum_{n=1}^{N_\text{max}} \sum_{m=-n}^{n} \sum_{r=1}^2 E_{mn} p_{mn\bar r}^{j,\text{int}} \boldsymbol{N}_{mnr}^{(1)}
\end{align}
\label{eqn:magnetic_field_expansion}
\end{subequations}
where $\bar r = 3-r$ and $E_{mn}$ is a normalization factor
\begin{equation}
    E_{mn} = i^n \sqrt{\frac{(2n+1)(n-m)!}{n(n+1)(n+m)!}}
\end{equation}

\subsection{VSH translation coefficients}
The VSH functions computed in one coordinate system ($\bm{r}_l$) can be converted to a different coordinate system ($\bm{r}_j$) by use of the translation coefficient $\widetilde{A}_{mnruvs}^{jl}$
\begin{equation}
    \bm{N}_{mnr}^{(J)}(k\bm{r}_j) = \sum_{v=1}^\infty \sum_{u=-v}^{u=v} \sum_{s=1}^2 \widetilde{A}_{mnruvs}^{(J)jl} \bm{N}_{uvs}^{(1)}(k\bm{r}_l)
    \label{eqn:VSHW_translation}
\end{equation}
Here, the functions on the right-hand side are the regular ($J=1$) VSHs about the new origin, while the function being translated can correspond to any $J$ value. For $J=1$ the translation preserves the wave type (incident fields are re-expanded as incident fields, and outgoing fields as outgoing fields), whereas for $J=3$ spherically outgoing (scattered) fields are re-expanded as incident-type fields about the new origin; the latter is what couples the scattered field of one particle to the incident field of another. Explicit formulas for the translation coefficients can be found elsewhere.
\cite{Xu_1998} From the field expansions, \cref{eqn:electric_field_expansion}, the VSH translation coefficients can be used to relate the incident expansion coefficients of particle $l$ to the expansion coefficients around particle $j$,
\begin{equation}
    p_{mnr}^{j,(J)} = f^{(J)}\sum_{v=1}^\infty \sum_{u=-v}^{u=v} \sum_{s=1}^2 A_{mnruvs}^{(J)jl} p_{uvs}^{l,\text{inc}}
    \label{eqn:VSHW_translation_normalized}
\end{equation}
where $A_{mnruvs}^{(J)jl}$ are the \emph{normalized} translation coefficients
\begin{equation}
    A_{mnruvs}^{(J)jl} = \frac{E_{uv}}{E_{mn}} \widetilde{A}_{mnruvs}^{(J)jl}
\end{equation}
and $f^{(J)}$ is a $\pm 1$ sign term
\begin{equation}
    f^{(J)} =
    \begin{cases}
        +1 \qquad \text{if } J = 3 \\
        -1 \qquad \text{otherwise}
    \end{cases}
    \label{eqn:outgoing_factor}
\end{equation}
The translation coefficients have the following useful symmetry relationships \cite{hovenier1996light}
\begin{subequations}
\label{eqn:vsh_symmetry}
\begin{align}
    A_{mnruvs}^{ij} &= (-1)^{n+v+r+s} A_{mnruvs}^{ji} \\
                    &= (-1)^{n+v+m+u} A_{-uvs-mnr}^{ji} \\
                    &= (-1)^{m+u}[A_{uvsmnr}^{ji}]^* \label{eqn:vsh_symmetry_special} \footnotemark
\end{align}
\end{subequations}
\footnotetext{
The conjugate in \cref{eqn:vsh_symmetry_special} applies to everything but the radial function $z_n^{(J)}$ appearing in the sum found in the definition of $A^{ij}$.
If $J=1$ or $J=2$ and the medium is non-absorbing, this exception can be ignored since $z_n$ is real.
}

\subsection{VSH rotation coefficients}
The rotation of the expansion coefficients is given by the Wigner-D matrix
\begin{equation}
    p_{mnr}^\prime = D_{ms}^{n}(\hat q)\, p_{snr}
\end{equation}
where $\hat q$ represents the rotation and the repeated azimuthal index $s$ is summed over.

\subsection{T-matrix formulation}
\newcommand{\tmatrix}{\mathcal{T}}

The T-matrix of particle $j$, $\tmatrix_{mnruvs}^j$, relates the incident expansion coefficients to the scattered expansion coefficients for an arbitrary, non-spherical particle.
To simplify the index notation, a multi-index is used to denote a given mode, i.e.\ $\alpha = (m,n,r)$.
Greek letters are used to represent a multi-index.
Furthermore, Einstein notation is used so that repeated multi-indices are always summed over.
Then the T-matrix is defined as
\begin{equation}
    p_\alpha^{j,\text{scat}} = \tmatrix_{\alpha\beta}^j p_\beta^{j,\text{inc}}
    \label{eqn:tmatrix_defintion}
\end{equation}
If the particle is a sphere, then the T-matrix is diagonal,
\begin{equation}
    \tmatrix_{\alpha\beta}^j = t_{rn}^j \delta_{\alpha\beta}
    \label{eqn:tmatrix_sphere}
\end{equation}
where $t_{1n}^j = a_n^j$ and $t_{2n}^j = b_n^j$ are the classical Mie theory coefficients of the $j$th sphere.\cite{bohren2008absorption}
Once a particle's T-matrix is known, it does not have to be recomputed for a new orientation of the particle, provided the geometrical and material properties of the particle remain the same.
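To make the diagonal sphere T-matrix concrete, the sketch below evaluates the classical Mie coefficients $a_n$ and $b_n$ for a homogeneous sphere with relative refractive index $m$ and size parameter $x = ka$, following the standard Bohren--Huffman form \cite{bohren2008absorption}. It is an illustrative sketch only: the function name is arbitrary, non-magnetic media and a real (non-absorbing) index are assumed so that SciPy's real-argument Bessel routines suffice, and the overall sign convention of $t_{rn}$ may differ from the expansion conventions used here.

\begin{lstlisting}
import numpy as np
from scipy.special import spherical_jn, spherical_yn

def mie_ab(m, x, n_max):
    """Mie coefficients a_n, b_n for n = 1..n_max (sketch).

    m: real relative refractive index, x: size parameter ka.
    """
    n = np.arange(1, n_max + 1)
    mx = m * x
    jn_x,  djn_x  = spherical_jn(n, x),  spherical_jn(n, x, derivative=True)
    yn_x,  dyn_x  = spherical_yn(n, x),  spherical_yn(n, x, derivative=True)
    jn_mx, djn_mx = spherical_jn(n, mx), spherical_jn(n, mx, derivative=True)
    # Riccati-Bessel functions psi(rho) = rho*j_n(rho), xi(rho) = rho*h_n^(1)(rho)
    psi_x,  dpsi_x  = x * jn_x,   jn_x + x * djn_x
    psi_mx, dpsi_mx = mx * jn_mx, jn_mx + mx * djn_mx
    xi_x  = x * (jn_x + 1j * yn_x)
    dxi_x = (jn_x + 1j * yn_x) + x * (djn_x + 1j * dyn_x)
    a = (m * psi_mx * dpsi_x - psi_x * dpsi_mx) / (m * psi_mx * dxi_x - xi_x * dpsi_mx)
    b = (psi_mx * dpsi_x - m * psi_x * dpsi_mx) / (psi_mx * dxi_x - m * xi_x * dpsi_mx)
    return a, b
\end{lstlisting}

With the coefficients from such a routine, the sphere T-matrix entries of \cref{eqn:tmatrix_sphere} are simply $t_{1n}^j = a_n^j$ and $t_{2n}^j = b_n^j$.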
Given a particle T-matrix in one coordinate system, the T-matrix in a rotated coordinate system is \begin{equation} \tmatrix_{m^\prime nr u^\prime vs}^\prime = D_{m^\prime m}^n(\hat q) \lbrack D_{u^\prime u}^v(\hat q) \rbrack^* \tmatrix_{mnruvs} \end{equation} A particle also has an \emph{internal} T-matrix, $\tmatrix^\text{int}$, that relates the internal expansion coefficients to the incident coefficients \begin{equation} p_\alpha^{j,\text{int}} = \tmatrix_{\alpha\beta}^{j,\text{int}} p_\beta^{j,\text{inc}} \label{eqn:tmatrix_internal_defintion} \end{equation} \subsection{Interaction equations} The interaction equation among $N$ particles can be written as \begin{equation} p_{\alpha}^{j,\text{inc}} = p_{\alpha}^{j,\text{src}} - A_{\alpha \beta}^{(3)jl} \tmatrix_{\beta \gamma}^l p_{\gamma}^{l,\text{inc}} \label{eqn:particle_interactions} \end{equation} More explicitly, dropping the Einstein summation rule: \begin{equation} p_{\alpha}^{j,\text{inc}} = p_{\alpha}^{j,\text{src}} - \sum_{l \neq j}^{(1,N)} \sum_{\beta} \sum_{\gamma} A_{\alpha \beta}^{(3)jl} \tmatrix_{\beta \gamma}^l p_{\gamma}^{l,\text{inc}} \end{equation} Even more explicitly, the multi-index can be unraveled: \begin{equation} p_{mnr}^{j,\text{inc}} = p_{mnr}^{j,\text{src}} - \sum_{l \neq j}^{(1,N)}\sum_{v=1}^{L_\text{max}} \sum_{u=-v}^{v} \sum_{s=1}^{2} \sum_{v^\prime=1}^{L_\text{max}} \sum_{u^\prime=-v^\prime}^{v^\prime} \sum_{s^\prime=1}^{2} A_{mnruvs}^{(3)jl} \tmatrix_{uvsu^\prime v^\prime s^\prime}^l p_{u^\prime v^\prime s^\prime}^{l,\text{inc}} \end{equation} If the system consists entirely of spheres, then using \cref{eqn:tmatrix_sphere}, the interaction equation becomes \begin{equation} p_{mnr}^{j,\text{inc}} = p_{mn}^{j,\text{src}} - \sum_{l \neq j}^{(1,N)}\sum_{v=1}^{L_\text{max}} \sum_{u=-v}^{v} \sum_{s=1}^2 A_{mnruvs}^{(3)jl} t_{sv}^l p_{uvs}^{l,\text{inc}} \label{eqn:sphere_interations} \end{equation} Whether the cluster is composed solely of spheres or more generally non-spherical particles, the interaction equation is always of the form \begin{equation} p_{\alpha}^{j,\text{inc}} = p_{\alpha}^{j,\text{src}} - \tmatrix_{\alpha \beta}^{jl} p_{\beta}^{l,\text{inc}} \end{equation} where $\tmatrix_{\alpha \beta}^{jl}$ is the \emph{inverse particle aggregate T-matrix}. By setting $p_\alpha^{j,\text{inc}} = \delta_{\alpha\beta} \delta_{jl} p_\beta^{l,\text{inc}}$, the interaction equation can be rewritten in the standard form for linear systems, $[A]x=b$, \begin{equation} \left[ \delta_{\alpha\beta} \delta_{jl} + \tmatrix_{\alpha \beta}^{jl} \right] p_{\beta}^{l,\text{inc}} = p_{\alpha}^{j,\text{src}} \end{equation} \subsection{Interactions with a substrate} The interactions of the particles with the interface of a substrate can be included in the GMMT. This is done by simultaneously matching the boundary conditions on the surface of the particles (using VSHW functions) and on the interface (using a plane wave expansion). \cite{mackowski2008exact} We introduce a reflection matrix $\widetilde{R}_{\alpha\beta}^{jl}$ from particle $l$ to $j$, similar to that of direct translations in \cref{eqn:VSHW_translation} \begin{equation} \bm{N}_{\alpha}^{(3),\text{ref}}(k\bm{r}_j) = \widetilde{R}_{\alpha\beta}^{jl} \bm{N}_{\beta}^{(1)}(k\bm{r}_l) \label{eqn:VSHW_reflection_translation} \end{equation} This equation relates the scattered, reflected VSHW functions in terms of incident VSHW functions around a different coordinate system. 
Expressions for the $\widetilde{R}_{\alpha\beta}^{jl}$ matrix in terms of the substrate material and particle locations are provided elsewhere. \cite{mackowski2008exact}
As before, these reflection coefficients are normalized to relate to the expansion coefficients
\begin{equation}
    p_{\alpha}^{j,\text{ref,scat}} = f^{(J)}\sum_{v=1}^\infty \sum_{u=-v}^{u=v} \sum_{s=1}^2 R_{\alpha\beta}^{(J)jl} p_{\beta}^{l,\text{inc}}
    \label{eqn:VSHW_reflection_translation_normalized}
\end{equation}
The interaction equations then become
\begin{equation}
    p_{\alpha}^{j,\text{inc}}= p_{\alpha}^{j,\text{src}} + p_{\alpha}^{j,\text{src,ref}} - \left( A_{\alpha \beta}^{(3)jl} + R_{\alpha \beta}^{jl} \right) \tmatrix_{\beta \gamma}^l p_{\gamma}^{l,\text{inc}}
    \label{eqn:particle_interactions_substrate}
\end{equation}
where $p_{\alpha}^{j,\text{src,ref}}$ are the expansion coefficients of the source reflected off of the substrate.
This approach can be extended to multiple substrates (layered media) \cite{egel2014dipole}.
It can also be extended to non-spherical particles very close to the substrate where the Rayleigh hypothesis fails. \cite{egel2016light}

\section{Symmetries}
Symmetries in the system can be used to reduce the total number of interaction equations.
It is important to remember that not only do the particles have to satisfy a given symmetry (their position, size, orientation, and material), but that the incident source field must also satisfy it.

In many cases, a phase factor must be included with a given symmetry depending on the incident field.
For example, if the incident field is $x$-polarized and the mirror is in the $yz$ plane, the mirrored fields will be $-x$-polarized.
This can be corrected by including a $\pi$ phase factor in the mirror symmetry (multiplying by $-1$).
On the other hand, if the incident field is right-hand-circularly polarized and a mirror symmetry is imposed, the mirrored fields will be left-hand-circularly polarized.
This cannot be corrected by using a phase factor.

\subsection{Translational symmetry (periodic boundary conditions)}
If the unit cell consists of a single particle,
\begin{equation}
    p_{\alpha}^{\text{inc}} = p_{\alpha}^{\text{src}} - \Phi^l A_{\alpha \beta}^{(3)0l} \tmatrix_{\beta \gamma} p_{\gamma}^{\text{inc}}
\end{equation}
where $l$ enumerates the unit cells and $\Phi^l$ is a phase factor.
For a plane wave with a given $\bm{k}$ vector, $\Phi^l = \exp(-i\bm{k} \cdot \bm{r}^l)$.

If the unit cell consists of $N$ particles, let $l$ enumerate the positions of the unit cell and $j$ enumerate the positions of the particles relative to the unit cell position
\begin{equation}
    p_{\alpha}^{j,\text{inc}} = p_{\alpha}^{j,\text{src}} - \Phi^l A_{\alpha \beta}^{(3)j 0} A_{\beta \gamma}^{(3)0l} A_{\gamma \delta}^{(3)l j^\prime} \tmatrix_{\delta \epsilon}^{j^\prime} p_{\epsilon}^{j^\prime,\text{inc}}
\end{equation}
Since three VSHW translations are performed (one to the center of the unit cell, one to the origin, and one back to a particle), this approach comes with a loss of information.
This loss of information can be avoided by performing the translation directly, at the cost of performance
\begin{equation}
    p_{\alpha}^{j,\text{inc}} = p_{\alpha}^{j,\text{src}} - \Phi^{j^\prime} A_{\alpha \beta}^{(3)j j^\prime} \tmatrix_{\beta \gamma}^{j^\prime} p_{\gamma}^{j^\prime,\text{inc}}
\end{equation}

\subsection{Mirror symmetry}
Suppose that there is a mirror plane that passes through the origin and has a normal vector of $\bm{\hat x}$, $\bm{\hat y}$, or $\bm{\hat z}$.
Each particle at position $\bm{r}^j$ then has a corresponding mirror particle at position $\bm{r}^{\prime j}$.
By symmetry, the expansion coefficients of the mirror particle are related to those of the original particle
\begin{equation}
    p^{\prime j,\text{inc}}_{mnr} = \Phi (-1)^{r+1} p^{j,\text{inc}}_{-mnr}
    \label{eqn:mirror_symmetry}
\end{equation}
i.e.\ the mirror negates the azimuthal index $m$ and negates the magnetic (TE) modes ($r=2$).

\subsection{Rotational symmetry}
Suppose the system has a discrete rotational symmetry of order $R$.
If a particle exists at position $r^{j1}$, then the symmetric particles are at positions $r^{jc}$, where $c = 2..R$.
By symmetry,
\begin{equation}
    p^{jc,\text{inc}}_{mnr} = D_{mu}^{n}(\theta_c)\, p^{j1,\text{inc}}_{unr}
    \label{eqn:rotational_symmetry}
\end{equation}
where $\theta_c$ is the rotation angle of the $c$th symmetric copy.
Note that this requires the polarization of the light to be circular.

\section{Cluster properties}
\subsection{Cluster coefficients and cluster T-matrix}
The expansion coefficients of the entire cluster, $p_{\alpha}^{\text{cluster}}$, are computed by translating the individual particle coefficients $p_{\alpha}^{j,\text{scat}}$ to the origin $\bm{r_0}$
\begin{equation}
    p_{\alpha}^{\text{cluster}} = A_{\alpha \beta}^{(1)0j} p_{\beta}^{j,\text{scat}}
    \label{eqn:cluster_coefficients}
\end{equation}
Note that $J=1$ in the translation coefficient since the translation is from the scattered fields around one origin to the scattered fields around a different origin.

The \emph{inverse particle aggregate T-matrix} is defined by
\begin{equation}
    \tmatrix_{\alpha\beta}^{jl} = A_{\alpha \gamma}^{(3)jl} \tmatrix_{\gamma \beta}^l
    \label{eqn:particle_aggregate_tmatrix}
\end{equation}
and the interaction equation becomes
\begin{equation}
    p_{\alpha}^{j,\text{inc}} = p_{\alpha}^{j,\text{src}} - \tmatrix_{\alpha \beta}^{jl} p_{\beta}^{l,\text{inc}}
\end{equation}
The \emph{cluster T-matrix} is defined such that
\begin{equation}
    p_\alpha^{\text{cluster}} = \tmatrix_{\alpha\beta}^{\text{cluster}} p_\beta^{0,\text{src}}
    \label{eqn:cluster_tmatrix}
\end{equation}
That is, the cluster T-matrix treats the system of particles as if it were a single scattering object, hiding the internal details.

\subsection{Cross-sections}
The cross-sections can be computed via two methods: one that uses the cluster coefficients $p_\alpha^\text{cluster}$ and one that uses the individual particle coefficients $p_\alpha^j$.
\hfill \textit{Cross-sections via cluster coefficients} \cite{xu1995electromagnetic}
\begin{subequations}
\begin{align}
C_\text{scat} &= \frac{4\pi}{k^2} \sum_{\alpha} |p_{\alpha}^\text{cluster}|^2 \\
C_\text{ext} &= \frac{4\pi}{k^2} \sum_{\alpha} \text{Re} \left\{ (p_\alpha^{0,\text{src}})^* p_\alpha^\text{cluster} \right\} \\
C_\text{abs} &= C_\text{ext} - C_\text{scat}
\end{align}
\end{subequations}
The benefit of this approach lies in its interpretation: each term in the scattering sum corresponds to the multipolar scattering of the $\alpha$ mode, so that the scattering from the entire cluster can be identified as electric or magnetic in nature, dipole, quadrupole, etc. For total cross-sections, however, these equations should typically be avoided, since translating to the cluster coefficients incurs a loss of information and the sums may not converge.

All of these cross-sections have units of (area)$\times$(electric field)$^2$. If the source is a plane wave of amplitude $E_0$, then the cross-sections should be normalized by $E_0^2$. For non-plane-wave sources, the cross-sections should be normalized according to the convention being used, typically an intensity averaged over some area:
\begin{equation}
E_0^2 = \frac{1}{A} \int_A \boldsymbol{S}(\boldsymbol{r}) \cdot \bm{\hat n} \;dA
\end{equation}
where $\bm{S} = \frac{1}{2} \text{Re}\{\bm{E} \times \bm{H}^*\}$ is the time-averaged Poynting vector. For instance, the area $A$ may be a circular aperture (the power striking a structure) or all of space (the total power of the source, provided it is finite).

\subsection{Force and torque}
\newcommand{\mst}{\langle \boldsymbol{T} \rangle}
The time-averaged force $\langle \bm{F} \rangle$ and torque $\langle \bm{\tau} \rangle$ on a particle can be determined by integrating the Maxwell stress tensor $\mst$ over a closed surface surrounding the particle:
\begin{subequations}
\begin{align}
\mst &= \frac{1}{2} \text{Re} \left[ \varepsilon_b \boldsymbol{E} \otimes \boldsymbol{E}^* + \mu_b \boldsymbol{H} \otimes \boldsymbol{H}^* - \frac{1}{2}(\varepsilon_b |\boldsymbol{E}|^2 + \mu_b |\boldsymbol{H}|^2)\boldsymbol{I} \right] \\
\langle \boldsymbol{F} \rangle &= \oint_\Omega \mst \cdot d \boldsymbol{\Omega} \\
\langle \boldsymbol{\tau} \rangle &= \oint_\Omega \boldsymbol{r} \times \mst \cdot d \boldsymbol{\Omega}
\end{align}
\end{subequations}
where $\bm{I}$ is the $3\times 3$ identity matrix and $\otimes$ is the vector outer product. The electric and magnetic fields around particle $j$ can be calculated using the field expansions, \cref{eqn:electric_field_expansion,eqn:magnetic_field_expansion}. The integration for the time-averaged force can then be carried out analytically in the far-field, resulting in a sum over the particle expansion coefficients. \cite{barton1989theoretical} For these equations, it is helpful to denote $a_{mn} = p_{mn1}^{j,\text{inc}}$, $b_{mn} = p_{mn2}^{j,\text{inc}}$, $p_{mn} = p_{mn1}^{j,\text{scat}}$, and $q_{mn} = p_{mn2}^{j,\text{scat}}$.
Then, the force is given by \begin{subequations} \begin{align} \begin{split} F_x + iF_y =& \frac{\pi}{k^2} \sum_{n=1}^{N_\text{max}} \sum_{m=-n}^{n} \frac{1}{n+1}\bigg\{ \frac{\sqrt{(n+m+1)(n-m)}}{n}\frac{\varepsilon_b}{\mu_b} \bigg[2a_{mn}b_{m+1n}^* - a_{mn}q_{m+1n}^* \\ & - p_{mn} b_{m+1n}^* + 2b_{mn}a_{m+1n}^* - b_{mn}p_{m+1n}^* - q_{mn}a_{m+1n}^* \bigg] \\ & - \sqrt{\frac{(n+m+2)(n+m+1)n(n+2)}{(2n+3)(2n+1)}} \bigg[ 2 \varepsilon_b a_{mn}a_{m+1n+1}^* - \varepsilon_b a_{mn}p_{m+1n+1}^* \\ & - \varepsilon_b p_{mn}a_{m+1n+1}^* + 2 \frac{\varepsilon_b}{\mu_b} b_{mn}b_{m+1n+1}^* - \frac{\varepsilon_b}{\mu_b} b_{mn}q_{m+1n+1}^* - \frac{\varepsilon_b}{\mu_b} q_{mn}b_{m+1n+1}^*\bigg] \\ & + \sqrt{\frac{(n-m+1)(n-m+2)n(n+2)}{(2n+3)(2n+1)}} \bigg[ 2 \varepsilon_b a_{m-1n+1}a_{mn}^* - \varepsilon_b a_{m-1n+1}p_{mn}^* \\ & - \varepsilon_b p_{m-1n+1}a_{mn}^* + 2 \frac{\varepsilon_b}{\mu_b} b_{m-1n+1}b_{mn}^* - \frac{\varepsilon_b}{\mu_b} b_{m-1n+1}q_{mn}^* - \frac{\varepsilon_b}{\mu_b} q_{m-1n+1}b_{mn}^*\bigg] \bigg\} \end{split} \end{align} \begin{align} \begin{split} F_z =& -\frac{2\pi}{k^2} \sum_{n=1}^{N_\text{max}} \sum_{m=-n}^{n} \frac{1}{n+1}\text{Re}\bigg\{ \frac{m}{n} \frac{\varepsilon_b}{\mu_b} \bigg[ 2a_{mn}b_{mn}^* - a_{mn}q_{mn}^* - p_{mn}b_{mn}^* \bigg] \\ & + \sqrt{\frac{(n-m+1)(n+m+1)n(n+2)}{(2n+3)(2n+1)}} \bigg[ 2 \varepsilon_b a_{mn+1}a_{mn}^* - \varepsilon_b a_{mn+1}p_{mn}^* - \varepsilon_b p_{mn+1}a_{mn}^* \\ & + 2 \frac{\varepsilon_b}{\mu_b} b_{mn+1}b_{mn}^* - \frac{\varepsilon_b}{\mu_b} b_{mn+1}q_{mn}^* - \frac{\varepsilon_b}{\mu_b} q_{mn+1}b_{mn}^* \bigg] \bigg\} \end{split} \end{align} \end{subequations} Similarly, the torque can be integrated analytically, \begin{subequations} \begin{align} \begin{split} \tau_x =& \frac{2\pi}{k^3} \sum_{n=1}^{N_\text{max}} \sum_{m=-n}^{n} \sqrt{(n-m)(n+m+1)} \; \text{Re} \bigg\{ \varepsilon_b a_{mn}a_{m+1n}^* + \mu_b b_{mn}b_{m+1n}^* \\ & - \frac{1}{2} \bigg[ \varepsilon_b a_{m+1n}p_{mn}^* + \varepsilon_b a_{mn}p_{m+1n}^* + \mu_b b_{m+1n}q_{mn}^* + \mu_b b_{mn}q_{m+1n}^*\bigg] \bigg\} \end{split} \end{align} \begin{align} \begin{split} \tau_y =& \frac{2\pi}{k^3} \sum_{n=1}^{N_\text{max}} \sum_{m=-n}^{n} \sqrt{(n-m)(n+m+1)} \; \text{Im} \bigg\{ \varepsilon_b a_{mn}a_{m+1n}^* + \mu_b b_{mn}b_{m+1n}^* \\ & + \frac{1}{2} \bigg[ \varepsilon_b a_{m+1n}p_{mn}^* - \varepsilon_b a_{mn}p_{m+1n}^* + \mu_b b_{m+1n}q_{mn}^* - \mu_b b_{mn}q_{m+1n}^*\bigg] \bigg\} \end{split} \end{align} \begin{align} \begin{split} \tau_z =& -\frac{2\pi}{k^3} \sum_{n=1}^{N_\text{max}} \sum_{m=-n}^{n} m \bigg\{ \varepsilon_b |a_{mn}|^2 + \mu_b |b_{mn}|^2 - \text{Re} \bigg[ \varepsilon_b a_{mn}p_{mn}^* + \mu_b b_{mn}q_{mn}^*\bigg] \bigg\} \end{split} \end{align} \end{subequations} \subsection{Far-field expansions} In the far-field, the VSH functions take on a simplified form from their exact expression in \cref{eqn:vsh_definition}. The radial component of $\bm{N}$ vanishes and the spherical Bessel functions approach an asymptotic form. For spherically outgoing modes, the spherical Hankel function of the first kind is $h_n^{(1)}(kr) \simeq i^{-n-1} e^{ikr}/(kr)$ as $kr \rightarrow \infty$. 
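As a quick numerical check of this asymptotic form (not part of the derivation), the exact spherical Hankel function can be compared against the approximation at a large argument; the order $n$ and argument $kr$ below are arbitrary choices.
\begin{lstlisting}
import numpy as np
from scipy.special import spherical_jn, spherical_yn

# Compare h_n^(1)(x) = j_n(x) + i y_n(x) with its asymptotic form i^(-n-1) e^(ix)/x
n, x = 3, 1000.0                            # arbitrary order and (large) argument kr
h_exact = spherical_jn(n, x) + 1j*spherical_yn(n, x)
h_asym = 1j**(-n - 1)*np.exp(1j*x)/x
print(abs(h_exact - h_asym)/abs(h_exact))   # relative error ~ n(n+1)/(2x), here ~0.6%
\end{lstlisting}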
Given a set of scattering expansion coefficients $q_{mnr}$, the far fields can be expanded over a single sum of the multipolar modes:
\begin{align}
\begin{split}
E_{\text{scat},\theta}(\theta,\phi) &= i\frac{e^{ikr}}{kr} \sum_{n=1}^{N_\text{max}} \sum_{m=-n}^{n} (-i)^nE_{mn} \big[q_{mn1}\tau_{mn}(\cos\theta) + q_{mn2}\pi_{mn}(\cos\theta)\big] e^{im\phi} \\
E_{\text{scat},\phi}(\theta,\phi) &= -\frac{e^{ikr}}{kr} \sum_{n=1}^{N_\text{max}} \sum_{m=-n}^{n} (-i)^nE_{mn} \big[q_{mn1}\pi_{mn}(\cos\theta) + q_{mn2}\tau_{mn}(\cos\theta)\big] e^{im\phi} \\
\end{split}
\end{align}
One option is to set $q_{mnr} = p_{mnr}^\text{cluster}$, the cluster expansion coefficients. This approach comes with a loss of information, but it has the physical interpretation that each term in the sum can be attributed to the far fields of a given multipolar mode. To avoid the loss of information, a sum over particle pairs can be performed \cite{Xu_1997}
\begin{equation}
q_{\alpha}(\theta, \phi) = \sum_{l=1}^N \sum_{j=1}^N \sum_\beta \exp \left[i(\bm{\hat k} - \bm{\hat r}) \cdot \bm{r}^l\right] \exp \left[i \bm{\hat k} \cdot (\bm{r}^l - \bm{r}^j) \right] \tmatrix_{\alpha \beta}^{lj} p_{\beta}^{j,\text{inc}}
\end{equation}
where the expansion coefficients now depend on the angular position.

\subsection{Orientation-averaged properties}

\section{Source decomposition}
To solve the particle interaction equation, \cref{eqn:particle_interactions}, the expansion coefficients of the source field must be known at each particle. Given the electric fields of the incident source, $\boldsymbol{E}^\text{src}(\boldsymbol{r})$, the source can be decomposed into expansion coefficients by integration around particle $j$
\begin{equation}
p_{mnr}^{j,\text{src}} = i\cfrac{\int_\Omega \boldsymbol{E}^\text{src} \cdot \boldsymbol{N}_{mnr}^{(1)*} \; d\Omega} {E_{mn} \langle \boldsymbol{N}_{mnr}^{(1)},\boldsymbol{N}_{mnr}^{(1)} \rangle}
\end{equation}
where $\Omega$ is a closed surface around particle $j$.

For a plane wave, this decomposition can be carried out analytically. The direction of the incident $\bm{k}$ vector can be described by two spherical angles: the polar angle $\alpha$ and the azimuthal angle $\beta$. A linear polarization of the plane wave is either TM ($\bm{\hat \theta}$) or TE ($\bm{\hat \phi}$). Elliptically polarized light is then a complex-valued linear combination of the TM and TE polarizations.
\begin{align}
\begin{split}
p_{mn}^{j,\text{src}} &= E_0 i^{-n} E_{mn} \tau_{mn}(\cos \alpha) \exp(-im\beta) \exp(i\bm{k} \cdot \bm{r}^j) \\
q_{mn}^{j,\text{src}} &= E_0 i^{-n} E_{mn} \pi_{mn}(\cos \alpha) \exp(-im\beta) \exp(i\bm{k} \cdot \bm{r}^j)
\end{split}
\end{align}

\subsection{Far-field integration}
\subsection{Near-field point matching}
\subsection{Focused beams}
\subsection{Reflection off of an interface}

\section{Efficient numerical implementation}
\subsection{T-matrix evaluation}
The T-matrix can be numerically evaluated using the so-called extended boundary condition method (EBCM).
\cite{waterman1965matrix, barber1975scattering, mishchenko1996t} In this method, the expansion coefficients are related to one another by the equivalence principle \begin{align} \begin{split} p^\text{inc}_\alpha &= iQ^{31}_{\alpha\beta} p^\text{int}_\beta \\ p^\text{scat}_\alpha &= -iQ^{11}_{\alpha\beta} p^\text{int}_\beta \end{split} \end{align} where the $Q$ matrices are given by integrals of the VSHW functions over the particle's surface \begin{equation} Q_{\alpha \beta}^{pq} = -f^{(p)}\frac{ik_m^2}{\pi} \left[ \int_S (d\bm{S} \times \bm{N}_{\beta}^{(q)}(k\bm{r})) \cdot \bm{N}_{\hat\alpha}^{(p)}(k_m \bm{r}) + \sqrt{\frac{\varepsilon}{\varepsilon_m}}\int_S (d\bm{S} \times \bm{N}_{\hat\beta}^{(q)}(k\bm{r})) \cdot \bm{N}_{\alpha}^{(p)}(k_m \bm{r}) \right] \end{equation} where $f^{(p)}$ is defined in \cref{eqn:outgoing_factor} and the index notation $\hat \alpha$ means to swap electric and magnetic mode indices, i.e. if $\alpha = (m, n, r)$, then $\hat \alpha = (m, n, 3-r)$. The T-matrices are then determined \begin{align} \tmatrix_{\alpha\beta} &= -Q^{11}_{\alpha\gamma} \left[ Q^{31}_{\gamma\beta} \right]^{-1} \\ \tmatrix_{\alpha\beta}^\text{int} &= -i\left[ Q^{31}_{\alpha\beta} \right]^{-1} \end{align} \subsection{Evaluation of VSH transition coefficients} \subsection{VSH rotation-translation-rotation algorithm} \subsection{Matrix-free iterative solver} \subsection{Preconditioner based on spatial locality} \subsection{Parallel execution} \section{Other conventions} A different convention for the field expansions used in other work \cite{barton1989theoretical} is presented here. These field expansions were used to evaluate analytic expressions for the force and torque. \hfill \textit{Electric field} \begin{subequations} \begin{align} \begin{split} \boldsymbol{E}_\text{inc}^j = \sum_{n=1}^{N_\text{max}} \sum_{m=-n}^{n} \bigg\{ \boldsymbol{\hat r}\frac{1}{r^2} &\bigg[ n(n+1) p_{mn}^j \psi_n(kr) Y_{nm}(\theta,\phi) \bigg] \\ +\; \boldsymbol{\hat \theta}\frac{k}{r} &\left[ p_{mn}^j \psi_n^\prime(kr) \frac{\partial}{\partial \theta} Y_{nm}(\theta,\phi) - \frac{m}{\sqrt{\varepsilon_b}} q_{mn}^j \psi_n(kr) \frac{Y_{nm}(\theta,\phi)}{\sin\theta} \right] \\ +\; \boldsymbol{\hat \phi}\frac{k}{r} &\left[ im p_{mn}^j \psi_n^\prime(kr) \frac{Y_{nm}(\theta,\phi)}{\sin\theta} - \frac{i}{\sqrt{\varepsilon_b}} q_{mn}^j \psi_n(kr) \frac{\partial}{\partial \theta} Y_{nm}(\theta,\phi) \right] \bigg\} \end{split} \end{align} \begin{align} \begin{split} \boldsymbol{E}_\text{scat}^j = \sum_{n=1}^{N_\text{max}} \sum_{m=-n}^{n} \bigg\{ \boldsymbol{\hat r}\frac{1}{r^2} &\bigg[ n(n+1) a_{mn}^j \xi_n^{(1)}(kr) Y_{nm}(\theta,\phi) \bigg] \\ +\; \boldsymbol{\hat \theta}\frac{k}{r} &\left[ a_{mn}^j \xi_n^{(1)\prime}(kr) \frac{\partial}{\partial \theta} Y_{nm}(\theta,\phi) - \frac{m}{\sqrt{\varepsilon_b}} b_{mn}^j \xi_n^{(1)}(kr) \frac{Y_{nm}(\theta,\phi)}{\sin\theta} \right] \\ +\; \boldsymbol{\hat \phi}\frac{k}{r} &\left[ im a_{mn}^j \xi_n^{(1)\prime}(kr) \frac{Y_{nm}(\theta,\phi)}{\sin\theta} - \frac{i}{\sqrt{\varepsilon_b}} b_{mn}^j \xi_n^{(1)}(kr) \frac{\partial}{\partial \theta} Y_{nm}(\theta,\phi) \right] \bigg\} \end{split} \end{align} \end{subequations} \textit{Magnetic field} \begin{subequations} \begin{align} \begin{split} \boldsymbol{H}_\text{inc}^j = \sum_{n=1}^{N_\text{max}} \sum_{m=-n}^{n} \bigg\{ \boldsymbol{\hat r}\frac{1}{r^2} &\bigg[ n(n+1) q_{mn}^j \psi_n(kr) Y_{nm}(\theta,\phi) \bigg] \\ +\; \boldsymbol{\hat \theta}\frac{k}{r} &\left[ q_{mn}^j \psi_n^\prime(kr) \frac{\partial}{\partial \theta} 
Y_{nm}(\theta,\phi) + m\sqrt{\varepsilon_b} p_{mn}^j \psi_n(kr) \frac{Y_{nm}(\theta,\phi)}{\sin\theta} \right] \\ +\; \boldsymbol{\hat \phi}\frac{k}{r} &\left[ im q_{mn}^j \psi_n^\prime(kr) \frac{Y_{nm}(\theta,\phi)}{\sin\theta} + i\sqrt{\varepsilon_b} p_{mn}^j \psi_n(kr) \frac{\partial}{\partial \theta} Y_{nm}(\theta,\phi) \right] \bigg\} \end{split} \end{align}
\begin{align}
\begin{split}
\boldsymbol{H}_\text{scat}^j = \sum_{n=1}^{N_\text{max}} \sum_{m=-n}^{n} \bigg\{ \boldsymbol{\hat r}\frac{1}{r^2} &\bigg[ n(n+1) b_{mn}^j \xi_n^{(1)}(kr) Y_{nm}(\theta,\phi) \bigg] \\
+\; \boldsymbol{\hat \theta}\frac{k}{r} &\left[ b_{mn}^j \xi_n^{(1)\prime}(kr) \frac{\partial}{\partial \theta} Y_{nm}(\theta,\phi) + m\sqrt{\varepsilon_b} a_{mn}^j \xi_n^{(1)}(kr) \frac{Y_{nm}(\theta,\phi)}{\sin\theta} \right] \\
+\; \boldsymbol{\hat \phi}\frac{k}{r} &\left[ im b_{mn}^j \xi_n^{(1)\prime}(kr) \frac{Y_{nm}(\theta,\phi)}{\sin\theta} + i\sqrt{\varepsilon_b} a_{mn}^j \xi_n^{(1)}(kr) \frac{\partial}{\partial \theta} Y_{nm}(\theta,\phi) \right] \bigg\}
\end{split}
\end{align}
\end{subequations}
where $\xi_n^{(1)} = \psi_n - i \chi_n$, with $\psi_n$ and $\chi_n$ the Riccati-Bessel functions of the first and second kind, and $Y_{nm}$ the spherical harmonics
\begin{align}
\begin{split}
\psi_n(x) &= xj_n(x) \\
\chi_n(x) &= -xy_n(x) \\
\xi_n^{(1)}(x) &= x[j_n(x) + iy_n(x)] = xh_n^{(1)}(x) \\
Y_{nm}(\theta, \phi) &= \sqrt{\frac{2n+1}{4\pi}\frac{(n-m)!}{(n+m)!}} P_n^m(\cos \theta) e^{im\phi}
\end{split}
\end{align}
Denoting the coefficients of our convention $\bar a_n$, $\bar b_n$, $\bar p_{mn}$, $\bar q_{mn}$, $\bar a_{mn}$, $\bar b_{mn}$, the two conventions are related by
\begin{align}
\begin{split}
\bar a_{n} &= - a_{n} \\
\bar b_{n} &= - b_{n} \\
\bar p_{mn} &= \frac{k^2}{i^{n-1}}\sqrt{\frac{n(n+1)}{4\pi}} p_{mn} \\
\bar q_{mn} &= -\frac{k^2}{i^n}\sqrt{\frac{\mu_b}{\varepsilon_b} \frac{n(n+1)}{4\pi}} q_{mn} \\
\bar a_{mn} &= -\frac{k^2}{i^{n-1}}\sqrt{\frac{n(n+1)}{4\pi}} a_{mn} \\
\bar b_{mn} &= \frac{k^2}{i^n}\sqrt{\frac{\mu_b}{\varepsilon_b} \frac{n(n+1)}{4\pi}} b_{mn}
\end{split}
\end{align}
Another convention uses a different value for the $E_{mn}$ normalization constant \cite{xu1995electromagnetic}
\begin{equation}
E_{mn} = |E_0|i^n \frac{2n+1}{n(n+1)}
\end{equation}
where $|E_0|$ is the amplitude of the source field. We have chosen to absorb this amplitude into the $a$, $b$, $p$, and $q$ coefficients.

\section{MiePy: a GMMT Python library}
\subsection{Installation}
\noindent Install the required dependencies:
\begin{itemize}[label={\tiny\raisebox{1ex}{\textbullet}}]
\item \href{https://cmake.org/install/}{CMake} (C++ build system)
\item \href{http://eigen.tuxfamily.org/index.php?title=Main_Page}{Eigen} (C++ linear algebra library)
\item \href{https://www.gnu.org/software/gsl/}{GNU Scientific Library (GSL)}
\item GCC and GFortran
\item Python 3 and pip
\end{itemize}
Run the commands:
\begin{lstlisting}
git clone https://[email protected]/johnapark/miepy.git --recurse-submodules
cd miepy
pip install -e . --user    # build and install
pytest tests               # run tests
\end{lstlisting}

\subsection{Example script}
Example Python code using MiePy:
\begin{lstlisting}
import miepy
nm = 1e-9

# define the material and source
Ag = miepy.materials.Ag()
source = miepy.sources.plane_wave(polarization=[1,0])

# build an Ag dimer: 50 nm radius spheres separated by 600 nm along the x-direction
dimer = miepy.sphere_cluster(position=[[300*nm,0,0], [-300*nm,0,0]],
                             radius=50*nm,
                             material=Ag,
                             source=source,
                             wavelength=800*nm,
                             lmax=2)

# obtain the cross-sections
scat, absorb, extinct = dimer.cross_sections()

# obtain the force and torque on the right particle
F = dimer.force_on_particle(0)
T = dimer.torque_on_particle(0)
\end{lstlisting}

\subsection{Target features}
\begin{itemize}
\item[] \textbf{version 0.3} (released)
\begin{itemize}[label={\tiny\raisebox{1ex}{\textbullet}}]
\item 30$\times$ performance improvement for larger clusters
\item T-matrix formulation for non-spherical particles
\item Plane waves and beams can have arbitrary direction and polarization
\end{itemize}
\item[] \textbf{version 0.4}
\begin{itemize}[label={\tiny\raisebox{1ex}{\textbullet}}]
\item C++ implementation with OpenMP parallelization and Python bindings (using pybind11)
\item Periodic boundary conditions (1D and 2D)
\item Symmetry relationships (mirror, discrete rotation)
\item Additional performance: (i) remaining A/B symmetries, (ii) no T-matrix recomputation for identical geometries, (iii) transition from [2, rmax] $\rightarrow$ [rmax] shape
\item Save solution so that it can be reloaded
\item Any changes to T-matrix/sources
\end{itemize}
\item[] \textbf{version 0.5}
\begin{itemize}[label={\tiny\raisebox{1ex}{\textbullet}}]
\item Additional functions: (i) cluster T-matrix, (ii) S-matrix, Mueller/Jones matrix, (iii) spin vs. orbital torque, (iv) local density of states, (v) energy and momentum density
\item Performance: (i) lmax per particle, (ii) cluster T-matrix re-use with many sources, (iii) computing A/B for all modes using upwards recursion/symmetries
\item ``unpolarized'' light
\item Orientation-averaged calculations
\item Scene visualization (3D)
\item Prettier output for main classes using \_\_repr\_\_
\item Existing TODO items
\item Improved documentation, examples, tests, and a tutorial/introduction for quick start
\end{itemize}
\item[] \textbf{future}
\begin{itemize}[label={\tiny\raisebox{1ex}{\textbullet}}]
\item Substrates, layered substrates
\item Beyond the Rayleigh hypothesis for non-spherical particle interactions
\item More T-matrix options: (i) non-axisymmetric particles, Gaussian spheres/cylinders, layered spheres/spheroids, etc., (ii) chiral materials, (iii) anisotropic materials (expand the definition of the material class)
\item Valid field expansion in circumscribing spheres (possibly using discrete sources)
\item 3D periodic boundary conditions
\item Band diagram calculations
\item Performance: (i) MPI/GPU parallelization, (ii) matrix-free solver and rotation-translation-rotation (RTR) coupling method, (iii) approximation for long-range interactions in large systems, (iv) preconditioner for iterative matrix solver based on spatial locality, (v) grid aggregation + FFT to obtain $\mathcal{O}(N \log N)$ performance
\item Time-domain via IFFT
\end{itemize}
\end{itemize}

\bibliography{generalized_mie_theory}
\end{document}